Bluetooth: Change sk lock to chan lock in L2CAP core
net/bluetooth/l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
13
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
26 */
27
28 /* Bluetooth L2CAP core. */
29
30 #include <linux/module.h>
31
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
50 #include <net/sock.h>
51
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
54
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
59
60 bool disable_ertm;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
67
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
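/* Locking: code below that walks a connection's channel list takes
 * conn->chan_lock first and then locks each channel with
 * l2cap_chan_lock() before touching it.  A sketch of the pattern used
 * by l2cap_conn_start(), l2cap_conn_ready() and l2cap_conn_del():
 *
 *	mutex_lock(&conn->chan_lock);
 *	list_for_each_entry(chan, &conn->chan_l, list) {
 *		l2cap_chan_lock(chan);
 *		...
 *		l2cap_chan_unlock(chan);
 *	}
 *	mutex_unlock(&conn->chan_lock);
 *
 * The socket lock is only taken for the short sections that update
 * socket state (see l2cap_state_change() and l2cap_chan_set_err()).
 */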
76 /* ---- L2CAP channels ---- */
77
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
79 {
80 struct l2cap_chan *c;
81
82 list_for_each_entry(c, &conn->chan_l, list) {
83 if (c->dcid == cid)
84 return c;
85 }
86 return NULL;
87 }
88
89 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
90 {
91 struct l2cap_chan *c;
92
93 list_for_each_entry(c, &conn->chan_l, list) {
94 if (c->scid == cid)
95 return c;
96 }
97 return NULL;
98 }
99
100 /* Find channel with given SCID.
101  * Returns the matching channel (not locked) or NULL */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103 {
104 struct l2cap_chan *c;
105
106 mutex_lock(&conn->chan_lock);
107 c = __l2cap_get_chan_by_scid(conn, cid);
108 mutex_unlock(&conn->chan_lock);
109
110 return c;
111 }
112
113 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
114 {
115 struct l2cap_chan *c;
116
117 list_for_each_entry(c, &conn->chan_l, list) {
118 if (c->ident == ident)
119 return c;
120 }
121 return NULL;
122 }
123
124 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
125 {
126 struct l2cap_chan *c;
127
128 mutex_lock(&conn->chan_lock);
129 c = __l2cap_get_chan_by_ident(conn, ident);
130 mutex_unlock(&conn->chan_lock);
131
132 return c;
133 }
134
135 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
136 {
137 struct l2cap_chan *c;
138
139 list_for_each_entry(c, &chan_list, global_l) {
140 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
141 return c;
142 }
143 return NULL;
144 }
145
146 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
147 {
148 int err;
149
150 write_lock(&chan_list_lock);
151
152 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
153 err = -EADDRINUSE;
154 goto done;
155 }
156
157 if (psm) {
158 chan->psm = psm;
159 chan->sport = psm;
160 err = 0;
161 } else {
162 u16 p;
163
164 err = -EINVAL;
165 for (p = 0x1001; p < 0x1100; p += 2)
166 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
167 chan->psm = cpu_to_le16(p);
168 chan->sport = cpu_to_le16(p);
169 err = 0;
170 break;
171 }
172 }
173
174 done:
175 write_unlock(&chan_list_lock);
176 return err;
177 }
178
179 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
180 {
181 write_lock(&chan_list_lock);
182
183 chan->scid = scid;
184
185 write_unlock(&chan_list_lock);
186
187 return 0;
188 }
189
190 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
191 {
192 u16 cid = L2CAP_CID_DYN_START;
193
194 for (; cid < L2CAP_CID_DYN_END; cid++) {
195 if (!__l2cap_get_chan_by_scid(conn, cid))
196 return cid;
197 }
198
199 return 0;
200 }
201
202 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
203 {
204 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
205 state_to_string(state));
206
207 chan->state = state;
208 chan->ops->state_change(chan->data, state);
209 }
210
211 static void l2cap_state_change(struct l2cap_chan *chan, int state)
212 {
213 struct sock *sk = chan->sk;
214
215 lock_sock(sk);
216 __l2cap_state_change(chan, state);
217 release_sock(sk);
218 }
219
220 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
221 {
222 struct sock *sk = chan->sk;
223
224 sk->sk_err = err;
225 }
226
227 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
228 {
229 struct sock *sk = chan->sk;
230
231 lock_sock(sk);
232 __l2cap_chan_set_err(chan, err);
233 release_sock(sk);
234 }
235
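/* Channel timer expiry. Runs from the workqueue: takes conn->chan_lock
 * and the channel lock, maps the current state to an error code,
 * closes the channel, then invokes the close callback and drops a
 * channel reference with l2cap_chan_put().
 */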
236 static void l2cap_chan_timeout(struct work_struct *work)
237 {
238 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
239 chan_timer.work);
240 struct l2cap_conn *conn = chan->conn;
241 int reason;
242
243 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
244
245 mutex_lock(&conn->chan_lock);
246 l2cap_chan_lock(chan);
247
248 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
249 reason = ECONNREFUSED;
250 else if (chan->state == BT_CONNECT &&
251 chan->sec_level != BT_SECURITY_SDP)
252 reason = ECONNREFUSED;
253 else
254 reason = ETIMEDOUT;
255
256 l2cap_chan_close(chan, reason);
257
258 l2cap_chan_unlock(chan);
259
260 chan->ops->close(chan->data);
261 mutex_unlock(&conn->chan_lock);
262
263 l2cap_chan_put(chan);
264 }
265
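/* Allocate a new channel for socket sk: initialise the channel lock
 * and timer, register the channel on the global chan_list and return
 * it in state BT_OPEN with a single reference held.
 */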
266 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
267 {
268 struct l2cap_chan *chan;
269
270 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
271 if (!chan)
272 return NULL;
273
274 mutex_init(&chan->lock);
275
276 chan->sk = sk;
277
278 write_lock(&chan_list_lock);
279 list_add(&chan->global_l, &chan_list);
280 write_unlock(&chan_list_lock);
281
282 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
283
284 chan->state = BT_OPEN;
285
286 atomic_set(&chan->refcnt, 1);
287
288 BT_DBG("sk %p chan %p", sk, chan);
289
290 return chan;
291 }
292
293 void l2cap_chan_destroy(struct l2cap_chan *chan)
294 {
295 write_lock(&chan_list_lock);
296 list_del(&chan->global_l);
297 write_unlock(&chan_list_lock);
298
299 l2cap_chan_put(chan);
300 }
301
302 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
303 {
304 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
305 chan->psm, chan->dcid);
306
307 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
308
309 chan->conn = conn;
310
311 switch (chan->chan_type) {
312 case L2CAP_CHAN_CONN_ORIENTED:
313 if (conn->hcon->type == LE_LINK) {
314 /* LE connection */
315 chan->omtu = L2CAP_LE_DEFAULT_MTU;
316 chan->scid = L2CAP_CID_LE_DATA;
317 chan->dcid = L2CAP_CID_LE_DATA;
318 } else {
319 /* Alloc CID for connection-oriented socket */
320 chan->scid = l2cap_alloc_cid(conn);
321 chan->omtu = L2CAP_DEFAULT_MTU;
322 }
323 break;
324
325 case L2CAP_CHAN_CONN_LESS:
326 /* Connectionless socket */
327 chan->scid = L2CAP_CID_CONN_LESS;
328 chan->dcid = L2CAP_CID_CONN_LESS;
329 chan->omtu = L2CAP_DEFAULT_MTU;
330 break;
331
332 default:
333 /* Raw socket can send/recv signalling messages only */
334 chan->scid = L2CAP_CID_SIGNALING;
335 chan->dcid = L2CAP_CID_SIGNALING;
336 chan->omtu = L2CAP_DEFAULT_MTU;
337 }
338
339 chan->local_id = L2CAP_BESTEFFORT_ID;
340 chan->local_stype = L2CAP_SERV_BESTEFFORT;
341 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
342 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
343 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
344 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
345
346 l2cap_chan_hold(chan);
347
348 list_add(&chan->list, &conn->chan_l);
349 }
350
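/* Locked wrapper around __l2cap_chan_add(): conn->chan_lock is held
 * only for the duration of the add, so this must not be called with
 * that mutex already held.
 */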
351 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
352 {
353 mutex_lock(&conn->chan_lock);
354 __l2cap_chan_add(conn, chan);
355 mutex_unlock(&conn->chan_lock);
356 }
357
358 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
359 {
360 struct sock *sk = chan->sk;
361 struct l2cap_conn *conn = chan->conn;
362 struct sock *parent = bt_sk(sk)->parent;
363
364 __clear_chan_timer(chan);
365
366 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
367
368 if (conn) {
369 /* Delete from channel list */
370 list_del(&chan->list);
371
372 l2cap_chan_put(chan);
373
374 chan->conn = NULL;
375 hci_conn_put(conn->hcon);
376 }
377
378 lock_sock(sk);
379
380 __l2cap_state_change(chan, BT_CLOSED);
381 sock_set_flag(sk, SOCK_ZAPPED);
382
383 if (err)
384 __l2cap_chan_set_err(chan, err);
385
386 if (parent) {
387 bt_accept_unlink(sk);
388 parent->sk_data_ready(parent, 0);
389 } else
390 sk->sk_state_change(sk);
391
392 release_sock(sk);
393
394 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
395 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
396 return;
397
398 skb_queue_purge(&chan->tx_q);
399
400 if (chan->mode == L2CAP_MODE_ERTM) {
401 struct srej_list *l, *tmp;
402
403 __clear_retrans_timer(chan);
404 __clear_monitor_timer(chan);
405 __clear_ack_timer(chan);
406
407 skb_queue_purge(&chan->srej_q);
408
409 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
410 list_del(&l->list);
411 kfree(l);
412 }
413 }
414 }
415
416 static void l2cap_chan_cleanup_listen(struct sock *parent)
417 {
418 struct sock *sk;
419
420 BT_DBG("parent %p", parent);
421
422 /* Close channels that have not yet been accepted */
423 while ((sk = bt_accept_dequeue(parent, NULL))) {
424 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
425
426 l2cap_chan_lock(chan);
427 __clear_chan_timer(chan);
428 l2cap_chan_close(chan, ECONNRESET);
429 l2cap_chan_unlock(chan);
430
431 chan->ops->close(chan->data);
432 }
433 }
434
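/* Tear down a channel according to its current state: listening
 * channels drop their pending children, connected/configuring channels
 * on ACL links send a Disconnection Request, BT_CONNECT2 channels on
 * ACL links reject the pending Connection Request, and everything else
 * is simply deleted or zapped.
 */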
435 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
436 {
437 struct l2cap_conn *conn = chan->conn;
438 struct sock *sk = chan->sk;
439
440 BT_DBG("chan %p state %s sk %p", chan,
441 state_to_string(chan->state), sk);
442
443 switch (chan->state) {
444 case BT_LISTEN:
445 lock_sock(sk);
446 l2cap_chan_cleanup_listen(sk);
447
448 __l2cap_state_change(chan, BT_CLOSED);
449 sock_set_flag(sk, SOCK_ZAPPED);
450 release_sock(sk);
451 break;
452
453 case BT_CONNECTED:
454 case BT_CONFIG:
455 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
456 conn->hcon->type == ACL_LINK) {
457 __clear_chan_timer(chan);
458 __set_chan_timer(chan, sk->sk_sndtimeo);
459 l2cap_send_disconn_req(conn, chan, reason);
460 } else
461 l2cap_chan_del(chan, reason);
462 break;
463
464 case BT_CONNECT2:
465 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
466 conn->hcon->type == ACL_LINK) {
467 struct l2cap_conn_rsp rsp;
468 __u16 result;
469
470 if (bt_sk(sk)->defer_setup)
471 result = L2CAP_CR_SEC_BLOCK;
472 else
473 result = L2CAP_CR_BAD_PSM;
474 l2cap_state_change(chan, BT_DISCONN);
475
476 rsp.scid = cpu_to_le16(chan->dcid);
477 rsp.dcid = cpu_to_le16(chan->scid);
478 rsp.result = cpu_to_le16(result);
479 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
480 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
481 sizeof(rsp), &rsp);
482 }
483
484 l2cap_chan_del(chan, reason);
485 break;
486
487 case BT_CONNECT:
488 case BT_DISCONN:
489 l2cap_chan_del(chan, reason);
490 break;
491
492 default:
493 lock_sock(sk);
494 sock_set_flag(sk, SOCK_ZAPPED);
495 release_sock(sk);
496 break;
497 }
498 }
499
500 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
501 {
502 if (chan->chan_type == L2CAP_CHAN_RAW) {
503 switch (chan->sec_level) {
504 case BT_SECURITY_HIGH:
505 return HCI_AT_DEDICATED_BONDING_MITM;
506 case BT_SECURITY_MEDIUM:
507 return HCI_AT_DEDICATED_BONDING;
508 default:
509 return HCI_AT_NO_BONDING;
510 }
511 } else if (chan->psm == cpu_to_le16(0x0001)) {
512 if (chan->sec_level == BT_SECURITY_LOW)
513 chan->sec_level = BT_SECURITY_SDP;
514
515 if (chan->sec_level == BT_SECURITY_HIGH)
516 return HCI_AT_NO_BONDING_MITM;
517 else
518 return HCI_AT_NO_BONDING;
519 } else {
520 switch (chan->sec_level) {
521 case BT_SECURITY_HIGH:
522 return HCI_AT_GENERAL_BONDING_MITM;
523 case BT_SECURITY_MEDIUM:
524 return HCI_AT_GENERAL_BONDING;
525 default:
526 return HCI_AT_NO_BONDING;
527 }
528 }
529 }
530
531 /* Service level security */
532 int l2cap_chan_check_security(struct l2cap_chan *chan)
533 {
534 struct l2cap_conn *conn = chan->conn;
535 __u8 auth_type;
536
537 auth_type = l2cap_get_auth_type(chan);
538
539 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
540 }
541
542 static u8 l2cap_get_ident(struct l2cap_conn *conn)
543 {
544 u8 id;
545
546 /* Get next available identifier.
547  * 1 - 128 are used by the kernel.
548 * 129 - 199 are reserved.
549 * 200 - 254 are used by utilities like l2ping, etc.
550 */
551
552 spin_lock(&conn->lock);
553
554 if (++conn->tx_ident > 128)
555 conn->tx_ident = 1;
556
557 id = conn->tx_ident;
558
559 spin_unlock(&conn->lock);
560
561 return id;
562 }
563
564 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
565 {
566 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
567 u8 flags;
568
569 BT_DBG("code 0x%2.2x", code);
570
571 if (!skb)
572 return;
573
574 if (lmp_no_flush_capable(conn->hcon->hdev))
575 flags = ACL_START_NO_FLUSH;
576 else
577 flags = ACL_START;
578
579 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
580 skb->priority = HCI_PRIO_MAX;
581
582 hci_send_acl(conn->hchan, skb, flags);
583 }
584
585 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
586 {
587 struct hci_conn *hcon = chan->conn->hcon;
588 u16 flags;
589
590 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
591 skb->priority);
592
593 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
594 lmp_no_flush_capable(hcon->hdev))
595 flags = ACL_START_NO_FLUSH;
596 else
597 flags = ACL_START;
598
599 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
600 hci_send_acl(chan->conn->hchan, skb, flags);
601 }
602
603 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
604 {
605 struct sk_buff *skb;
606 struct l2cap_hdr *lh;
607 struct l2cap_conn *conn = chan->conn;
608 int count, hlen;
609
610 if (chan->state != BT_CONNECTED)
611 return;
612
613 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
614 hlen = L2CAP_EXT_HDR_SIZE;
615 else
616 hlen = L2CAP_ENH_HDR_SIZE;
617
618 if (chan->fcs == L2CAP_FCS_CRC16)
619 hlen += L2CAP_FCS_SIZE;
620
621 BT_DBG("chan %p, control 0x%8.8x", chan, control);
622
623 count = min_t(unsigned int, conn->mtu, hlen);
624
625 control |= __set_sframe(chan);
626
627 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
628 control |= __set_ctrl_final(chan);
629
630 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
631 control |= __set_ctrl_poll(chan);
632
633 skb = bt_skb_alloc(count, GFP_ATOMIC);
634 if (!skb)
635 return;
636
637 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
638 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
639 lh->cid = cpu_to_le16(chan->dcid);
640
641 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
642
643 if (chan->fcs == L2CAP_FCS_CRC16) {
644 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
645 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
646 }
647
648 skb->priority = HCI_PRIO_MAX;
649 l2cap_do_send(chan, skb);
650 }
651
652 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
653 {
654 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
655 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
656 set_bit(CONN_RNR_SENT, &chan->conn_state);
657 } else
658 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
659
660 control |= __set_reqseq(chan, chan->buffer_seq);
661
662 l2cap_send_sframe(chan, control);
663 }
664
665 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
666 {
667 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
668 }
669
670 static void l2cap_do_start(struct l2cap_chan *chan)
671 {
672 struct l2cap_conn *conn = chan->conn;
673
674 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
675 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
676 return;
677
678 if (l2cap_chan_check_security(chan) &&
679 __l2cap_no_conn_pending(chan)) {
680 struct l2cap_conn_req req;
681 req.scid = cpu_to_le16(chan->scid);
682 req.psm = chan->psm;
683
684 chan->ident = l2cap_get_ident(conn);
685 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
686
687 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
688 sizeof(req), &req);
689 }
690 } else {
691 struct l2cap_info_req req;
692 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
693
694 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
695 conn->info_ident = l2cap_get_ident(conn);
696
697 schedule_delayed_work(&conn->info_timer,
698 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
699
700 l2cap_send_cmd(conn, conn->info_ident,
701 L2CAP_INFO_REQ, sizeof(req), &req);
702 }
703 }
704
705 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
706 {
707 u32 local_feat_mask = l2cap_feat_mask;
708 if (!disable_ertm)
709 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
710
711 switch (mode) {
712 case L2CAP_MODE_ERTM:
713 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
714 case L2CAP_MODE_STREAMING:
715 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
716 default:
717 return 0x00;
718 }
719 }
720
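/* Send an L2CAP Disconnection Request for the channel (stopping the
 * ERTM timers first if needed) and move it to BT_DISCONN, recording
 * err on the socket under the socket lock.
 */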
721 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
722 {
723 struct sock *sk = chan->sk;
724 struct l2cap_disconn_req req;
725
726 if (!conn)
727 return;
728
729 if (chan->mode == L2CAP_MODE_ERTM) {
730 __clear_retrans_timer(chan);
731 __clear_monitor_timer(chan);
732 __clear_ack_timer(chan);
733 }
734
735 req.dcid = cpu_to_le16(chan->dcid);
736 req.scid = cpu_to_le16(chan->scid);
737 l2cap_send_cmd(conn, l2cap_get_ident(conn),
738 L2CAP_DISCONN_REQ, sizeof(req), &req);
739
740 lock_sock(sk);
741 __l2cap_state_change(chan, BT_DISCONN);
742 __l2cap_chan_set_err(chan, err);
743 release_sock(sk);
744 }
745
746 /* ---- L2CAP connections ---- */
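/* Walk every channel on the connection (under conn->chan_lock, locking
 * each channel in turn) and drive the connect procedure: BT_CONNECT
 * channels get a Connection Request sent, BT_CONNECT2 channels get a
 * Connection Response and, on success, the first Configuration Request.
 */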
747 static void l2cap_conn_start(struct l2cap_conn *conn)
748 {
749 struct l2cap_chan *chan, *tmp;
750
751 BT_DBG("conn %p", conn);
752
753 mutex_lock(&conn->chan_lock);
754
755 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
756 struct sock *sk = chan->sk;
757
758 l2cap_chan_lock(chan);
759
760 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
761 l2cap_chan_unlock(chan);
762 continue;
763 }
764
765 if (chan->state == BT_CONNECT) {
766 struct l2cap_conn_req req;
767
768 if (!l2cap_chan_check_security(chan) ||
769 !__l2cap_no_conn_pending(chan)) {
770 l2cap_chan_unlock(chan);
771 continue;
772 }
773
774 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
775 && test_bit(CONF_STATE2_DEVICE,
776 &chan->conf_state)) {
777 l2cap_chan_close(chan, ECONNRESET);
778 l2cap_chan_unlock(chan);
779 continue;
780 }
781
782 req.scid = cpu_to_le16(chan->scid);
783 req.psm = chan->psm;
784
785 chan->ident = l2cap_get_ident(conn);
786 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
787
788 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
789 sizeof(req), &req);
790
791 } else if (chan->state == BT_CONNECT2) {
792 struct l2cap_conn_rsp rsp;
793 char buf[128];
794 rsp.scid = cpu_to_le16(chan->dcid);
795 rsp.dcid = cpu_to_le16(chan->scid);
796
797 if (l2cap_chan_check_security(chan)) {
798 lock_sock(sk);
799 if (bt_sk(sk)->defer_setup) {
800 struct sock *parent = bt_sk(sk)->parent;
801 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
802 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
803 if (parent)
804 parent->sk_data_ready(parent, 0);
805
806 } else {
807 __l2cap_state_change(chan, BT_CONFIG);
808 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
809 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
810 }
811 release_sock(sk);
812 } else {
813 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
814 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
815 }
816
817 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
818 sizeof(rsp), &rsp);
819
820 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
821 rsp.result != L2CAP_CR_SUCCESS) {
822 l2cap_chan_unlock(chan);
823 continue;
824 }
825
826 set_bit(CONF_REQ_SENT, &chan->conf_state);
827 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
828 l2cap_build_conf_req(chan, buf), buf);
829 chan->num_conf_req++;
830 }
831
832 l2cap_chan_unlock(chan);
833 }
834
835 mutex_unlock(&conn->chan_lock);
836 }
837
838 /* Find channel with given source CID and source bdaddr.
839  * Returns closest match (not locked).
840  */
841 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
842 {
843 struct l2cap_chan *c, *c1 = NULL;
844
845 read_lock(&chan_list_lock);
846
847 list_for_each_entry(c, &chan_list, global_l) {
848 struct sock *sk = c->sk;
849
850 if (state && c->state != state)
851 continue;
852
853 if (c->scid == cid) {
854 /* Exact match. */
855 if (!bacmp(&bt_sk(sk)->src, src)) {
856 read_unlock(&chan_list_lock);
857 return c;
858 }
859
860 /* Closest match */
861 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
862 c1 = c;
863 }
864 }
865
866 read_unlock(&chan_list_lock);
867
868 return c1;
869 }
870
871 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
872 {
873 struct sock *parent, *sk;
874 struct l2cap_chan *chan, *pchan;
875
876 BT_DBG("");
877
878 /* Check if we have socket listening on cid */
879 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
880 conn->src);
881 if (!pchan)
882 return;
883
884 parent = pchan->sk;
885
886 lock_sock(parent);
887
888 /* Check for backlog size */
889 if (sk_acceptq_is_full(parent)) {
890 BT_DBG("backlog full %d", parent->sk_ack_backlog);
891 goto clean;
892 }
893
894 chan = pchan->ops->new_connection(pchan->data);
895 if (!chan)
896 goto clean;
897
898 sk = chan->sk;
899
900 hci_conn_hold(conn->hcon);
901
902 bacpy(&bt_sk(sk)->src, conn->src);
903 bacpy(&bt_sk(sk)->dst, conn->dst);
904
905 bt_accept_enqueue(parent, sk);
906
907 l2cap_chan_add(conn, chan);
908
909 __set_chan_timer(chan, sk->sk_sndtimeo);
910
911 __l2cap_state_change(chan, BT_CONNECTED);
912 parent->sk_data_ready(parent, 0);
913
914 clean:
915 release_sock(parent);
916 }
917
918 static void l2cap_chan_ready(struct l2cap_chan *chan)
919 {
920 struct sock *sk = chan->sk;
921 struct sock *parent;
922
923 lock_sock(sk);
924
925 parent = bt_sk(sk)->parent;
926
927 BT_DBG("sk %p, parent %p", sk, parent);
928
929 chan->conf_state = 0;
930 __clear_chan_timer(chan);
931
932 __l2cap_state_change(chan, BT_CONNECTED);
933 sk->sk_state_change(sk);
934
935 if (parent)
936 parent->sk_data_ready(parent, 0);
937
938 release_sock(sk);
939 }
940
941 static void l2cap_conn_ready(struct l2cap_conn *conn)
942 {
943 struct l2cap_chan *chan;
944
945 BT_DBG("conn %p", conn);
946
947 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
948 l2cap_le_conn_ready(conn);
949
950 if (conn->hcon->out && conn->hcon->type == LE_LINK)
951 smp_conn_security(conn, conn->hcon->pending_sec_level);
952
953 mutex_lock(&conn->chan_lock);
954
955 list_for_each_entry(chan, &conn->chan_l, list) {
956
957 l2cap_chan_lock(chan);
958
959 if (conn->hcon->type == LE_LINK) {
960 if (smp_conn_security(conn, chan->sec_level))
961 l2cap_chan_ready(chan);
962
963 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
964 struct sock *sk = chan->sk;
965 __clear_chan_timer(chan);
966 lock_sock(sk);
967 __l2cap_state_change(chan, BT_CONNECTED);
968 sk->sk_state_change(sk);
969 release_sock(sk);
970
971 } else if (chan->state == BT_CONNECT)
972 l2cap_do_start(chan);
973
974 l2cap_chan_unlock(chan);
975 }
976
977 mutex_unlock(&conn->chan_lock);
978 }
979
980 /* Notify sockets that we cannot guarantee reliability anymore */
981 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
982 {
983 struct l2cap_chan *chan;
984
985 BT_DBG("conn %p", conn);
986
987 mutex_lock(&conn->chan_lock);
988
989 list_for_each_entry(chan, &conn->chan_l, list) {
990 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
991 __l2cap_chan_set_err(chan, err);
992 }
993
994 mutex_unlock(&conn->chan_lock);
995 }
996
997 static void l2cap_info_timeout(struct work_struct *work)
998 {
999 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1000 info_timer.work);
1001
1002 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1003 conn->info_ident = 0;
1004
1005 l2cap_conn_start(conn);
1006 }
1007
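/* Tear down an L2CAP connection: delete every channel (each under its
 * own lock) and invoke its close callback, drop the HCI channel, cancel
 * any pending info/security work and free the connection.
 */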
1008 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1009 {
1010 struct l2cap_conn *conn = hcon->l2cap_data;
1011 struct l2cap_chan *chan, *l;
1012
1013 if (!conn)
1014 return;
1015
1016 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1017
1018 kfree_skb(conn->rx_skb);
1019
1020 mutex_lock(&conn->chan_lock);
1021
1022 /* Kill channels */
1023 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1024 l2cap_chan_lock(chan);
1025
1026 l2cap_chan_del(chan, err);
1027
1028 l2cap_chan_unlock(chan);
1029
1030 chan->ops->close(chan->data);
1031 }
1032
1033 mutex_unlock(&conn->chan_lock);
1034
1035 hci_chan_del(conn->hchan);
1036
1037 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1038 cancel_delayed_work_sync(&conn->info_timer);
1039
1040 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1041 cancel_delayed_work_sync(&conn->security_timer);
1042 smp_chan_destroy(conn);
1043 }
1044
1045 hcon->l2cap_data = NULL;
1046 kfree(conn);
1047 }
1048
1049 static void security_timeout(struct work_struct *work)
1050 {
1051 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1052 security_timer.work);
1053
1054 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1055 }
1056
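/* Allocate and initialise the l2cap_conn for an HCI connection: pick
 * the MTU from le_mtu (LE links, when set) or acl_mtu, set up
 * conn->chan_lock and the channel list, and arm either the security or
 * the info timeout work depending on the link type.
 */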
1057 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1058 {
1059 struct l2cap_conn *conn = hcon->l2cap_data;
1060 struct hci_chan *hchan;
1061
1062 if (conn || status)
1063 return conn;
1064
1065 hchan = hci_chan_create(hcon);
1066 if (!hchan)
1067 return NULL;
1068
1069 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1070 if (!conn) {
1071 hci_chan_del(hchan);
1072 return NULL;
1073 }
1074
1075 hcon->l2cap_data = conn;
1076 conn->hcon = hcon;
1077 conn->hchan = hchan;
1078
1079 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1080
1081 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1082 conn->mtu = hcon->hdev->le_mtu;
1083 else
1084 conn->mtu = hcon->hdev->acl_mtu;
1085
1086 conn->src = &hcon->hdev->bdaddr;
1087 conn->dst = &hcon->dst;
1088
1089 conn->feat_mask = 0;
1090
1091 spin_lock_init(&conn->lock);
1092 mutex_init(&conn->chan_lock);
1093
1094 INIT_LIST_HEAD(&conn->chan_l);
1095
1096 if (hcon->type == LE_LINK)
1097 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1098 else
1099 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1100
1101 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1102
1103 return conn;
1104 }
1105
1106 /* ---- Socket interface ---- */
1107
1108 /* Find channel with given PSM and source bdaddr.
1109  * Returns closest match.
1110  */
1111 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1112 {
1113 struct l2cap_chan *c, *c1 = NULL;
1114
1115 read_lock(&chan_list_lock);
1116
1117 list_for_each_entry(c, &chan_list, global_l) {
1118 struct sock *sk = c->sk;
1119
1120 if (state && c->state != state)
1121 continue;
1122
1123 if (c->psm == psm) {
1124 /* Exact match. */
1125 if (!bacmp(&bt_sk(sk)->src, src)) {
1126 read_unlock(&chan_list_lock);
1127 return c;
1128 }
1129
1130 /* Closest match */
1131 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1132 c1 = c;
1133 }
1134 }
1135
1136 read_unlock(&chan_list_lock);
1137
1138 return c1;
1139 }
1140
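/* Connect a channel to psm/cid at dst: validate the PSM, CID and mode,
 * create or reuse the HCI connection (LE if dcid is the LE data CID,
 * ACL otherwise), attach the channel to it in state BT_CONNECT and, if
 * the link is already up, start the L2CAP connect procedure right away.
 */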
1141 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1142 {
1143 struct sock *sk = chan->sk;
1144 bdaddr_t *src = &bt_sk(sk)->src;
1145 struct l2cap_conn *conn;
1146 struct hci_conn *hcon;
1147 struct hci_dev *hdev;
1148 __u8 auth_type;
1149 int err;
1150
1151 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1152 chan->psm);
1153
1154 hdev = hci_get_route(dst, src);
1155 if (!hdev)
1156 return -EHOSTUNREACH;
1157
1158 hci_dev_lock(hdev);
1159
1160 l2cap_chan_lock(chan);
1161
1162 /* PSM must be odd and lsb of upper byte must be 0 */
1163 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1164 chan->chan_type != L2CAP_CHAN_RAW) {
1165 err = -EINVAL;
1166 goto done;
1167 }
1168
1169 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1170 err = -EINVAL;
1171 goto done;
1172 }
1173
1174 switch (chan->mode) {
1175 case L2CAP_MODE_BASIC:
1176 break;
1177 case L2CAP_MODE_ERTM:
1178 case L2CAP_MODE_STREAMING:
1179 if (!disable_ertm)
1180 break;
1181 /* fall through */
1182 default:
1183 err = -ENOTSUPP;
1184 goto done;
1185 }
1186
1187 lock_sock(sk);
1188
1189 switch (sk->sk_state) {
1190 case BT_CONNECT:
1191 case BT_CONNECT2:
1192 case BT_CONFIG:
1193 /* Already connecting */
1194 err = 0;
1195 release_sock(sk);
1196 goto done;
1197
1198 case BT_CONNECTED:
1199 /* Already connected */
1200 err = -EISCONN;
1201 release_sock(sk);
1202 goto done;
1203
1204 case BT_OPEN:
1205 case BT_BOUND:
1206 /* Can connect */
1207 break;
1208
1209 default:
1210 err = -EBADFD;
1211 release_sock(sk);
1212 goto done;
1213 }
1214
1215 /* Set destination address and psm */
1216 bacpy(&bt_sk(sk)->dst, dst);
1217
1218 release_sock(sk);
1219
1220 chan->psm = psm;
1221 chan->dcid = cid;
1222
1223 auth_type = l2cap_get_auth_type(chan);
1224
1225 if (chan->dcid == L2CAP_CID_LE_DATA)
1226 hcon = hci_connect(hdev, LE_LINK, dst,
1227 chan->sec_level, auth_type);
1228 else
1229 hcon = hci_connect(hdev, ACL_LINK, dst,
1230 chan->sec_level, auth_type);
1231
1232 if (IS_ERR(hcon)) {
1233 err = PTR_ERR(hcon);
1234 goto done;
1235 }
1236
1237 conn = l2cap_conn_add(hcon, 0);
1238 if (!conn) {
1239 hci_conn_put(hcon);
1240 err = -ENOMEM;
1241 goto done;
1242 }
1243
1244 /* Update source addr of the socket */
1245 bacpy(src, conn->src);
1246
1247 l2cap_chan_unlock(chan);
1248 l2cap_chan_add(conn, chan);
1249 l2cap_chan_lock(chan);
1250
1251 l2cap_state_change(chan, BT_CONNECT);
1252 __set_chan_timer(chan, sk->sk_sndtimeo);
1253
1254 if (hcon->state == BT_CONNECTED) {
1255 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1256 __clear_chan_timer(chan);
1257 if (l2cap_chan_check_security(chan))
1258 l2cap_state_change(chan, BT_CONNECTED);
1259 } else
1260 l2cap_do_start(chan);
1261 }
1262
1263 err = 0;
1264
1265 done:
1266 l2cap_chan_unlock(chan);
1267 hci_dev_unlock(hdev);
1268 hci_dev_put(hdev);
1269 return err;
1270 }
1271
1272 int __l2cap_wait_ack(struct sock *sk)
1273 {
1274 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1275 DECLARE_WAITQUEUE(wait, current);
1276 int err = 0;
1277 int timeo = HZ/5;
1278
1279 add_wait_queue(sk_sleep(sk), &wait);
1280 set_current_state(TASK_INTERRUPTIBLE);
1281 while (chan->unacked_frames > 0 && chan->conn) {
1282 if (!timeo)
1283 timeo = HZ/5;
1284
1285 if (signal_pending(current)) {
1286 err = sock_intr_errno(timeo);
1287 break;
1288 }
1289
1290 release_sock(sk);
1291 timeo = schedule_timeout(timeo);
1292 lock_sock(sk);
1293 set_current_state(TASK_INTERRUPTIBLE);
1294
1295 err = sock_error(sk);
1296 if (err)
1297 break;
1298 }
1299 set_current_state(TASK_RUNNING);
1300 remove_wait_queue(sk_sleep(sk), &wait);
1301 return err;
1302 }
1303
1304 static void l2cap_monitor_timeout(struct work_struct *work)
1305 {
1306 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1307 monitor_timer.work);
1308
1309 BT_DBG("chan %p", chan);
1310
1311 l2cap_chan_lock(chan);
1312
1313 if (chan->retry_count >= chan->remote_max_tx) {
1314 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1315 l2cap_chan_unlock(chan);
1316 return;
1317 }
1318
1319 chan->retry_count++;
1320 __set_monitor_timer(chan);
1321
1322 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1323 l2cap_chan_unlock(chan);
1324 }
1325
1326 static void l2cap_retrans_timeout(struct work_struct *work)
1327 {
1328 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1329 retrans_timer.work);
1330
1331 BT_DBG("chan %p", chan);
1332
1333 l2cap_chan_lock(chan);
1334
1335 chan->retry_count = 1;
1336 __set_monitor_timer(chan);
1337
1338 set_bit(CONN_WAIT_F, &chan->conn_state);
1339
1340 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1341
1342 l2cap_chan_unlock(chan);
1343 }
1344
1345 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1346 {
1347 struct sk_buff *skb;
1348
1349 while ((skb = skb_peek(&chan->tx_q)) &&
1350 chan->unacked_frames) {
1351 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1352 break;
1353
1354 skb = skb_dequeue(&chan->tx_q);
1355 kfree_skb(skb);
1356
1357 chan->unacked_frames--;
1358 }
1359
1360 if (!chan->unacked_frames)
1361 __clear_retrans_timer(chan);
1362 }
1363
1364 static void l2cap_streaming_send(struct l2cap_chan *chan)
1365 {
1366 struct sk_buff *skb;
1367 u32 control;
1368 u16 fcs;
1369
1370 while ((skb = skb_dequeue(&chan->tx_q))) {
1371 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1372 control |= __set_txseq(chan, chan->next_tx_seq);
1373 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1374
1375 if (chan->fcs == L2CAP_FCS_CRC16) {
1376 fcs = crc16(0, (u8 *)skb->data,
1377 skb->len - L2CAP_FCS_SIZE);
1378 put_unaligned_le16(fcs,
1379 skb->data + skb->len - L2CAP_FCS_SIZE);
1380 }
1381
1382 l2cap_do_send(chan, skb);
1383
1384 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1385 }
1386 }
1387
1388 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1389 {
1390 struct sk_buff *skb, *tx_skb;
1391 u16 fcs;
1392 u32 control;
1393
1394 skb = skb_peek(&chan->tx_q);
1395 if (!skb)
1396 return;
1397
1398 while (bt_cb(skb)->tx_seq != tx_seq) {
1399 if (skb_queue_is_last(&chan->tx_q, skb))
1400 return;
1401
1402 skb = skb_queue_next(&chan->tx_q, skb);
1403 }
1404
1405 if (chan->remote_max_tx &&
1406 bt_cb(skb)->retries == chan->remote_max_tx) {
1407 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1408 return;
1409 }
1410
1411 tx_skb = skb_clone(skb, GFP_ATOMIC);
1412 bt_cb(skb)->retries++;
1413
1414 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1415 control &= __get_sar_mask(chan);
1416
1417 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1418 control |= __set_ctrl_final(chan);
1419
1420 control |= __set_reqseq(chan, chan->buffer_seq);
1421 control |= __set_txseq(chan, tx_seq);
1422
1423 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1424
1425 if (chan->fcs == L2CAP_FCS_CRC16) {
1426 fcs = crc16(0, (u8 *)tx_skb->data,
1427 tx_skb->len - L2CAP_FCS_SIZE);
1428 put_unaligned_le16(fcs,
1429 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1430 }
1431
1432 l2cap_do_send(chan, tx_skb);
1433 }
1434
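/* Transmit queued I-frames starting at tx_send_head while the transmit
 * window is open: each frame is cloned, given fresh reqseq/txseq (and
 * FCS when CRC16 is in use) and sent, with the retransmission timer
 * re-armed.  Returns the number of frames sent for the first time, or
 * -ENOTCONN if the channel is not connected.
 */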
1435 static int l2cap_ertm_send(struct l2cap_chan *chan)
1436 {
1437 struct sk_buff *skb, *tx_skb;
1438 u16 fcs;
1439 u32 control;
1440 int nsent = 0;
1441
1442 if (chan->state != BT_CONNECTED)
1443 return -ENOTCONN;
1444
1445 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1446
1447 if (chan->remote_max_tx &&
1448 bt_cb(skb)->retries == chan->remote_max_tx) {
1449 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1450 break;
1451 }
1452
1453 tx_skb = skb_clone(skb, GFP_ATOMIC);
1454
1455 bt_cb(skb)->retries++;
1456
1457 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1458 control &= __get_sar_mask(chan);
1459
1460 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1461 control |= __set_ctrl_final(chan);
1462
1463 control |= __set_reqseq(chan, chan->buffer_seq);
1464 control |= __set_txseq(chan, chan->next_tx_seq);
1465
1466 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1467
1468 if (chan->fcs == L2CAP_FCS_CRC16) {
1469 fcs = crc16(0, (u8 *)skb->data,
1470 tx_skb->len - L2CAP_FCS_SIZE);
1471 put_unaligned_le16(fcs, skb->data +
1472 tx_skb->len - L2CAP_FCS_SIZE);
1473 }
1474
1475 l2cap_do_send(chan, tx_skb);
1476
1477 __set_retrans_timer(chan);
1478
1479 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1480
1481 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1482
1483 if (bt_cb(skb)->retries == 1) {
1484 chan->unacked_frames++;
1485
1486 if (!nsent++)
1487 __clear_ack_timer(chan);
1488 }
1489
1490 chan->frames_sent++;
1491
1492 if (skb_queue_is_last(&chan->tx_q, skb))
1493 chan->tx_send_head = NULL;
1494 else
1495 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1496 }
1497
1498 return nsent;
1499 }
1500
1501 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1502 {
1503 int ret;
1504
1505 if (!skb_queue_empty(&chan->tx_q))
1506 chan->tx_send_head = chan->tx_q.next;
1507
1508 chan->next_tx_seq = chan->expected_ack_seq;
1509 ret = l2cap_ertm_send(chan);
1510 return ret;
1511 }
1512
1513 static void __l2cap_send_ack(struct l2cap_chan *chan)
1514 {
1515 u32 control = 0;
1516
1517 control |= __set_reqseq(chan, chan->buffer_seq);
1518
1519 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1520 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1521 set_bit(CONN_RNR_SENT, &chan->conn_state);
1522 l2cap_send_sframe(chan, control);
1523 return;
1524 }
1525
1526 if (l2cap_ertm_send(chan) > 0)
1527 return;
1528
1529 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1530 l2cap_send_sframe(chan, control);
1531 }
1532
1533 static void l2cap_send_ack(struct l2cap_chan *chan)
1534 {
1535 __clear_ack_timer(chan);
1536 __l2cap_send_ack(chan);
1537 }
1538
1539 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1540 {
1541 struct srej_list *tail;
1542 u32 control;
1543
1544 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1545 control |= __set_ctrl_final(chan);
1546
1547 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1548 control |= __set_reqseq(chan, tail->tx_seq);
1549
1550 l2cap_send_sframe(chan, control);
1551 }
1552
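/* Copy the message payload into skb and, for anything that does not
 * fit, allocate continuation fragments (no L2CAP header) of at most
 * conn->mtu bytes and chain them on the fragment list.  Returns the
 * number of bytes consumed or a negative error.
 */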
1553 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1554 {
1555 struct l2cap_conn *conn = chan->conn;
1556 struct sk_buff **frag;
1557 int err, sent = 0;
1558
1559 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1560 return -EFAULT;
1561
1562 sent += count;
1563 len -= count;
1564
1565 /* Continuation fragments (no L2CAP header) */
1566 frag = &skb_shinfo(skb)->frag_list;
1567 while (len) {
1568 count = min_t(unsigned int, conn->mtu, len);
1569
1570 *frag = chan->ops->alloc_skb(chan, count,
1571 msg->msg_flags & MSG_DONTWAIT, &err);
1572
1573 if (!*frag)
1574 return err;
1575 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1576 return -EFAULT;
1577
1578 (*frag)->priority = skb->priority;
1579
1580 sent += count;
1581 len -= count;
1582
1583 frag = &(*frag)->next;
1584 }
1585
1586 return sent;
1587 }
1588
1589 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1590 struct msghdr *msg, size_t len,
1591 u32 priority)
1592 {
1593 struct l2cap_conn *conn = chan->conn;
1594 struct sk_buff *skb;
1595 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1596 struct l2cap_hdr *lh;
1597
1598 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1599
1600 count = min_t(unsigned int, (conn->mtu - hlen), len);
1601
1602 skb = chan->ops->alloc_skb(chan, count + hlen,
1603 msg->msg_flags & MSG_DONTWAIT, &err);
1604
1605 if (!skb)
1606 return ERR_PTR(err);
1607
1608 skb->priority = priority;
1609
1610 /* Create L2CAP header */
1611 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1612 lh->cid = cpu_to_le16(chan->dcid);
1613 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1614 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1615
1616 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1617 if (unlikely(err < 0)) {
1618 kfree_skb(skb);
1619 return ERR_PTR(err);
1620 }
1621 return skb;
1622 }
1623
1624 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1625 struct msghdr *msg, size_t len,
1626 u32 priority)
1627 {
1628 struct l2cap_conn *conn = chan->conn;
1629 struct sk_buff *skb;
1630 int err, count, hlen = L2CAP_HDR_SIZE;
1631 struct l2cap_hdr *lh;
1632
1633 BT_DBG("chan %p len %d", chan, (int)len);
1634
1635 count = min_t(unsigned int, (conn->mtu - hlen), len);
1636
1637 skb = chan->ops->alloc_skb(chan, count + hlen,
1638 msg->msg_flags & MSG_DONTWAIT, &err);
1639
1640 if (!skb)
1641 return ERR_PTR(err);
1642
1643 skb->priority = priority;
1644
1645 /* Create L2CAP header */
1646 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1647 lh->cid = cpu_to_le16(chan->dcid);
1648 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1649
1650 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1651 if (unlikely(err < 0)) {
1652 kfree_skb(skb);
1653 return ERR_PTR(err);
1654 }
1655 return skb;
1656 }
1657
1658 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1659 struct msghdr *msg, size_t len,
1660 u32 control, u16 sdulen)
1661 {
1662 struct l2cap_conn *conn = chan->conn;
1663 struct sk_buff *skb;
1664 int err, count, hlen;
1665 struct l2cap_hdr *lh;
1666
1667 BT_DBG("chan %p len %d", chan, (int)len);
1668
1669 if (!conn)
1670 return ERR_PTR(-ENOTCONN);
1671
1672 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1673 hlen = L2CAP_EXT_HDR_SIZE;
1674 else
1675 hlen = L2CAP_ENH_HDR_SIZE;
1676
1677 if (sdulen)
1678 hlen += L2CAP_SDULEN_SIZE;
1679
1680 if (chan->fcs == L2CAP_FCS_CRC16)
1681 hlen += L2CAP_FCS_SIZE;
1682
1683 count = min_t(unsigned int, (conn->mtu - hlen), len);
1684
1685 skb = chan->ops->alloc_skb(chan, count + hlen,
1686 msg->msg_flags & MSG_DONTWAIT, &err);
1687
1688 if (!skb)
1689 return ERR_PTR(err);
1690
1691 /* Create L2CAP header */
1692 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1693 lh->cid = cpu_to_le16(chan->dcid);
1694 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1695
1696 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1697
1698 if (sdulen)
1699 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1700
1701 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1702 if (unlikely(err < 0)) {
1703 kfree_skb(skb);
1704 return ERR_PTR(err);
1705 }
1706
1707 if (chan->fcs == L2CAP_FCS_CRC16)
1708 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1709
1710 bt_cb(skb)->retries = 0;
1711 return skb;
1712 }
1713
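/* Segment an SDU larger than the remote MPS: build a SAR "start" PDU
 * carrying the SDU length followed by "continue" and "end" PDUs, queue
 * them on tx_q (setting tx_send_head if the queue was idle) and return
 * the number of bytes queued.
 */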
1714 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1715 {
1716 struct sk_buff *skb;
1717 struct sk_buff_head sar_queue;
1718 u32 control;
1719 size_t size = 0;
1720
1721 skb_queue_head_init(&sar_queue);
1722 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1723 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1724 if (IS_ERR(skb))
1725 return PTR_ERR(skb);
1726
1727 __skb_queue_tail(&sar_queue, skb);
1728 len -= chan->remote_mps;
1729 size += chan->remote_mps;
1730
1731 while (len > 0) {
1732 size_t buflen;
1733
1734 if (len > chan->remote_mps) {
1735 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1736 buflen = chan->remote_mps;
1737 } else {
1738 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1739 buflen = len;
1740 }
1741
1742 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1743 if (IS_ERR(skb)) {
1744 skb_queue_purge(&sar_queue);
1745 return PTR_ERR(skb);
1746 }
1747
1748 __skb_queue_tail(&sar_queue, skb);
1749 len -= buflen;
1750 size += buflen;
1751 }
1752 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1753 if (chan->tx_send_head == NULL)
1754 chan->tx_send_head = sar_queue.next;
1755
1756 return size;
1757 }
1758
1759 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1760 u32 priority)
1761 {
1762 struct sk_buff *skb;
1763 u32 control;
1764 int err;
1765
1766 /* Connectionless channel */
1767 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1768 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1769 if (IS_ERR(skb))
1770 return PTR_ERR(skb);
1771
1772 l2cap_do_send(chan, skb);
1773 return len;
1774 }
1775
1776 switch (chan->mode) {
1777 case L2CAP_MODE_BASIC:
1778 /* Check outgoing MTU */
1779 if (len > chan->omtu)
1780 return -EMSGSIZE;
1781
1782 /* Create a basic PDU */
1783 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1784 if (IS_ERR(skb))
1785 return PTR_ERR(skb);
1786
1787 l2cap_do_send(chan, skb);
1788 err = len;
1789 break;
1790
1791 case L2CAP_MODE_ERTM:
1792 case L2CAP_MODE_STREAMING:
1793 /* Entire SDU fits into one PDU */
1794 if (len <= chan->remote_mps) {
1795 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1796 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1797 0);
1798 if (IS_ERR(skb))
1799 return PTR_ERR(skb);
1800
1801 __skb_queue_tail(&chan->tx_q, skb);
1802
1803 if (chan->tx_send_head == NULL)
1804 chan->tx_send_head = skb;
1805
1806 } else {
1807 /* Segment SDU into multiple PDUs */
1808 err = l2cap_sar_segment_sdu(chan, msg, len);
1809 if (err < 0)
1810 return err;
1811 }
1812
1813 if (chan->mode == L2CAP_MODE_STREAMING) {
1814 l2cap_streaming_send(chan);
1815 err = len;
1816 break;
1817 }
1818
1819 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1820 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1821 err = len;
1822 break;
1823 }
1824
1825 err = l2cap_ertm_send(chan);
1826 if (err >= 0)
1827 err = len;
1828
1829 break;
1830
1831 default:
1832 BT_DBG("bad state %1.1x", chan->mode);
1833 err = -EBADFD;
1834 }
1835
1836 return err;
1837 }
1838
1839 /* Copy frame to all raw sockets on that connection */
1840 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1841 {
1842 struct sk_buff *nskb;
1843 struct l2cap_chan *chan;
1844
1845 BT_DBG("conn %p", conn);
1846
1847 mutex_lock(&conn->chan_lock);
1848
1849 list_for_each_entry(chan, &conn->chan_l, list) {
1850 struct sock *sk = chan->sk;
1851 if (chan->chan_type != L2CAP_CHAN_RAW)
1852 continue;
1853
1854 /* Don't send frame to the socket it came from */
1855 if (skb->sk == sk)
1856 continue;
1857 nskb = skb_clone(skb, GFP_ATOMIC);
1858 if (!nskb)
1859 continue;
1860
1861 if (chan->ops->recv(chan->data, nskb))
1862 kfree_skb(nskb);
1863 }
1864
1865 mutex_unlock(&conn->chan_lock);
1866 }
1867
1868 /* ---- L2CAP signalling commands ---- */
1869 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1870 u8 code, u8 ident, u16 dlen, void *data)
1871 {
1872 struct sk_buff *skb, **frag;
1873 struct l2cap_cmd_hdr *cmd;
1874 struct l2cap_hdr *lh;
1875 int len, count;
1876
1877 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1878 conn, code, ident, dlen);
1879
1880 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1881 count = min_t(unsigned int, conn->mtu, len);
1882
1883 skb = bt_skb_alloc(count, GFP_ATOMIC);
1884 if (!skb)
1885 return NULL;
1886
1887 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1888 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1889
1890 if (conn->hcon->type == LE_LINK)
1891 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1892 else
1893 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1894
1895 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1896 cmd->code = code;
1897 cmd->ident = ident;
1898 cmd->len = cpu_to_le16(dlen);
1899
1900 if (dlen) {
1901 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1902 memcpy(skb_put(skb, count), data, count);
1903 data += count;
1904 }
1905
1906 len -= skb->len;
1907
1908 /* Continuation fragments (no L2CAP header) */
1909 frag = &skb_shinfo(skb)->frag_list;
1910 while (len) {
1911 count = min_t(unsigned int, conn->mtu, len);
1912
1913 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1914 if (!*frag)
1915 goto fail;
1916
1917 memcpy(skb_put(*frag, count), data, count);
1918
1919 len -= count;
1920 data += count;
1921
1922 frag = &(*frag)->next;
1923 }
1924
1925 return skb;
1926
1927 fail:
1928 kfree_skb(skb);
1929 return NULL;
1930 }
1931
1932 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1933 {
1934 struct l2cap_conf_opt *opt = *ptr;
1935 int len;
1936
1937 len = L2CAP_CONF_OPT_SIZE + opt->len;
1938 *ptr += len;
1939
1940 *type = opt->type;
1941 *olen = opt->len;
1942
1943 switch (opt->len) {
1944 case 1:
1945 *val = *((u8 *) opt->val);
1946 break;
1947
1948 case 2:
1949 *val = get_unaligned_le16(opt->val);
1950 break;
1951
1952 case 4:
1953 *val = get_unaligned_le32(opt->val);
1954 break;
1955
1956 default:
1957 *val = (unsigned long) opt->val;
1958 break;
1959 }
1960
1961 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1962 return len;
1963 }
1964
1965 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1966 {
1967 struct l2cap_conf_opt *opt = *ptr;
1968
1969 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1970
1971 opt->type = type;
1972 opt->len = len;
1973
1974 switch (len) {
1975 case 1:
1976 *((u8 *) opt->val) = val;
1977 break;
1978
1979 case 2:
1980 put_unaligned_le16(val, opt->val);
1981 break;
1982
1983 case 4:
1984 put_unaligned_le32(val, opt->val);
1985 break;
1986
1987 default:
1988 memcpy(opt->val, (void *) val, len);
1989 break;
1990 }
1991
1992 *ptr += L2CAP_CONF_OPT_SIZE + len;
1993 }
1994
1995 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1996 {
1997 struct l2cap_conf_efs efs;
1998
1999 switch (chan->mode) {
2000 case L2CAP_MODE_ERTM:
2001 efs.id = chan->local_id;
2002 efs.stype = chan->local_stype;
2003 efs.msdu = cpu_to_le16(chan->local_msdu);
2004 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2005 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2006 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2007 break;
2008
2009 case L2CAP_MODE_STREAMING:
2010 efs.id = 1;
2011 efs.stype = L2CAP_SERV_BESTEFFORT;
2012 efs.msdu = cpu_to_le16(chan->local_msdu);
2013 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2014 efs.acc_lat = 0;
2015 efs.flush_to = 0;
2016 break;
2017
2018 default:
2019 return;
2020 }
2021
2022 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2023 (unsigned long) &efs);
2024 }
2025
2026 static void l2cap_ack_timeout(struct work_struct *work)
2027 {
2028 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2029 ack_timer.work);
2030
2031 BT_DBG("chan %p", chan);
2032
2033 l2cap_chan_lock(chan);
2034
2035 __l2cap_send_ack(chan);
2036
2037 l2cap_chan_unlock(chan);
2038
2039 l2cap_chan_put(chan);
2040 }
2041
2042 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2043 {
2044 chan->expected_ack_seq = 0;
2045 chan->unacked_frames = 0;
2046 chan->buffer_seq = 0;
2047 chan->num_acked = 0;
2048 chan->frames_sent = 0;
2049
2050 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2051 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2052 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2053
2054 skb_queue_head_init(&chan->srej_q);
2055
2056 INIT_LIST_HEAD(&chan->srej_l);
2057 }
2058
2059 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2060 {
2061 switch (mode) {
2062 case L2CAP_MODE_STREAMING:
2063 case L2CAP_MODE_ERTM:
2064 if (l2cap_mode_supported(mode, remote_feat_mask))
2065 return mode;
2066 /* fall through */
2067 default:
2068 return L2CAP_MODE_BASIC;
2069 }
2070 }
2071
2072 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2073 {
2074 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2075 }
2076
2077 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2078 {
2079 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2080 }
2081
2082 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2083 {
2084 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2085 __l2cap_ews_supported(chan)) {
2086 /* use extended control field */
2087 set_bit(FLAG_EXT_CTRL, &chan->flags);
2088 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2089 } else {
2090 chan->tx_win = min_t(u16, chan->tx_win,
2091 L2CAP_DEFAULT_TX_WINDOW);
2092 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2093 }
2094 }
2095
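/* Build a Configuration Request for the channel into 'data': choose the
 * mode on the first request based on the remote feature mask, then add
 * the MTU option (when it differs from the default) and the mode
 * specific RFC, FCS, EFS and extended-window options.  Returns the
 * length of the request.
 */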
2096 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2097 {
2098 struct l2cap_conf_req *req = data;
2099 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2100 void *ptr = req->data;
2101 u16 size;
2102
2103 BT_DBG("chan %p", chan);
2104
2105 if (chan->num_conf_req || chan->num_conf_rsp)
2106 goto done;
2107
2108 switch (chan->mode) {
2109 case L2CAP_MODE_STREAMING:
2110 case L2CAP_MODE_ERTM:
2111 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2112 break;
2113
2114 if (__l2cap_efs_supported(chan))
2115 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2116
2117 /* fall through */
2118 default:
2119 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2120 break;
2121 }
2122
2123 done:
2124 if (chan->imtu != L2CAP_DEFAULT_MTU)
2125 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2126
2127 switch (chan->mode) {
2128 case L2CAP_MODE_BASIC:
2129 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2130 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2131 break;
2132
2133 rfc.mode = L2CAP_MODE_BASIC;
2134 rfc.txwin_size = 0;
2135 rfc.max_transmit = 0;
2136 rfc.retrans_timeout = 0;
2137 rfc.monitor_timeout = 0;
2138 rfc.max_pdu_size = 0;
2139
2140 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2141 (unsigned long) &rfc);
2142 break;
2143
2144 case L2CAP_MODE_ERTM:
2145 rfc.mode = L2CAP_MODE_ERTM;
2146 rfc.max_transmit = chan->max_tx;
2147 rfc.retrans_timeout = 0;
2148 rfc.monitor_timeout = 0;
2149
2150 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2151 L2CAP_EXT_HDR_SIZE -
2152 L2CAP_SDULEN_SIZE -
2153 L2CAP_FCS_SIZE);
2154 rfc.max_pdu_size = cpu_to_le16(size);
2155
2156 l2cap_txwin_setup(chan);
2157
2158 rfc.txwin_size = min_t(u16, chan->tx_win,
2159 L2CAP_DEFAULT_TX_WINDOW);
2160
2161 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2162 (unsigned long) &rfc);
2163
2164 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2165 l2cap_add_opt_efs(&ptr, chan);
2166
2167 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2168 break;
2169
2170 if (chan->fcs == L2CAP_FCS_NONE ||
2171 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2172 chan->fcs = L2CAP_FCS_NONE;
2173 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2174 }
2175
2176 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2177 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2178 chan->tx_win);
2179 break;
2180
2181 case L2CAP_MODE_STREAMING:
2182 rfc.mode = L2CAP_MODE_STREAMING;
2183 rfc.txwin_size = 0;
2184 rfc.max_transmit = 0;
2185 rfc.retrans_timeout = 0;
2186 rfc.monitor_timeout = 0;
2187
2188 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2189 L2CAP_EXT_HDR_SIZE -
2190 L2CAP_SDULEN_SIZE -
2191 L2CAP_FCS_SIZE);
2192 rfc.max_pdu_size = cpu_to_le16(size);
2193
2194 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2195 (unsigned long) &rfc);
2196
2197 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2198 l2cap_add_opt_efs(&ptr, chan);
2199
2200 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2201 break;
2202
2203 if (chan->fcs == L2CAP_FCS_NONE ||
2204 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2205 chan->fcs = L2CAP_FCS_NONE;
2206 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2207 }
2208 break;
2209 }
2210
2211 req->dcid = cpu_to_le16(chan->dcid);
2212 req->flags = cpu_to_le16(0);
2213
2214 return ptr - data;
2215 }
2216
2217 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2218 {
2219 struct l2cap_conf_rsp *rsp = data;
2220 void *ptr = rsp->data;
2221 void *req = chan->conf_req;
2222 int len = chan->conf_len;
2223 int type, hint, olen;
2224 unsigned long val;
2225 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2226 struct l2cap_conf_efs efs;
2227 u8 remote_efs = 0;
2228 u16 mtu = L2CAP_DEFAULT_MTU;
2229 u16 result = L2CAP_CONF_SUCCESS;
2230 u16 size;
2231
2232 BT_DBG("chan %p", chan);
2233
2234 while (len >= L2CAP_CONF_OPT_SIZE) {
2235 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2236
2237 hint = type & L2CAP_CONF_HINT;
2238 type &= L2CAP_CONF_MASK;
2239
2240 switch (type) {
2241 case L2CAP_CONF_MTU:
2242 mtu = val;
2243 break;
2244
2245 case L2CAP_CONF_FLUSH_TO:
2246 chan->flush_to = val;
2247 break;
2248
2249 case L2CAP_CONF_QOS:
2250 break;
2251
2252 case L2CAP_CONF_RFC:
2253 if (olen == sizeof(rfc))
2254 memcpy(&rfc, (void *) val, olen);
2255 break;
2256
2257 case L2CAP_CONF_FCS:
2258 if (val == L2CAP_FCS_NONE)
2259 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2260 break;
2261
2262 case L2CAP_CONF_EFS:
2263 remote_efs = 1;
2264 if (olen == sizeof(efs))
2265 memcpy(&efs, (void *) val, olen);
2266 break;
2267
2268 case L2CAP_CONF_EWS:
2269 if (!enable_hs)
2270 return -ECONNREFUSED;
2271
2272 set_bit(FLAG_EXT_CTRL, &chan->flags);
2273 set_bit(CONF_EWS_RECV, &chan->conf_state);
2274 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2275 chan->remote_tx_win = val;
2276 break;
2277
2278 default:
2279 if (hint)
2280 break;
2281
2282 result = L2CAP_CONF_UNKNOWN;
2283 *((u8 *) ptr++) = type;
2284 break;
2285 }
2286 }
2287
2288 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2289 goto done;
2290
2291 switch (chan->mode) {
2292 case L2CAP_MODE_STREAMING:
2293 case L2CAP_MODE_ERTM:
2294 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2295 chan->mode = l2cap_select_mode(rfc.mode,
2296 chan->conn->feat_mask);
2297 break;
2298 }
2299
2300 if (remote_efs) {
2301 if (__l2cap_efs_supported(chan))
2302 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2303 else
2304 return -ECONNREFUSED;
2305 }
2306
2307 if (chan->mode != rfc.mode)
2308 return -ECONNREFUSED;
2309
2310 break;
2311 }
2312
2313 done:
2314 if (chan->mode != rfc.mode) {
2315 result = L2CAP_CONF_UNACCEPT;
2316 rfc.mode = chan->mode;
2317
2318 if (chan->num_conf_rsp == 1)
2319 return -ECONNREFUSED;
2320
2321 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2322 sizeof(rfc), (unsigned long) &rfc);
2323 }
2324
2325 if (result == L2CAP_CONF_SUCCESS) {
2326 /* Configure output options and let the other side know
2327 * which ones we don't like. */
2328
2329 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2330 result = L2CAP_CONF_UNACCEPT;
2331 else {
2332 chan->omtu = mtu;
2333 set_bit(CONF_MTU_DONE, &chan->conf_state);
2334 }
2335 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2336
2337 if (remote_efs) {
2338 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2339 efs.stype != L2CAP_SERV_NOTRAFIC &&
2340 efs.stype != chan->local_stype) {
2341
2342 result = L2CAP_CONF_UNACCEPT;
2343
2344 if (chan->num_conf_req >= 1)
2345 return -ECONNREFUSED;
2346
2347 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2348 sizeof(efs),
2349 (unsigned long) &efs);
2350 } else {
2351 /* Send PENDING Conf Rsp */
2352 result = L2CAP_CONF_PENDING;
2353 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2354 }
2355 }
2356
2357 switch (rfc.mode) {
2358 case L2CAP_MODE_BASIC:
2359 chan->fcs = L2CAP_FCS_NONE;
2360 set_bit(CONF_MODE_DONE, &chan->conf_state);
2361 break;
2362
2363 case L2CAP_MODE_ERTM:
2364 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2365 chan->remote_tx_win = rfc.txwin_size;
2366 else
2367 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2368
2369 chan->remote_max_tx = rfc.max_transmit;
2370
2371 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2372 chan->conn->mtu -
2373 L2CAP_EXT_HDR_SIZE -
2374 L2CAP_SDULEN_SIZE -
2375 L2CAP_FCS_SIZE);
2376 rfc.max_pdu_size = cpu_to_le16(size);
2377 chan->remote_mps = size;
2378
2379 rfc.retrans_timeout =
2380 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2381 rfc.monitor_timeout =
2382 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2383
2384 set_bit(CONF_MODE_DONE, &chan->conf_state);
2385
2386 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2387 sizeof(rfc), (unsigned long) &rfc);
2388
2389 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2390 chan->remote_id = efs.id;
2391 chan->remote_stype = efs.stype;
2392 chan->remote_msdu = le16_to_cpu(efs.msdu);
2393 chan->remote_flush_to =
2394 le32_to_cpu(efs.flush_to);
2395 chan->remote_acc_lat =
2396 le32_to_cpu(efs.acc_lat);
2397 chan->remote_sdu_itime =
2398 le32_to_cpu(efs.sdu_itime);
2399 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2400 sizeof(efs), (unsigned long) &efs);
2401 }
2402 break;
2403
2404 case L2CAP_MODE_STREAMING:
2405 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2406 chan->conn->mtu -
2407 L2CAP_EXT_HDR_SIZE -
2408 L2CAP_SDULEN_SIZE -
2409 L2CAP_FCS_SIZE);
2410 rfc.max_pdu_size = cpu_to_le16(size);
2411 chan->remote_mps = size;
2412
2413 set_bit(CONF_MODE_DONE, &chan->conf_state);
2414
2415 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2416 sizeof(rfc), (unsigned long) &rfc);
2417
2418 break;
2419
2420 default:
2421 result = L2CAP_CONF_UNACCEPT;
2422
2423 memset(&rfc, 0, sizeof(rfc));
2424 rfc.mode = chan->mode;
2425 }
2426
2427 if (result == L2CAP_CONF_SUCCESS)
2428 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2429 }
2430 rsp->scid = cpu_to_le16(chan->dcid);
2431 rsp->result = cpu_to_le16(result);
2432 rsp->flags = cpu_to_le16(0x0000);
2433
2434 return ptr - data;
2435 }
2436
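/* Walk the options of a configuration response, adjust the local
 * channel parameters accordingly and build the next configuration
 * request in data.  Returns the length of that request or a negative
 * error if the response cannot be accepted.
 */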
2437 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2438 {
2439 struct l2cap_conf_req *req = data;
2440 void *ptr = req->data;
2441 int type, olen;
2442 unsigned long val;
2443 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2444 struct l2cap_conf_efs efs;
2445
2446 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2447
2448 while (len >= L2CAP_CONF_OPT_SIZE) {
2449 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2450
2451 switch (type) {
2452 case L2CAP_CONF_MTU:
2453 if (val < L2CAP_DEFAULT_MIN_MTU) {
2454 *result = L2CAP_CONF_UNACCEPT;
2455 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2456 } else
2457 chan->imtu = val;
2458 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2459 break;
2460
2461 case L2CAP_CONF_FLUSH_TO:
2462 chan->flush_to = val;
2463 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2464 2, chan->flush_to);
2465 break;
2466
2467 case L2CAP_CONF_RFC:
2468 if (olen == sizeof(rfc))
2469 memcpy(&rfc, (void *)val, olen);
2470
2471 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2472 rfc.mode != chan->mode)
2473 return -ECONNREFUSED;
2474
2475 chan->fcs = 0;
2476
2477 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2478 sizeof(rfc), (unsigned long) &rfc);
2479 break;
2480
2481 case L2CAP_CONF_EWS:
2482 chan->tx_win = min_t(u16, val,
2483 L2CAP_DEFAULT_EXT_WINDOW);
2484 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2485 chan->tx_win);
2486 break;
2487
2488 case L2CAP_CONF_EFS:
2489 if (olen == sizeof(efs))
2490 memcpy(&efs, (void *)val, olen);
2491
2492 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2493 efs.stype != L2CAP_SERV_NOTRAFIC &&
2494 efs.stype != chan->local_stype)
2495 return -ECONNREFUSED;
2496
2497 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2498 sizeof(efs), (unsigned long) &efs);
2499 break;
2500 }
2501 }
2502
2503 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2504 return -ECONNREFUSED;
2505
2506 chan->mode = rfc.mode;
2507
2508 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2509 switch (rfc.mode) {
2510 case L2CAP_MODE_ERTM:
2511 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2512 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2513 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2514
2515 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2516 chan->local_msdu = le16_to_cpu(efs.msdu);
2517 chan->local_sdu_itime =
2518 le32_to_cpu(efs.sdu_itime);
2519 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2520 chan->local_flush_to =
2521 le32_to_cpu(efs.flush_to);
2522 }
2523 break;
2524
2525 case L2CAP_MODE_STREAMING:
2526 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2527 }
2528 }
2529
2530 req->dcid = cpu_to_le16(chan->dcid);
2531 req->flags = cpu_to_le16(0x0000);
2532
2533 return ptr - data;
2534 }
2535
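/* Build a bare configuration response for chan carrying only the
 * given result and flags.
 */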
2536 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2537 {
2538 struct l2cap_conf_rsp *rsp = data;
2539 void *ptr = rsp->data;
2540
2541 BT_DBG("chan %p", chan);
2542
2543 rsp->scid = cpu_to_le16(chan->dcid);
2544 rsp->result = cpu_to_le16(result);
2545 rsp->flags = cpu_to_le16(flags);
2546
2547 return ptr - data;
2548 }
2549
2550 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2551 {
2552 struct l2cap_conn_rsp rsp;
2553 struct l2cap_conn *conn = chan->conn;
2554 u8 buf[128];
2555
2556 rsp.scid = cpu_to_le16(chan->dcid);
2557 rsp.dcid = cpu_to_le16(chan->scid);
2558 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2559 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2560 l2cap_send_cmd(conn, chan->ident,
2561 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2562
2563 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2564 return;
2565
2566 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2567 l2cap_build_conf_req(chan, buf), buf);
2568 chan->num_conf_req++;
2569 }
2570
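/* Pick the RFC option out of a configuration response and update the
 * ERTM/streaming parameters of chan, falling back to sane defaults if
 * the remote did not include one.
 */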
2571 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2572 {
2573 int type, olen;
2574 unsigned long val;
2575 struct l2cap_conf_rfc rfc;
2576
2577 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2578
2579 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2580 return;
2581
2582 while (len >= L2CAP_CONF_OPT_SIZE) {
2583 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2584
2585 switch (type) {
2586 case L2CAP_CONF_RFC:
2587 if (olen == sizeof(rfc))
2588 memcpy(&rfc, (void *)val, olen);
2589 goto done;
2590 }
2591 }
2592
2593 /* Use sane default values in case a misbehaving remote device
2594 * did not send an RFC option.
2595 */
2596 rfc.mode = chan->mode;
2597 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2598 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2599 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2600
2601 BT_ERR("Expected RFC option was not found, using defaults");
2602
2603 done:
2604 switch (rfc.mode) {
2605 case L2CAP_MODE_ERTM:
2606 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2607 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2608 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2609 break;
2610 case L2CAP_MODE_STREAMING:
2611 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2612 }
2613 }
2614
2615 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2616 {
2617 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2618
2619 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2620 return 0;
2621
2622 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2623 cmd->ident == conn->info_ident) {
2624 cancel_delayed_work(&conn->info_timer);
2625
2626 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2627 conn->info_ident = 0;
2628
2629 l2cap_conn_start(conn);
2630 }
2631
2632 return 0;
2633 }
2634
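/* Handle an incoming Connection Request: find a channel listening on
 * the requested PSM, create the new channel and send the Connection
 * Response, followed by the first Configuration Request if possible.
 */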
2635 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2636 {
2637 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2638 struct l2cap_conn_rsp rsp;
2639 struct l2cap_chan *chan = NULL, *pchan;
2640 struct sock *parent, *sk = NULL;
2641 int result, status = L2CAP_CS_NO_INFO;
2642
2643 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2644 __le16 psm = req->psm;
2645
2646 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2647
2648 /* Check if we have a socket listening on this psm */
2649 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2650 if (!pchan) {
2651 result = L2CAP_CR_BAD_PSM;
2652 goto sendresp;
2653 }
2654
2655 parent = pchan->sk;
2656
2657 mutex_lock(&conn->chan_lock);
2658 lock_sock(parent);
2659
2660 /* Check if the ACL is secure enough (if not SDP) */
2661 if (psm != cpu_to_le16(0x0001) &&
2662 !hci_conn_check_link_mode(conn->hcon)) {
2663 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2664 result = L2CAP_CR_SEC_BLOCK;
2665 goto response;
2666 }
2667
2668 result = L2CAP_CR_NO_MEM;
2669
2670 /* Check for backlog size */
2671 if (sk_acceptq_is_full(parent)) {
2672 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2673 goto response;
2674 }
2675
2676 chan = pchan->ops->new_connection(pchan->data);
2677 if (!chan)
2678 goto response;
2679
2680 sk = chan->sk;
2681
2682 /* Check if we already have a channel with that dcid */
2683 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2684 sock_set_flag(sk, SOCK_ZAPPED);
2685 chan->ops->close(chan->data);
2686 goto response;
2687 }
2688
2689 hci_conn_hold(conn->hcon);
2690
2691 bacpy(&bt_sk(sk)->src, conn->src);
2692 bacpy(&bt_sk(sk)->dst, conn->dst);
2693 chan->psm = psm;
2694 chan->dcid = scid;
2695
2696 bt_accept_enqueue(parent, sk);
2697
2698 __l2cap_chan_add(conn, chan);
2699
2700 dcid = chan->scid;
2701
2702 __set_chan_timer(chan, sk->sk_sndtimeo);
2703
2704 chan->ident = cmd->ident;
2705
2706 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2707 if (l2cap_chan_check_security(chan)) {
2708 if (bt_sk(sk)->defer_setup) {
2709 __l2cap_state_change(chan, BT_CONNECT2);
2710 result = L2CAP_CR_PEND;
2711 status = L2CAP_CS_AUTHOR_PEND;
2712 parent->sk_data_ready(parent, 0);
2713 } else {
2714 __l2cap_state_change(chan, BT_CONFIG);
2715 result = L2CAP_CR_SUCCESS;
2716 status = L2CAP_CS_NO_INFO;
2717 }
2718 } else {
2719 __l2cap_state_change(chan, BT_CONNECT2);
2720 result = L2CAP_CR_PEND;
2721 status = L2CAP_CS_AUTHEN_PEND;
2722 }
2723 } else {
2724 __l2cap_state_change(chan, BT_CONNECT2);
2725 result = L2CAP_CR_PEND;
2726 status = L2CAP_CS_NO_INFO;
2727 }
2728
2729 response:
2730 release_sock(parent);
2731 mutex_unlock(&conn->chan_lock);
2732
2733 sendresp:
2734 rsp.scid = cpu_to_le16(scid);
2735 rsp.dcid = cpu_to_le16(dcid);
2736 rsp.result = cpu_to_le16(result);
2737 rsp.status = cpu_to_le16(status);
2738 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2739
2740 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2741 struct l2cap_info_req info;
2742 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2743
2744 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2745 conn->info_ident = l2cap_get_ident(conn);
2746
2747 schedule_delayed_work(&conn->info_timer,
2748 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2749
2750 l2cap_send_cmd(conn, conn->info_ident,
2751 L2CAP_INFO_REQ, sizeof(info), &info);
2752 }
2753
2754 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2755 result == L2CAP_CR_SUCCESS) {
2756 u8 buf[128];
2757 set_bit(CONF_REQ_SENT, &chan->conf_state);
2758 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2759 l2cap_build_conf_req(chan, buf), buf);
2760 chan->num_conf_req++;
2761 }
2762
2763 return 0;
2764 }
2765
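/* Handle a Connection Response and, on success, move the channel into
 * the configuration phase.
 */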
2766 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2767 {
2768 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2769 u16 scid, dcid, result, status;
2770 struct l2cap_chan *chan;
2771 u8 req[128];
2772 int err;
2773
2774 scid = __le16_to_cpu(rsp->scid);
2775 dcid = __le16_to_cpu(rsp->dcid);
2776 result = __le16_to_cpu(rsp->result);
2777 status = __le16_to_cpu(rsp->status);
2778
2779 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
2780 dcid, scid, result, status);
2781
2782 mutex_lock(&conn->chan_lock);
2783
2784 if (scid) {
2785 chan = __l2cap_get_chan_by_scid(conn, scid);
2786 if (!chan) {
2787 err = -EFAULT;
2788 goto unlock;
2789 }
2790 } else {
2791 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
2792 if (!chan) {
2793 err = -EFAULT;
2794 goto unlock;
2795 }
2796 }
2797
2798 err = 0;
2799
2800 l2cap_chan_lock(chan);
2801
2802 switch (result) {
2803 case L2CAP_CR_SUCCESS:
2804 l2cap_state_change(chan, BT_CONFIG);
2805 chan->ident = 0;
2806 chan->dcid = dcid;
2807 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2808
2809 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2810 break;
2811
2812 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2813 l2cap_build_conf_req(chan, req), req);
2814 chan->num_conf_req++;
2815 break;
2816
2817 case L2CAP_CR_PEND:
2818 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2819 break;
2820
2821 default:
2822 l2cap_chan_del(chan, ECONNREFUSED);
2823 break;
2824 }
2825
2826 l2cap_chan_unlock(chan);
2827
2828 unlock:
2829 mutex_unlock(&conn->chan_lock);
2830
2831 return err;
2832 }
2833
2834 static inline void set_default_fcs(struct l2cap_chan *chan)
2835 {
2836 /* FCS is enabled only in ERTM or streaming mode, if one or both
2837 * sides request it.
2838 */
2839 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2840 chan->fcs = L2CAP_FCS_NONE;
2841 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2842 chan->fcs = L2CAP_FCS_CRC16;
2843 }
2844
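/* Handle a Configuration Request: accumulate the options in
 * chan->conf_req until the continuation flag is cleared, then parse
 * them and send back a Configuration Response.
 */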
2845 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2846 {
2847 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2848 u16 dcid, flags;
2849 u8 rsp[64];
2850 struct l2cap_chan *chan;
2851 int len;
2852
2853 dcid = __le16_to_cpu(req->dcid);
2854 flags = __le16_to_cpu(req->flags);
2855
2856 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2857
2858 chan = l2cap_get_chan_by_scid(conn, dcid);
2859 if (!chan)
2860 return -ENOENT;
2861
2862 l2cap_chan_lock(chan);
2863
2864 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2865 struct l2cap_cmd_rej_cid rej;
2866
2867 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2868 rej.scid = cpu_to_le16(chan->scid);
2869 rej.dcid = cpu_to_le16(chan->dcid);
2870
2871 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2872 sizeof(rej), &rej);
2873 goto unlock;
2874 }
2875
2876 /* Reject if config buffer is too small. */
2877 len = cmd_len - sizeof(*req);
2878 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2879 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2880 l2cap_build_conf_rsp(chan, rsp,
2881 L2CAP_CONF_REJECT, flags), rsp);
2882 goto unlock;
2883 }
2884
2885 /* Store config. */
2886 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2887 chan->conf_len += len;
2888
2889 if (flags & 0x0001) {
2890 /* Incomplete config. Send empty response. */
2891 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2892 l2cap_build_conf_rsp(chan, rsp,
2893 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2894 goto unlock;
2895 }
2896
2897 /* Complete config. */
2898 len = l2cap_parse_conf_req(chan, rsp);
2899 if (len < 0) {
2900 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2901 goto unlock;
2902 }
2903
2904 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2905 chan->num_conf_rsp++;
2906
2907 /* Reset config buffer. */
2908 chan->conf_len = 0;
2909
2910 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2911 goto unlock;
2912
2913 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2914 set_default_fcs(chan);
2915
2916 l2cap_state_change(chan, BT_CONNECTED);
2917
2918 chan->next_tx_seq = 0;
2919 chan->expected_tx_seq = 0;
2920 skb_queue_head_init(&chan->tx_q);
2921 if (chan->mode == L2CAP_MODE_ERTM)
2922 l2cap_ertm_init(chan);
2923
2924 l2cap_chan_ready(chan);
2925 goto unlock;
2926 }
2927
2928 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2929 u8 buf[64];
2930 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2931 l2cap_build_conf_req(chan, buf), buf);
2932 chan->num_conf_req++;
2933 }
2934
2935 /* Got Conf Rsp PENDING from remote side and assume we sent
2936 Conf Rsp PENDING in the code above */
2937 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2938 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2939
2940 /* check compatibility */
2941
2942 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2943 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2944
2945 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2946 l2cap_build_conf_rsp(chan, rsp,
2947 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2948 }
2949
2950 unlock:
2951 l2cap_chan_unlock(chan);
2952 return 0;
2953 }
2954
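/* Handle a Configuration Response and, once both directions have been
 * configured, mark the channel as connected.
 */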
2955 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2956 {
2957 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2958 u16 scid, flags, result;
2959 struct l2cap_chan *chan;
2960 int len = __le16_to_cpu(cmd->len) - sizeof(*rsp);
2961
2962 scid = __le16_to_cpu(rsp->scid);
2963 flags = __le16_to_cpu(rsp->flags);
2964 result = __le16_to_cpu(rsp->result);
2965
2966 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2967 scid, flags, result);
2968
2969 chan = l2cap_get_chan_by_scid(conn, scid);
2970 if (!chan)
2971 return 0;
2972
2973 l2cap_chan_lock(chan);
2974
2975 switch (result) {
2976 case L2CAP_CONF_SUCCESS:
2977 l2cap_conf_rfc_get(chan, rsp->data, len);
2978 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2979 break;
2980
2981 case L2CAP_CONF_PENDING:
2982 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2983
2984 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2985 char buf[64];
2986
2987 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2988 buf, &result);
2989 if (len < 0) {
2990 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2991 goto done;
2992 }
2993
2994 /* check compatibility */
2995
2996 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2997 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2998
2999 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3000 l2cap_build_conf_rsp(chan, buf,
3001 L2CAP_CONF_SUCCESS, 0x0000), buf);
3002 }
3003 goto done;
3004
3005 case L2CAP_CONF_UNACCEPT:
3006 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3007 char req[64];
3008
3009 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3010 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3011 goto done;
3012 }
3013
3014 /* throw out any old stored conf requests */
3015 result = L2CAP_CONF_SUCCESS;
3016 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3017 req, &result);
3018 if (len < 0) {
3019 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3020 goto done;
3021 }
3022
3023 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3024 L2CAP_CONF_REQ, len, req);
3025 chan->num_conf_req++;
3026 if (result != L2CAP_CONF_SUCCESS)
3027 goto done;
3028 break;
3029 }
3030
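/* fall through */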
3031 default:
3032 l2cap_chan_set_err(chan, ECONNRESET);
3033
3034 __set_chan_timer(chan,
3035 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT));
3036 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3037 goto done;
3038 }
3039
3040 if (flags & 0x01)
3041 goto done;
3042
3043 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3044
3045 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3046 set_default_fcs(chan);
3047
3048 l2cap_state_change(chan, BT_CONNECTED);
3049 chan->next_tx_seq = 0;
3050 chan->expected_tx_seq = 0;
3051 skb_queue_head_init(&chan->tx_q);
3052 if (chan->mode == L2CAP_MODE_ERTM)
3053 l2cap_ertm_init(chan);
3054
3055 l2cap_chan_ready(chan);
3056 }
3057
3058 done:
3059 l2cap_chan_unlock(chan);
3060 return 0;
3061 }
3062
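/* Handle a Disconnection Request: acknowledge it and tear down the
 * channel.
 */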
3063 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3064 {
3065 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3066 struct l2cap_disconn_rsp rsp;
3067 u16 dcid, scid;
3068 struct l2cap_chan *chan;
3069 struct sock *sk;
3070
3071 scid = __le16_to_cpu(req->scid);
3072 dcid = __le16_to_cpu(req->dcid);
3073
3074 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3075
3076 mutex_lock(&conn->chan_lock);
3077
3078 chan = __l2cap_get_chan_by_scid(conn, dcid);
3079 if (!chan) {
3080 mutex_unlock(&conn->chan_lock);
3081 return 0;
3082 }
3083
3084 l2cap_chan_lock(chan);
3085
3086 sk = chan->sk;
3087
3088 rsp.dcid = cpu_to_le16(chan->scid);
3089 rsp.scid = cpu_to_le16(chan->dcid);
3090 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3091
3092 lock_sock(sk);
3093 sk->sk_shutdown = SHUTDOWN_MASK;
3094 release_sock(sk);
3095
3096 l2cap_chan_del(chan, ECONNRESET);
3097
3098 l2cap_chan_unlock(chan);
3099
3100 chan->ops->close(chan->data);
3101
3102 mutex_unlock(&conn->chan_lock);
3103
3104 return 0;
3105 }
3106
3107 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3108 {
3109 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3110 u16 dcid, scid;
3111 struct l2cap_chan *chan;
3112
3113 scid = __le16_to_cpu(rsp->scid);
3114 dcid = __le16_to_cpu(rsp->dcid);
3115
3116 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3117
3118 mutex_lock(&conn->chan_lock);
3119
3120 chan = __l2cap_get_chan_by_scid(conn, scid);
3121 if (!chan) {
3122 mutex_unlock(&conn->chan_lock);
3123 return 0;
3124 }
3125
3126 l2cap_chan_lock(chan);
3127
3128 l2cap_chan_del(chan, 0);
3129
3130 l2cap_chan_unlock(chan);
3131
3132 chan->ops->close(chan->data);
3133
3134 mutex_unlock(&conn->chan_lock);
3135
3136 return 0;
3137 }
3138
3139 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3140 {
3141 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3142 u16 type;
3143
3144 type = __le16_to_cpu(req->type);
3145
3146 BT_DBG("type 0x%4.4x", type);
3147
3148 if (type == L2CAP_IT_FEAT_MASK) {
3149 u8 buf[8];
3150 u32 feat_mask = l2cap_feat_mask;
3151 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3152 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3153 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3154 if (!disable_ertm)
3155 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3156 | L2CAP_FEAT_FCS;
3157 if (enable_hs)
3158 feat_mask |= L2CAP_FEAT_EXT_FLOW
3159 | L2CAP_FEAT_EXT_WINDOW;
3160
3161 put_unaligned_le32(feat_mask, rsp->data);
3162 l2cap_send_cmd(conn, cmd->ident,
3163 L2CAP_INFO_RSP, sizeof(buf), buf);
3164 } else if (type == L2CAP_IT_FIXED_CHAN) {
3165 u8 buf[12];
3166 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3167
3168 if (enable_hs)
3169 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3170 else
3171 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3172
3173 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3174 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3175 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3176 l2cap_send_cmd(conn, cmd->ident,
3177 L2CAP_INFO_RSP, sizeof(buf), buf);
3178 } else {
3179 struct l2cap_info_rsp rsp;
3180 rsp.type = cpu_to_le16(type);
3181 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3182 l2cap_send_cmd(conn, cmd->ident,
3183 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3184 }
3185
3186 return 0;
3187 }
3188
3189 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3190 {
3191 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3192 u16 type, result;
3193
3194 type = __le16_to_cpu(rsp->type);
3195 result = __le16_to_cpu(rsp->result);
3196
3197 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3198
3199 /* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
3200 if (cmd->ident != conn->info_ident ||
3201 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3202 return 0;
3203
3204 cancel_delayed_work(&conn->info_timer);
3205
3206 if (result != L2CAP_IR_SUCCESS) {
3207 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3208 conn->info_ident = 0;
3209
3210 l2cap_conn_start(conn);
3211
3212 return 0;
3213 }
3214
3215 if (type == L2CAP_IT_FEAT_MASK) {
3216 conn->feat_mask = get_unaligned_le32(rsp->data);
3217
3218 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3219 struct l2cap_info_req req;
3220 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3221
3222 conn->info_ident = l2cap_get_ident(conn);
3223
3224 l2cap_send_cmd(conn, conn->info_ident,
3225 L2CAP_INFO_REQ, sizeof(req), &req);
3226 } else {
3227 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3228 conn->info_ident = 0;
3229
3230 l2cap_conn_start(conn);
3231 }
3232 } else if (type == L2CAP_IT_FIXED_CHAN) {
3233 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3234 conn->info_ident = 0;
3235
3236 l2cap_conn_start(conn);
3237 }
3238
3239 return 0;
3240 }
3241
3242 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3243 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3244 void *data)
3245 {
3246 struct l2cap_create_chan_req *req = data;
3247 struct l2cap_create_chan_rsp rsp;
3248 u16 psm, scid;
3249
3250 if (cmd_len != sizeof(*req))
3251 return -EPROTO;
3252
3253 if (!enable_hs)
3254 return -EINVAL;
3255
3256 psm = le16_to_cpu(req->psm);
3257 scid = le16_to_cpu(req->scid);
3258
3259 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3260
3261 /* Placeholder: Always reject */
3262 rsp.dcid = 0;
3263 rsp.scid = cpu_to_le16(scid);
3264 rsp.result = L2CAP_CR_NO_MEM;
3265 rsp.status = L2CAP_CS_NO_INFO;
3266
3267 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3268 sizeof(rsp), &rsp);
3269
3270 return 0;
3271 }
3272
3273 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3274 struct l2cap_cmd_hdr *cmd, void *data)
3275 {
3276 BT_DBG("conn %p", conn);
3277
3278 return l2cap_connect_rsp(conn, cmd, data);
3279 }
3280
3281 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3282 u16 icid, u16 result)
3283 {
3284 struct l2cap_move_chan_rsp rsp;
3285
3286 BT_DBG("icid %d, result %d", icid, result);
3287
3288 rsp.icid = cpu_to_le16(icid);
3289 rsp.result = cpu_to_le16(result);
3290
3291 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3292 }
3293
3294 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3295 struct l2cap_chan *chan, u16 icid, u16 result)
3296 {
3297 struct l2cap_move_chan_cfm cfm;
3298 u8 ident;
3299
3300 BT_DBG("icid %d, result %d", icid, result);
3301
3302 ident = l2cap_get_ident(conn);
3303 if (chan)
3304 chan->ident = ident;
3305
3306 cfm.icid = cpu_to_le16(icid);
3307 cfm.result = cpu_to_le16(result);
3308
3309 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3310 }
3311
3312 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3313 u16 icid)
3314 {
3315 struct l2cap_move_chan_cfm_rsp rsp;
3316
3317 BT_DBG("icid %d", icid);
3318
3319 rsp.icid = cpu_to_le16(icid);
3320 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3321 }
3322
3323 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3324 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3325 {
3326 struct l2cap_move_chan_req *req = data;
3327 u16 icid = 0;
3328 u16 result = L2CAP_MR_NOT_ALLOWED;
3329
3330 if (cmd_len != sizeof(*req))
3331 return -EPROTO;
3332
3333 icid = le16_to_cpu(req->icid);
3334
3335 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3336
3337 if (!enable_hs)
3338 return -EINVAL;
3339
3340 /* Placeholder: Always refuse */
3341 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3342
3343 return 0;
3344 }
3345
3346 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3347 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3348 {
3349 struct l2cap_move_chan_rsp *rsp = data;
3350 u16 icid, result;
3351
3352 if (cmd_len != sizeof(*rsp))
3353 return -EPROTO;
3354
3355 icid = le16_to_cpu(rsp->icid);
3356 result = le16_to_cpu(rsp->result);
3357
3358 BT_DBG("icid %d, result %d", icid, result);
3359
3360 /* Placeholder: Always unconfirmed */
3361 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3362
3363 return 0;
3364 }
3365
3366 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3367 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3368 {
3369 struct l2cap_move_chan_cfm *cfm = data;
3370 u16 icid, result;
3371
3372 if (cmd_len != sizeof(*cfm))
3373 return -EPROTO;
3374
3375 icid = le16_to_cpu(cfm->icid);
3376 result = le16_to_cpu(cfm->result);
3377
3378 BT_DBG("icid %d, result %d", icid, result);
3379
3380 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3381
3382 return 0;
3383 }
3384
3385 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3386 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3387 {
3388 struct l2cap_move_chan_cfm_rsp *rsp = data;
3389 u16 icid;
3390
3391 if (cmd_len != sizeof(*rsp))
3392 return -EPROTO;
3393
3394 icid = le16_to_cpu(rsp->icid);
3395
3396 BT_DBG("icid %d", icid);
3397
3398 return 0;
3399 }
3400
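/* Validate requested LE connection parameters against the ranges
 * allowed by the specification.
 */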
3401 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3402 u16 to_multiplier)
3403 {
3404 u16 max_latency;
3405
3406 if (min > max || min < 6 || max > 3200)
3407 return -EINVAL;
3408
3409 if (to_multiplier < 10 || to_multiplier > 3200)
3410 return -EINVAL;
3411
3412 if (max >= to_multiplier * 8)
3413 return -EINVAL;
3414
3415 max_latency = (to_multiplier * 8 / max) - 1;
3416 if (latency > 499 || latency > max_latency)
3417 return -EINVAL;
3418
3419 return 0;
3420 }
3421
3422 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3423 struct l2cap_cmd_hdr *cmd, u8 *data)
3424 {
3425 struct hci_conn *hcon = conn->hcon;
3426 struct l2cap_conn_param_update_req *req;
3427 struct l2cap_conn_param_update_rsp rsp;
3428 u16 min, max, latency, to_multiplier, cmd_len;
3429 int err;
3430
3431 if (!(hcon->link_mode & HCI_LM_MASTER))
3432 return -EINVAL;
3433
3434 cmd_len = __le16_to_cpu(cmd->len);
3435 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3436 return -EPROTO;
3437
3438 req = (struct l2cap_conn_param_update_req *) data;
3439 min = __le16_to_cpu(req->min);
3440 max = __le16_to_cpu(req->max);
3441 latency = __le16_to_cpu(req->latency);
3442 to_multiplier = __le16_to_cpu(req->to_multiplier);
3443
3444 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3445 min, max, latency, to_multiplier);
3446
3447 memset(&rsp, 0, sizeof(rsp));
3448
3449 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3450 if (err)
3451 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3452 else
3453 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3454
3455 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3456 sizeof(rsp), &rsp);
3457
3458 if (!err)
3459 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3460
3461 return 0;
3462 }
3463
3464 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3465 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3466 {
3467 int err = 0;
3468
3469 switch (cmd->code) {
3470 case L2CAP_COMMAND_REJ:
3471 l2cap_command_rej(conn, cmd, data);
3472 break;
3473
3474 case L2CAP_CONN_REQ:
3475 err = l2cap_connect_req(conn, cmd, data);
3476 break;
3477
3478 case L2CAP_CONN_RSP:
3479 err = l2cap_connect_rsp(conn, cmd, data);
3480 break;
3481
3482 case L2CAP_CONF_REQ:
3483 err = l2cap_config_req(conn, cmd, cmd_len, data);
3484 break;
3485
3486 case L2CAP_CONF_RSP:
3487 err = l2cap_config_rsp(conn, cmd, data);
3488 break;
3489
3490 case L2CAP_DISCONN_REQ:
3491 err = l2cap_disconnect_req(conn, cmd, data);
3492 break;
3493
3494 case L2CAP_DISCONN_RSP:
3495 err = l2cap_disconnect_rsp(conn, cmd, data);
3496 break;
3497
3498 case L2CAP_ECHO_REQ:
3499 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3500 break;
3501
3502 case L2CAP_ECHO_RSP:
3503 break;
3504
3505 case L2CAP_INFO_REQ:
3506 err = l2cap_information_req(conn, cmd, data);
3507 break;
3508
3509 case L2CAP_INFO_RSP:
3510 err = l2cap_information_rsp(conn, cmd, data);
3511 break;
3512
3513 case L2CAP_CREATE_CHAN_REQ:
3514 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3515 break;
3516
3517 case L2CAP_CREATE_CHAN_RSP:
3518 err = l2cap_create_channel_rsp(conn, cmd, data);
3519 break;
3520
3521 case L2CAP_MOVE_CHAN_REQ:
3522 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3523 break;
3524
3525 case L2CAP_MOVE_CHAN_RSP:
3526 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3527 break;
3528
3529 case L2CAP_MOVE_CHAN_CFM:
3530 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3531 break;
3532
3533 case L2CAP_MOVE_CHAN_CFM_RSP:
3534 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3535 break;
3536
3537 default:
3538 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3539 err = -EINVAL;
3540 break;
3541 }
3542
3543 return err;
3544 }
3545
3546 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3547 struct l2cap_cmd_hdr *cmd, u8 *data)
3548 {
3549 switch (cmd->code) {
3550 case L2CAP_COMMAND_REJ:
3551 return 0;
3552
3553 case L2CAP_CONN_PARAM_UPDATE_REQ:
3554 return l2cap_conn_param_update_req(conn, cmd, data);
3555
3556 case L2CAP_CONN_PARAM_UPDATE_RSP:
3557 return 0;
3558
3559 default:
3560 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3561 return -EINVAL;
3562 }
3563 }
3564
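/* Process every signalling command carried in a C-frame received on
 * the signalling channel.
 */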
3565 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3566 struct sk_buff *skb)
3567 {
3568 u8 *data = skb->data;
3569 int len = skb->len;
3570 struct l2cap_cmd_hdr cmd;
3571 int err;
3572
3573 l2cap_raw_recv(conn, skb);
3574
3575 while (len >= L2CAP_CMD_HDR_SIZE) {
3576 u16 cmd_len;
3577 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3578 data += L2CAP_CMD_HDR_SIZE;
3579 len -= L2CAP_CMD_HDR_SIZE;
3580
3581 cmd_len = le16_to_cpu(cmd.len);
3582
3583 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3584
3585 if (cmd_len > len || !cmd.ident) {
3586 BT_DBG("corrupted command");
3587 break;
3588 }
3589
3590 if (conn->hcon->type == LE_LINK)
3591 err = l2cap_le_sig_cmd(conn, &cmd, data);
3592 else
3593 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3594
3595 if (err) {
3596 struct l2cap_cmd_rej_unk rej;
3597
3598 BT_ERR("Wrong link type (%d)", err);
3599
3600 /* FIXME: Map err to a valid reason */
3601 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3602 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3603 }
3604
3605 data += cmd_len;
3606 len -= cmd_len;
3607 }
3608
3609 kfree_skb(skb);
3610 }
3611
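/* Verify and strip the FCS of a received frame when CRC16 checking is
 * enabled on the channel.  Returns -EBADMSG on a checksum mismatch.
 */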
3612 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3613 {
3614 u16 our_fcs, rcv_fcs;
3615 int hdr_size;
3616
3617 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3618 hdr_size = L2CAP_EXT_HDR_SIZE;
3619 else
3620 hdr_size = L2CAP_ENH_HDR_SIZE;
3621
3622 if (chan->fcs == L2CAP_FCS_CRC16) {
3623 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3624 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3625 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3626
3627 if (our_fcs != rcv_fcs)
3628 return -EBADMSG;
3629 }
3630 return 0;
3631 }
3632
3633 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3634 {
3635 u32 control = 0;
3636
3637 chan->frames_sent = 0;
3638
3639 control |= __set_reqseq(chan, chan->buffer_seq);
3640
3641 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3642 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3643 l2cap_send_sframe(chan, control);
3644 set_bit(CONN_RNR_SENT, &chan->conn_state);
3645 }
3646
3647 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3648 l2cap_retransmit_frames(chan);
3649
3650 l2cap_ertm_send(chan);
3651
3652 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3653 chan->frames_sent == 0) {
3654 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3655 l2cap_send_sframe(chan, control);
3656 }
3657 }
3658
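/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq.  Returns -EINVAL for a duplicate sequence
 * number.
 */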
3659 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3660 {
3661 struct sk_buff *next_skb;
3662 int tx_seq_offset, next_tx_seq_offset;
3663
3664 bt_cb(skb)->tx_seq = tx_seq;
3665 bt_cb(skb)->sar = sar;
3666
3667 next_skb = skb_peek(&chan->srej_q);
3668
3669 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3670
3671 while (next_skb) {
3672 if (bt_cb(next_skb)->tx_seq == tx_seq)
3673 return -EINVAL;
3674
3675 next_tx_seq_offset = __seq_offset(chan,
3676 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3677
3678 if (next_tx_seq_offset > tx_seq_offset) {
3679 __skb_queue_before(&chan->srej_q, next_skb, skb);
3680 return 0;
3681 }
3682
3683 if (skb_queue_is_last(&chan->srej_q, next_skb))
3684 next_skb = NULL;
3685 else
3686 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3687 }
3688
3689 __skb_queue_tail(&chan->srej_q, skb);
3690
3691 return 0;
3692 }
3693
3694 static void append_skb_frag(struct sk_buff *skb,
3695 struct sk_buff *new_frag, struct sk_buff **last_frag)
3696 {
3697 /* skb->len reflects data in skb as well as all fragments
3698 * skb->data_len reflects only data in fragments
3699 */
3700 if (!skb_has_frag_list(skb))
3701 skb_shinfo(skb)->frag_list = new_frag;
3702
3703 new_frag->next = NULL;
3704
3705 (*last_frag)->next = new_frag;
3706 *last_frag = new_frag;
3707
3708 skb->len += new_frag->len;
3709 skb->data_len += new_frag->len;
3710 skb->truesize += new_frag->truesize;
3711 }
3712
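/* Reassemble a segmented SDU from its I-frames according to the SAR
 * bits of the control field and hand the complete SDU to the upper
 * layer.
 */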
3713 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3714 {
3715 int err = -EINVAL;
3716
3717 switch (__get_ctrl_sar(chan, control)) {
3718 case L2CAP_SAR_UNSEGMENTED:
3719 if (chan->sdu)
3720 break;
3721
3722 err = chan->ops->recv(chan->data, skb);
3723 break;
3724
3725 case L2CAP_SAR_START:
3726 if (chan->sdu)
3727 break;
3728
3729 chan->sdu_len = get_unaligned_le16(skb->data);
3730 skb_pull(skb, L2CAP_SDULEN_SIZE);
3731
3732 if (chan->sdu_len > chan->imtu) {
3733 err = -EMSGSIZE;
3734 break;
3735 }
3736
3737 if (skb->len >= chan->sdu_len)
3738 break;
3739
3740 chan->sdu = skb;
3741 chan->sdu_last_frag = skb;
3742
3743 skb = NULL;
3744 err = 0;
3745 break;
3746
3747 case L2CAP_SAR_CONTINUE:
3748 if (!chan->sdu)
3749 break;
3750
3751 append_skb_frag(chan->sdu, skb,
3752 &chan->sdu_last_frag);
3753 skb = NULL;
3754
3755 if (chan->sdu->len >= chan->sdu_len)
3756 break;
3757
3758 err = 0;
3759 break;
3760
3761 case L2CAP_SAR_END:
3762 if (!chan->sdu)
3763 break;
3764
3765 append_skb_frag(chan->sdu, skb,
3766 &chan->sdu_last_frag);
3767 skb = NULL;
3768
3769 if (chan->sdu->len != chan->sdu_len)
3770 break;
3771
3772 err = chan->ops->recv(chan->data, chan->sdu);
3773
3774 if (!err) {
3775 /* Reassembly complete */
3776 chan->sdu = NULL;
3777 chan->sdu_last_frag = NULL;
3778 chan->sdu_len = 0;
3779 }
3780 break;
3781 }
3782
3783 if (err) {
3784 kfree_skb(skb);
3785 kfree_skb(chan->sdu);
3786 chan->sdu = NULL;
3787 chan->sdu_last_frag = NULL;
3788 chan->sdu_len = 0;
3789 }
3790
3791 return err;
3792 }
3793
3794 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3795 {
3796 BT_DBG("chan %p, Enter local busy", chan);
3797
3798 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3799
3800 __set_ack_timer(chan);
3801 }
3802
3803 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3804 {
3805 u32 control;
3806
3807 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3808 goto done;
3809
3810 control = __set_reqseq(chan, chan->buffer_seq);
3811 control |= __set_ctrl_poll(chan);
3812 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3813 l2cap_send_sframe(chan, control);
3814 chan->retry_count = 1;
3815
3816 __clear_retrans_timer(chan);
3817 __set_monitor_timer(chan);
3818
3819 set_bit(CONN_WAIT_F, &chan->conn_state);
3820
3821 done:
3822 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3823 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3824
3825 BT_DBG("chan %p, Exit local busy", chan);
3826 }
3827
3828 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3829 {
3830 if (chan->mode == L2CAP_MODE_ERTM) {
3831 if (busy)
3832 l2cap_ertm_enter_local_busy(chan);
3833 else
3834 l2cap_ertm_exit_local_busy(chan);
3835 }
3836 }
3837
3838 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3839 {
3840 struct sk_buff *skb;
3841 u32 control;
3842
3843 while ((skb = skb_peek(&chan->srej_q)) &&
3844 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3845 int err;
3846
3847 if (bt_cb(skb)->tx_seq != tx_seq)
3848 break;
3849
3850 skb = skb_dequeue(&chan->srej_q);
3851 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3852 err = l2cap_reassemble_sdu(chan, skb, control);
3853
3854 if (err < 0) {
3855 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3856 break;
3857 }
3858
3859 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3860 tx_seq = __next_seq(chan, tx_seq);
3861 }
3862 }
3863
3864 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3865 {
3866 struct srej_list *l, *tmp;
3867 u32 control;
3868
3869 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3870 if (l->tx_seq == tx_seq) {
3871 list_del(&l->list);
3872 kfree(l);
3873 return;
3874 }
3875 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3876 control |= __set_reqseq(chan, l->tx_seq);
3877 l2cap_send_sframe(chan, control);
3878 list_del(&l->list);
3879 list_add_tail(&l->list, &chan->srej_l);
3880 }
3881 }
3882
3883 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3884 {
3885 struct srej_list *new;
3886 u32 control;
3887
3888 while (tx_seq != chan->expected_tx_seq) {
3889 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3890 control |= __set_reqseq(chan, chan->expected_tx_seq);
3891 l2cap_send_sframe(chan, control);
3892
3893 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3894 if (!new)
3895 return -ENOMEM;
3896
3897 new->tx_seq = chan->expected_tx_seq;
3898
3899 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3900
3901 list_add_tail(&new->list, &chan->srej_l);
3902 }
3903
3904 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3905
3906 return 0;
3907 }
3908
3909 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3910 {
3911 u16 tx_seq = __get_txseq(chan, rx_control);
3912 u16 req_seq = __get_reqseq(chan, rx_control);
3913 u8 sar = __get_ctrl_sar(chan, rx_control);
3914 int tx_seq_offset, expected_tx_seq_offset;
3915 int num_to_ack = (chan->tx_win/6) + 1;
3916 int err = 0;
3917
3918 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3919 tx_seq, rx_control);
3920
3921 if (__is_ctrl_final(chan, rx_control) &&
3922 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3923 __clear_monitor_timer(chan);
3924 if (chan->unacked_frames > 0)
3925 __set_retrans_timer(chan);
3926 clear_bit(CONN_WAIT_F, &chan->conn_state);
3927 }
3928
3929 chan->expected_ack_seq = req_seq;
3930 l2cap_drop_acked_frames(chan);
3931
3932 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3933
3934 /* invalid tx_seq */
3935 if (tx_seq_offset >= chan->tx_win) {
3936 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3937 goto drop;
3938 }
3939
3940 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3941 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3942 l2cap_send_ack(chan);
3943 goto drop;
3944 }
3945
3946 if (tx_seq == chan->expected_tx_seq)
3947 goto expected;
3948
3949 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3950 struct srej_list *first;
3951
3952 first = list_first_entry(&chan->srej_l,
3953 struct srej_list, list);
3954 if (tx_seq == first->tx_seq) {
3955 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3956 l2cap_check_srej_gap(chan, tx_seq);
3957
3958 list_del(&first->list);
3959 kfree(first);
3960
3961 if (list_empty(&chan->srej_l)) {
3962 chan->buffer_seq = chan->buffer_seq_srej;
3963 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3964 l2cap_send_ack(chan);
3965 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3966 }
3967 } else {
3968 struct srej_list *l;
3969
3970 /* duplicated tx_seq */
3971 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3972 goto drop;
3973
3974 list_for_each_entry(l, &chan->srej_l, list) {
3975 if (l->tx_seq == tx_seq) {
3976 l2cap_resend_srejframe(chan, tx_seq);
3977 return 0;
3978 }
3979 }
3980
3981 err = l2cap_send_srejframe(chan, tx_seq);
3982 if (err < 0) {
3983 l2cap_send_disconn_req(chan->conn, chan, -err);
3984 return err;
3985 }
3986 }
3987 } else {
3988 expected_tx_seq_offset = __seq_offset(chan,
3989 chan->expected_tx_seq, chan->buffer_seq);
3990
3991 /* duplicated tx_seq */
3992 if (tx_seq_offset < expected_tx_seq_offset)
3993 goto drop;
3994
3995 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3996
3997 BT_DBG("chan %p, Enter SREJ", chan);
3998
3999 INIT_LIST_HEAD(&chan->srej_l);
4000 chan->buffer_seq_srej = chan->buffer_seq;
4001
4002 __skb_queue_head_init(&chan->srej_q);
4003 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4004
4005 /* Set P-bit only if there are some I-frames to ack. */
4006 if (__clear_ack_timer(chan))
4007 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4008
4009 err = l2cap_send_srejframe(chan, tx_seq);
4010 if (err < 0) {
4011 l2cap_send_disconn_req(chan->conn, chan, -err);
4012 return err;
4013 }
4014 }
4015 return 0;
4016
4017 expected:
4018 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4019
4020 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4021 bt_cb(skb)->tx_seq = tx_seq;
4022 bt_cb(skb)->sar = sar;
4023 __skb_queue_tail(&chan->srej_q, skb);
4024 return 0;
4025 }
4026
4027 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4028 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4029
4030 if (err < 0) {
4031 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4032 return err;
4033 }
4034
4035 if (__is_ctrl_final(chan, rx_control)) {
4036 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4037 l2cap_retransmit_frames(chan);
4038 }
4039
4040
4041 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4042 if (chan->num_acked == num_to_ack - 1)
4043 l2cap_send_ack(chan);
4044 else
4045 __set_ack_timer(chan);
4046
4047 return 0;
4048
4049 drop:
4050 kfree_skb(skb);
4051 return 0;
4052 }
4053
4054 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4055 {
4056 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4057 __get_reqseq(chan, rx_control), rx_control);
4058
4059 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4060 l2cap_drop_acked_frames(chan);
4061
4062 if (__is_ctrl_poll(chan, rx_control)) {
4063 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4064 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4065 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4066 (chan->unacked_frames > 0))
4067 __set_retrans_timer(chan);
4068
4069 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4070 l2cap_send_srejtail(chan);
4071 } else {
4072 l2cap_send_i_or_rr_or_rnr(chan);
4073 }
4074
4075 } else if (__is_ctrl_final(chan, rx_control)) {
4076 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4077
4078 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4079 l2cap_retransmit_frames(chan);
4080
4081 } else {
4082 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4083 (chan->unacked_frames > 0))
4084 __set_retrans_timer(chan);
4085
4086 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4087 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4088 l2cap_send_ack(chan);
4089 else
4090 l2cap_ertm_send(chan);
4091 }
4092 }
4093
4094 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4095 {
4096 u16 tx_seq = __get_reqseq(chan, rx_control);
4097
4098 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4099
4100 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4101
4102 chan->expected_ack_seq = tx_seq;
4103 l2cap_drop_acked_frames(chan);
4104
4105 if (__is_ctrl_final(chan, rx_control)) {
4106 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4107 l2cap_retransmit_frames(chan);
4108 } else {
4109 l2cap_retransmit_frames(chan);
4110
4111 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4112 set_bit(CONN_REJ_ACT, &chan->conn_state);
4113 }
4114 }
4115 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4116 {
4117 u16 tx_seq = __get_reqseq(chan, rx_control);
4118
4119 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4120
4121 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4122
4123 if (__is_ctrl_poll(chan, rx_control)) {
4124 chan->expected_ack_seq = tx_seq;
4125 l2cap_drop_acked_frames(chan);
4126
4127 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4128 l2cap_retransmit_one_frame(chan, tx_seq);
4129
4130 l2cap_ertm_send(chan);
4131
4132 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4133 chan->srej_save_reqseq = tx_seq;
4134 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4135 }
4136 } else if (__is_ctrl_final(chan, rx_control)) {
4137 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4138 chan->srej_save_reqseq == tx_seq)
4139 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4140 else
4141 l2cap_retransmit_one_frame(chan, tx_seq);
4142 } else {
4143 l2cap_retransmit_one_frame(chan, tx_seq);
4144 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4145 chan->srej_save_reqseq = tx_seq;
4146 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4147 }
4148 }
4149 }
4150
4151 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4152 {
4153 u16 tx_seq = __get_reqseq(chan, rx_control);
4154
4155 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4156
4157 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4158 chan->expected_ack_seq = tx_seq;
4159 l2cap_drop_acked_frames(chan);
4160
4161 if (__is_ctrl_poll(chan, rx_control))
4162 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4163
4164 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4165 __clear_retrans_timer(chan);
4166 if (__is_ctrl_poll(chan, rx_control))
4167 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4168 return;
4169 }
4170
4171 if (__is_ctrl_poll(chan, rx_control)) {
4172 l2cap_send_srejtail(chan);
4173 } else {
4174 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4175 l2cap_send_sframe(chan, rx_control);
4176 }
4177 }
4178
4179 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4180 {
4181 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4182
4183 if (__is_ctrl_final(chan, rx_control) &&
4184 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4185 __clear_monitor_timer(chan);
4186 if (chan->unacked_frames > 0)
4187 __set_retrans_timer(chan);
4188 clear_bit(CONN_WAIT_F, &chan->conn_state);
4189 }
4190
4191 switch (__get_ctrl_super(chan, rx_control)) {
4192 case L2CAP_SUPER_RR:
4193 l2cap_data_channel_rrframe(chan, rx_control);
4194 break;
4195
4196 case L2CAP_SUPER_REJ:
4197 l2cap_data_channel_rejframe(chan, rx_control);
4198 break;
4199
4200 case L2CAP_SUPER_SREJ:
4201 l2cap_data_channel_srejframe(chan, rx_control);
4202 break;
4203
4204 case L2CAP_SUPER_RNR:
4205 l2cap_data_channel_rnrframe(chan, rx_control);
4206 break;
4207 }
4208
4209 kfree_skb(skb);
4210 return 0;
4211 }
4212
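/* Validate an incoming ERTM frame (FCS, length and req_seq) and
 * dispatch it to the I-frame or S-frame handler.
 */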
4213 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4214 {
4215 u32 control;
4216 u16 req_seq;
4217 int len, next_tx_seq_offset, req_seq_offset;
4218
4219 control = __get_control(chan, skb->data);
4220 skb_pull(skb, __ctrl_size(chan));
4221 len = skb->len;
4222
4223 /*
4224 * We can just drop the corrupted I-frame here.
4225 * The receiver will treat it as missing and start the normal
4226 * recovery procedure, asking for retransmission.
4227 */
4228 if (l2cap_check_fcs(chan, skb))
4229 goto drop;
4230
4231 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4232 len -= L2CAP_SDULEN_SIZE;
4233
4234 if (chan->fcs == L2CAP_FCS_CRC16)
4235 len -= L2CAP_FCS_SIZE;
4236
4237 if (len > chan->mps) {
4238 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4239 goto drop;
4240 }
4241
4242 req_seq = __get_reqseq(chan, control);
4243
4244 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4245
4246 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4247 chan->expected_ack_seq);
4248
4249 /* check for invalid req-seq */
4250 if (req_seq_offset > next_tx_seq_offset) {
4251 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4252 goto drop;
4253 }
4254
4255 if (!__is_sframe(chan, control)) {
4256 if (len < 0) {
4257 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4258 goto drop;
4259 }
4260
4261 l2cap_data_channel_iframe(chan, control, skb);
4262 } else {
4263 if (len != 0) {
4264 BT_ERR("%d", len);
4265 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4266 goto drop;
4267 }
4268
4269 l2cap_data_channel_sframe(chan, control, skb);
4270 }
4271
4272 return 0;
4273
4274 drop:
4275 kfree_skb(skb);
4276 return 0;
4277 }
4278
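/* Deliver a frame received on a dynamically allocated CID to the
 * owning channel according to its mode.
 */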
4279 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4280 {
4281 struct l2cap_chan *chan;
4282 u32 control;
4283 u16 tx_seq;
4284 int len;
4285
4286 chan = l2cap_get_chan_by_scid(conn, cid);
4287 if (!chan) {
4288 BT_DBG("unknown cid 0x%4.4x", cid);
4289 /* Drop packet and return */
4290 kfree_skb(skb);
4291 return 0;
4292 }
4293
4294 l2cap_chan_lock(chan);
4295
4296 BT_DBG("chan %p, len %d", chan, skb->len);
4297
4298 if (chan->state != BT_CONNECTED)
4299 goto drop;
4300
4301 switch (chan->mode) {
4302 case L2CAP_MODE_BASIC:
4303 /* If the socket recv buffer overflows we drop data here,
4304 * which is *bad* because L2CAP has to be reliable.
4305 * But we don't have any other choice: L2CAP doesn't
4306 * provide a flow control mechanism. */
4307
4308 if (chan->imtu < skb->len)
4309 goto drop;
4310
4311 if (!chan->ops->recv(chan->data, skb))
4312 goto done;
4313 break;
4314
4315 case L2CAP_MODE_ERTM:
4316 l2cap_ertm_data_rcv(chan, skb);
4317
4318 goto done;
4319
4320 case L2CAP_MODE_STREAMING:
4321 control = __get_control(chan, skb->data);
4322 skb_pull(skb, __ctrl_size(chan));
4323 len = skb->len;
4324
4325 if (l2cap_check_fcs(chan, skb))
4326 goto drop;
4327
4328 if (__is_sar_start(chan, control))
4329 len -= L2CAP_SDULEN_SIZE;
4330
4331 if (chan->fcs == L2CAP_FCS_CRC16)
4332 len -= L2CAP_FCS_SIZE;
4333
4334 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4335 goto drop;
4336
4337 tx_seq = __get_txseq(chan, control);
4338
4339 if (chan->expected_tx_seq != tx_seq) {
4340 /* Frame(s) missing - must discard partial SDU */
4341 kfree_skb(chan->sdu);
4342 chan->sdu = NULL;
4343 chan->sdu_last_frag = NULL;
4344 chan->sdu_len = 0;
4345
4346 /* TODO: Notify userland of missing data */
4347 }
4348
4349 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4350
4351 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4352 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4353
4354 goto done;
4355
4356 default:
4357 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4358 break;
4359 }
4360
4361 drop:
4362 kfree_skb(skb);
4363
4364 done:
4365 l2cap_chan_unlock(chan);
4366
4367 return 0;
4368 }
4369
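/* Deliver a connectionless (G-frame) payload to the channel listening on
 * the given PSM, provided the channel is bound or connected and the frame
 * fits its incoming MTU.
 */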
4370 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4371 {
4372 struct sock *sk = NULL;
4373 struct l2cap_chan *chan;
4374
4375 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4376 if (!chan)
4377 goto drop;
4378
4379 sk = chan->sk;
4380
4381 lock_sock(sk);
4382
4383 BT_DBG("sk %p, len %d", sk, skb->len);
4384
4385 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4386 goto drop;
4387
4388 if (chan->imtu < skb->len)
4389 goto drop;
4390
4391 if (!chan->ops->recv(chan->data, skb))
4392 goto done;
4393
4394 drop:
4395 kfree_skb(skb);
4396
4397 done:
4398 if (sk)
4399 release_sock(sk);
4400 return 0;
4401 }
4402
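/* Deliver data received on the LE fixed channel (ATT) to the channel
 * registered for that CID, applying the same state and MTU checks as the
 * connectionless path.
 */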
4403 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4404 {
4405 struct sock *sk = NULL;
4406 struct l2cap_chan *chan;
4407
4408 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4409 if (!chan)
4410 goto drop;
4411
4412 sk = chan->sk;
4413
4414 lock_sock(sk);
4415
4416 BT_DBG("sk %p, len %d", sk, skb->len);
4417
4418 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4419 goto drop;
4420
4421 if (chan->imtu < skb->len)
4422 goto drop;
4423
4424 if (!chan->ops->recv(chan->data, skb))
4425 goto done;
4426
4427 drop:
4428 kfree_skb(skb);
4429
4430 done:
4431 if (sk)
4432 release_sock(sk);
4433 return 0;
4434 }
4435
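/* Demultiplex a complete L2CAP frame by its destination CID: signalling,
 * connectionless, LE data (ATT), SMP, or an ordinary data channel.
 */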
4436 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4437 {
4438 struct l2cap_hdr *lh = (void *) skb->data;
4439 u16 cid, len;
4440 __le16 psm;
4441
4442 skb_pull(skb, L2CAP_HDR_SIZE);
4443 cid = __le16_to_cpu(lh->cid);
4444 len = __le16_to_cpu(lh->len);
4445
4446 if (len != skb->len) {
4447 kfree_skb(skb);
4448 return;
4449 }
4450
4451 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4452
4453 switch (cid) {
4454 case L2CAP_CID_LE_SIGNALING:
4455 case L2CAP_CID_SIGNALING:
4456 l2cap_sig_channel(conn, skb);
4457 break;
4458
4459 case L2CAP_CID_CONN_LESS:
4460 psm = get_unaligned_le16(skb->data);
4461 skb_pull(skb, 2);
4462 l2cap_conless_channel(conn, psm, skb);
4463 break;
4464
4465 case L2CAP_CID_LE_DATA:
4466 l2cap_att_channel(conn, cid, skb);
4467 break;
4468
4469 case L2CAP_CID_SMP:
4470 if (smp_sig_channel(conn, skb))
4471 l2cap_conn_del(conn->hcon, EACCES);
4472 break;
4473
4474 default:
4475 l2cap_data_channel(conn, cid, skb);
4476 break;
4477 }
4478 }
4479
4480 /* ---- L2CAP interface with lower layer (HCI) ---- */
4481
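/* Incoming ACL connection request: scan the global channel list for
 * listening channels bound to this adapter (or to any address) and build
 * the link-mode mask (accept and, if allowed, role switch) returned to HCI.
 */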
4482 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4483 {
4484 int exact = 0, lm1 = 0, lm2 = 0;
4485 struct l2cap_chan *c;
4486
4487 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4488
4489 /* Find listening channels and check their link_mode */
4490 read_lock(&chan_list_lock);
4491 list_for_each_entry(c, &chan_list, global_l) {
4492 struct sock *sk = c->sk;
4493
4494 if (c->state != BT_LISTEN)
4495 continue;
4496
4497 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4498 lm1 |= HCI_LM_ACCEPT;
4499 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4500 lm1 |= HCI_LM_MASTER;
4501 exact++;
4502 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4503 lm2 |= HCI_LM_ACCEPT;
4504 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4505 lm2 |= HCI_LM_MASTER;
4506 }
4507 }
4508 read_unlock(&chan_list_lock);
4509
4510 return exact ? lm1 : lm2;
4511 }
4512
4513 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4514 {
4515 struct l2cap_conn *conn;
4516
4517 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4518
4519 if (!status) {
4520 conn = l2cap_conn_add(hcon, status);
4521 if (conn)
4522 l2cap_conn_ready(conn);
4523 } else
4524 l2cap_conn_del(hcon, bt_to_errno(status));
4525
4526 return 0;
4527 }
4528
4529 int l2cap_disconn_ind(struct hci_conn *hcon)
4530 {
4531 struct l2cap_conn *conn = hcon->l2cap_data;
4532
4533 BT_DBG("hcon %p", hcon);
4534
4535 if (!conn)
4536 return HCI_ERROR_REMOTE_USER_TERM;
4537 return conn->disc_reason;
4538 }
4539
4540 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4541 {
4542 BT_DBG("hcon %p reason %d", hcon, reason);
4543
4544 l2cap_conn_del(hcon, bt_to_errno(reason));
4545 return 0;
4546 }
4547
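/* React to an encryption change on a connection-oriented channel: when
 * encryption is dropped, either re-arm the channel timer (medium security)
 * or close the channel (high security); when it is re-enabled, clear the
 * timer.
 */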
4548 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4549 {
4550 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4551 return;
4552
4553 if (encrypt == 0x00) {
4554 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4555 __clear_chan_timer(chan);
4556 __set_chan_timer(chan,
4557 msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
4558 } else if (chan->sec_level == BT_SECURITY_HIGH)
4559 l2cap_chan_close(chan, ECONNREFUSED);
4560 } else {
4561 if (chan->sec_level == BT_SECURITY_MEDIUM)
4562 __clear_chan_timer(chan);
4563 }
4564 }
4565
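/* HCI security (authentication/encryption) result: walk every channel on
 * the connection under chan_lock and advance its state machine - mark LE
 * channels ready, send a pending connect request, or answer a held
 * connect response, depending on the channel state and the status.
 */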
4566 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4567 {
4568 struct l2cap_conn *conn = hcon->l2cap_data;
4569 struct l2cap_chan *chan;
4570
4571 if (!conn)
4572 return 0;
4573
4574 BT_DBG("conn %p", conn);
4575
4576 if (hcon->type == LE_LINK) {
4577 smp_distribute_keys(conn, 0);
4578 cancel_delayed_work(&conn->security_timer);
4579 }
4580
4581 mutex_lock(&conn->chan_lock);
4582
4583 list_for_each_entry(chan, &conn->chan_l, list) {
4584 l2cap_chan_lock(chan);
4585
4586 BT_DBG("chan->scid %d", chan->scid);
4587
4588 if (chan->scid == L2CAP_CID_LE_DATA) {
4589 if (!status && encrypt) {
4590 chan->sec_level = hcon->sec_level;
4591 l2cap_chan_ready(chan);
4592 }
4593
4594 l2cap_chan_unlock(chan);
4595 continue;
4596 }
4597
4598 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4599 l2cap_chan_unlock(chan);
4600 continue;
4601 }
4602
4603 if (!status && (chan->state == BT_CONNECTED ||
4604 chan->state == BT_CONFIG)) {
4605 l2cap_check_encryption(chan, encrypt);
4606 l2cap_chan_unlock(chan);
4607 continue;
4608 }
4609
4610 if (chan->state == BT_CONNECT) {
4611 if (!status) {
4612 struct l2cap_conn_req req;
4613 req.scid = cpu_to_le16(chan->scid);
4614 req.psm = chan->psm;
4615
4616 chan->ident = l2cap_get_ident(conn);
4617 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4618
4619 l2cap_send_cmd(conn, chan->ident,
4620 L2CAP_CONN_REQ, sizeof(req), &req);
4621 } else {
4622 __clear_chan_timer(chan);
4623 __set_chan_timer(chan,
4624 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4625 }
4626 } else if (chan->state == BT_CONNECT2) {
4627 struct sock *sk = chan->sk;
4628 struct l2cap_conn_rsp rsp;
4629 __u16 res, stat;
4630
4631 lock_sock(sk);
4632
4633 if (!status) {
4634 if (bt_sk(sk)->defer_setup) {
4635 struct sock *parent = bt_sk(sk)->parent;
4636 res = L2CAP_CR_PEND;
4637 stat = L2CAP_CS_AUTHOR_PEND;
4638 if (parent)
4639 parent->sk_data_ready(parent, 0);
4640 } else {
4641 __l2cap_state_change(chan, BT_CONFIG);
4642 res = L2CAP_CR_SUCCESS;
4643 stat = L2CAP_CS_NO_INFO;
4644 }
4645 } else {
4646 __l2cap_state_change(chan, BT_DISCONN);
4647 __set_chan_timer(chan,
4648 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4649 res = L2CAP_CR_SEC_BLOCK;
4650 stat = L2CAP_CS_NO_INFO;
4651 }
4652
4653 release_sock(sk);
4654
4655 rsp.scid = cpu_to_le16(chan->dcid);
4656 rsp.dcid = cpu_to_le16(chan->scid);
4657 rsp.result = cpu_to_le16(res);
4658 rsp.status = cpu_to_le16(stat);
4659 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4660 sizeof(rsp), &rsp);
4661 }
4662
4663 l2cap_chan_unlock(chan);
4664 }
4665
4666 mutex_unlock(&conn->chan_lock);
4667
4668 return 0;
4669 }
4670
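/* Reassemble ACL data into complete L2CAP frames.  A start fragment
 * carries the basic L2CAP header with the total frame length; continuation
 * fragments are appended to conn->rx_skb until rx_len reaches zero, at
 * which point the frame is handed to l2cap_recv_frame().
 */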
4671 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4672 {
4673 struct l2cap_conn *conn = hcon->l2cap_data;
4674
4675 if (!conn)
4676 conn = l2cap_conn_add(hcon, 0);
4677
4678 if (!conn)
4679 goto drop;
4680
4681 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4682
4683 if (!(flags & ACL_CONT)) {
4684 struct l2cap_hdr *hdr;
4685 struct l2cap_chan *chan;
4686 u16 cid;
4687 int len;
4688
4689 if (conn->rx_len) {
4690 BT_ERR("Unexpected start frame (len %d)", skb->len);
4691 kfree_skb(conn->rx_skb);
4692 conn->rx_skb = NULL;
4693 conn->rx_len = 0;
4694 l2cap_conn_unreliable(conn, ECOMM);
4695 }
4696
4697 /* A start fragment always begins with the basic L2CAP header */
4698 if (skb->len < L2CAP_HDR_SIZE) {
4699 BT_ERR("Frame is too short (len %d)", skb->len);
4700 l2cap_conn_unreliable(conn, ECOMM);
4701 goto drop;
4702 }
4703
4704 hdr = (struct l2cap_hdr *) skb->data;
4705 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4706 cid = __le16_to_cpu(hdr->cid);
4707
4708 if (len == skb->len) {
4709 /* Complete frame received */
4710 l2cap_recv_frame(conn, skb);
4711 return 0;
4712 }
4713
4714 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4715
4716 if (skb->len > len) {
4717 BT_ERR("Frame is too long (len %d, expected len %d)",
4718 skb->len, len);
4719 l2cap_conn_unreliable(conn, ECOMM);
4720 goto drop;
4721 }
4722
4723 chan = l2cap_get_chan_by_scid(conn, cid);
4724
4725 if (chan && chan->sk) {
4726 struct sock *sk = chan->sk;
4727 lock_sock(sk);
4728
4729 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4730 BT_ERR("Frame exceeding recv MTU (len %d, "
4731 "MTU %d)", len,
4732 chan->imtu);
4733 release_sock(sk);
4734 l2cap_conn_unreliable(conn, ECOMM);
4735 goto drop;
4736 }
4737 release_sock(sk);
4738 }
4739
4740 /* Allocate skb for the complete frame (with header) */
4741 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4742 if (!conn->rx_skb)
4743 goto drop;
4744
4745 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4746 skb->len);
4747 conn->rx_len = len - skb->len;
4748 } else {
4749 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4750
4751 if (!conn->rx_len) {
4752 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4753 l2cap_conn_unreliable(conn, ECOMM);
4754 goto drop;
4755 }
4756
4757 if (skb->len > conn->rx_len) {
4758 BT_ERR("Fragment is too long (len %d, expected %d)",
4759 skb->len, conn->rx_len);
4760 kfree_skb(conn->rx_skb);
4761 conn->rx_skb = NULL;
4762 conn->rx_len = 0;
4763 l2cap_conn_unreliable(conn, ECOMM);
4764 goto drop;
4765 }
4766
4767 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4768 skb->len);
4769 conn->rx_len -= skb->len;
4770
4771 if (!conn->rx_len) {
4772 /* Complete frame received */
4773 l2cap_recv_frame(conn, conn->rx_skb);
4774 conn->rx_skb = NULL;
4775 }
4776 }
4777
4778 drop:
4779 kfree_skb(skb);
4780 return 0;
4781 }
4782
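/* debugfs: print one line per known channel with its addresses, state,
 * PSM, CIDs, MTUs, security level and mode.
 */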
4783 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4784 {
4785 struct l2cap_chan *c;
4786
4787 read_lock(&chan_list_lock);
4788
4789 list_for_each_entry(c, &chan_list, global_l) {
4790 struct sock *sk = c->sk;
4791
4792 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4793 batostr(&bt_sk(sk)->src),
4794 batostr(&bt_sk(sk)->dst),
4795 c->state, __le16_to_cpu(c->psm),
4796 c->scid, c->dcid, c->imtu, c->omtu,
4797 c->sec_level, c->mode);
4798 }
4799
4800 read_unlock(&chan_list_lock);
4801
4802 return 0;
4803 }
4804
4805 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4806 {
4807 return single_open(file, l2cap_debugfs_show, inode->i_private);
4808 }
4809
4810 static const struct file_operations l2cap_debugfs_fops = {
4811 .open = l2cap_debugfs_open,
4812 .read = seq_read,
4813 .llseek = seq_lseek,
4814 .release = single_release,
4815 };
4816
4817 static struct dentry *l2cap_debugfs;
4818
4819 int __init l2cap_init(void)
4820 {
4821 int err;
4822
4823 err = l2cap_init_sockets();
4824 if (err < 0)
4825 return err;
4826
4827 if (bt_debugfs) {
4828 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4829 bt_debugfs, NULL, &l2cap_debugfs_fops);
4830 if (!l2cap_debugfs)
4831 BT_ERR("Failed to create L2CAP debug file");
4832 }
4833
4834 return 0;
4835 }
4836
4837 void l2cap_exit(void)
4838 {
4839 debugfs_remove(l2cap_debugfs);
4840 l2cap_cleanup_sockets();
4841 }
4842
4843 module_param(disable_ertm, bool, 0644);
4844 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");