Bluetooth: Fix possible missing I-Frame acknowledgement
net/bluetooth/l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
13
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
26 */
27
28 /* Bluetooth L2CAP core. */
29
30 #include <linux/module.h>
31
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
50 #include <net/sock.h>
51
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
54
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
59
60 bool disable_ertm;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
67
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76 /* ---- L2CAP channels ---- */
77
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
79 {
80 struct l2cap_chan *c, *r = NULL;
81
82 rcu_read_lock();
83
84 list_for_each_entry_rcu(c, &conn->chan_l, list) {
85 if (c->dcid == cid) {
86 r = c;
87 break;
88 }
89 }
90
91 rcu_read_unlock();
92 return r;
93 }
94
95 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
96 {
97 struct l2cap_chan *c, *r = NULL;
98
99 rcu_read_lock();
100
101 list_for_each_entry_rcu(c, &conn->chan_l, list) {
102 if (c->scid == cid) {
103 r = c;
104 break;
105 }
106 }
107
108 rcu_read_unlock();
109 return r;
110 }
111
112 /* Find channel with given SCID.
113 * Returns the channel with its socket locked */
114 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
115 {
116 struct l2cap_chan *c;
117
118 c = __l2cap_get_chan_by_scid(conn, cid);
119 if (c)
120 lock_sock(c->sk);
121 return c;
122 }
123
124 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
125 {
126 struct l2cap_chan *c, *r = NULL;
127
128 rcu_read_lock();
129
130 list_for_each_entry_rcu(c, &conn->chan_l, list) {
131 if (c->ident == ident) {
132 r = c;
133 break;
134 }
135 }
136
137 rcu_read_unlock();
138 return r;
139 }
140
141 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
142 {
143 struct l2cap_chan *c;
144
145 c = __l2cap_get_chan_by_ident(conn, ident);
146 if (c)
147 lock_sock(c->sk);
148 return c;
149 }
150
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
152 {
153 struct l2cap_chan *c;
154
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
157 return c;
158 }
159 return NULL;
160 }
161
162 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
163 {
164 int err;
165
166 write_lock(&chan_list_lock);
167
168 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
169 err = -EADDRINUSE;
170 goto done;
171 }
172
173 if (psm) {
174 chan->psm = psm;
175 chan->sport = psm;
176 err = 0;
177 } else {
178 u16 p;
179
180 err = -EINVAL;
181 for (p = 0x1001; p < 0x1100; p += 2)
182 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
183 chan->psm = cpu_to_le16(p);
184 chan->sport = cpu_to_le16(p);
185 err = 0;
186 break;
187 }
188 }
189
190 done:
191 write_unlock(&chan_list_lock);
192 return err;
193 }
194
195 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
196 {
197 write_lock(&chan_list_lock);
198
199 chan->scid = scid;
200
201 write_unlock(&chan_list_lock);
202
203 return 0;
204 }
205
206 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
207 {
208 u16 cid = L2CAP_CID_DYN_START;
209
210 for (; cid < L2CAP_CID_DYN_END; cid++) {
211 if (!__l2cap_get_chan_by_scid(conn, cid))
212 return cid;
213 }
214
215 return 0;
216 }
217
218 static char *state_to_string(int state)
219 {
220 switch (state) {
221 case BT_CONNECTED:
222 return "BT_CONNECTED";
223 case BT_OPEN:
224 return "BT_OPEN";
225 case BT_BOUND:
226 return "BT_BOUND";
227 case BT_LISTEN:
228 return "BT_LISTEN";
229 case BT_CONNECT:
230 return "BT_CONNECT";
231 case BT_CONNECT2:
232 return "BT_CONNECT2";
233 case BT_CONFIG:
234 return "BT_CONFIG";
235 case BT_DISCONN:
236 return "BT_DISCONN";
237 case BT_CLOSED:
238 return "BT_CLOSED";
239 }
240
241 return "invalid state";
242 }
243
244 static void l2cap_state_change(struct l2cap_chan *chan, int state)
245 {
246 BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
247 state_to_string(state));
248
249 chan->state = state;
250 chan->ops->state_change(chan->data, state);
251 }
252
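/* Channel timer expiry handler.  Closes the channel with a reason derived
 * from its current state (ECONNREFUSED or ETIMEDOUT), notifies the owner
 * through ops->close() and drops a channel reference.
 */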
253 static void l2cap_chan_timeout(struct work_struct *work)
254 {
255 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
256 chan_timer.work);
257 struct sock *sk = chan->sk;
258 int reason;
259
260 BT_DBG("chan %p state %d", chan, chan->state);
261
262 lock_sock(sk);
263
264 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
265 reason = ECONNREFUSED;
266 else if (chan->state == BT_CONNECT &&
267 chan->sec_level != BT_SECURITY_SDP)
268 reason = ECONNREFUSED;
269 else
270 reason = ETIMEDOUT;
271
272 l2cap_chan_close(chan, reason);
273
274 release_sock(sk);
275
276 chan->ops->close(chan->data);
277 l2cap_chan_put(chan);
278 }
279
280 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
281 {
282 struct l2cap_chan *chan;
283
284 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
285 if (!chan)
286 return NULL;
287
288 chan->sk = sk;
289
290 write_lock(&chan_list_lock);
291 list_add(&chan->global_l, &chan_list);
292 write_unlock(&chan_list_lock);
293
294 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
295
296 chan->state = BT_OPEN;
297
298 atomic_set(&chan->refcnt, 1);
299
300 BT_DBG("sk %p chan %p", sk, chan);
301
302 return chan;
303 }
304
305 void l2cap_chan_destroy(struct l2cap_chan *chan)
306 {
307 write_lock(&chan_list_lock);
308 list_del(&chan->global_l);
309 write_unlock(&chan_list_lock);
310
311 l2cap_chan_put(chan);
312 }
313
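/* Attach a channel to an L2CAP connection: assign CIDs according to the
 * channel type (a dynamically allocated CID for connection-oriented ACL
 * channels, the fixed LE data, connectionless or signalling CIDs
 * otherwise), set the default extended flow specification values, take a
 * channel reference and add the channel to the connection's RCU-protected
 * channel list.
 */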
314 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
315 {
316 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
317 chan->psm, chan->dcid);
318
319 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
320
321 chan->conn = conn;
322
323 switch (chan->chan_type) {
324 case L2CAP_CHAN_CONN_ORIENTED:
325 if (conn->hcon->type == LE_LINK) {
326 /* LE connection */
327 chan->omtu = L2CAP_LE_DEFAULT_MTU;
328 chan->scid = L2CAP_CID_LE_DATA;
329 chan->dcid = L2CAP_CID_LE_DATA;
330 } else {
331 /* Alloc CID for connection-oriented socket */
332 chan->scid = l2cap_alloc_cid(conn);
333 chan->omtu = L2CAP_DEFAULT_MTU;
334 }
335 break;
336
337 case L2CAP_CHAN_CONN_LESS:
338 /* Connectionless socket */
339 chan->scid = L2CAP_CID_CONN_LESS;
340 chan->dcid = L2CAP_CID_CONN_LESS;
341 chan->omtu = L2CAP_DEFAULT_MTU;
342 break;
343
344 default:
345 /* Raw socket can send/recv signalling messages only */
346 chan->scid = L2CAP_CID_SIGNALING;
347 chan->dcid = L2CAP_CID_SIGNALING;
348 chan->omtu = L2CAP_DEFAULT_MTU;
349 }
350
351 chan->local_id = L2CAP_BESTEFFORT_ID;
352 chan->local_stype = L2CAP_SERV_BESTEFFORT;
353 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
354 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
355 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
356 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
357
358 l2cap_chan_hold(chan);
359
360 list_add_rcu(&chan->list, &conn->chan_l);
361 }
362
363 /* Delete channel.
364 * Must be called on the locked socket. */
365 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
366 {
367 struct sock *sk = chan->sk;
368 struct l2cap_conn *conn = chan->conn;
369 struct sock *parent = bt_sk(sk)->parent;
370
371 __clear_chan_timer(chan);
372
373 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
374
375 if (conn) {
376 /* Delete from channel list */
377 list_del_rcu(&chan->list);
378 synchronize_rcu();
379
380 l2cap_chan_put(chan);
381
382 chan->conn = NULL;
383 hci_conn_put(conn->hcon);
384 }
385
386 l2cap_state_change(chan, BT_CLOSED);
387 sock_set_flag(sk, SOCK_ZAPPED);
388
389 if (err)
390 sk->sk_err = err;
391
392 if (parent) {
393 bt_accept_unlink(sk);
394 parent->sk_data_ready(parent, 0);
395 } else
396 sk->sk_state_change(sk);
397
398 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
399 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
400 return;
401
402 skb_queue_purge(&chan->tx_q);
403
404 if (chan->mode == L2CAP_MODE_ERTM) {
405 struct srej_list *l, *tmp;
406
407 __clear_retrans_timer(chan);
408 __clear_monitor_timer(chan);
409 __clear_ack_timer(chan);
410
411 skb_queue_purge(&chan->srej_q);
412
413 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
414 list_del(&l->list);
415 kfree(l);
416 }
417 }
418 }
419
420 static void l2cap_chan_cleanup_listen(struct sock *parent)
421 {
422 struct sock *sk;
423
424 BT_DBG("parent %p", parent);
425
426 /* Close channels that have not yet been accepted */
427 while ((sk = bt_accept_dequeue(parent, NULL))) {
428 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
429 __clear_chan_timer(chan);
430 lock_sock(sk);
431 l2cap_chan_close(chan, ECONNRESET);
432 release_sock(sk);
433 chan->ops->close(chan->data);
434 }
435 }
436
437 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
438 {
439 struct l2cap_conn *conn = chan->conn;
440 struct sock *sk = chan->sk;
441
442 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
443
444 switch (chan->state) {
445 case BT_LISTEN:
446 l2cap_chan_cleanup_listen(sk);
447
448 l2cap_state_change(chan, BT_CLOSED);
449 sock_set_flag(sk, SOCK_ZAPPED);
450 break;
451
452 case BT_CONNECTED:
453 case BT_CONFIG:
454 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
455 conn->hcon->type == ACL_LINK) {
456 __clear_chan_timer(chan);
457 __set_chan_timer(chan, sk->sk_sndtimeo);
458 l2cap_send_disconn_req(conn, chan, reason);
459 } else
460 l2cap_chan_del(chan, reason);
461 break;
462
463 case BT_CONNECT2:
464 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
465 conn->hcon->type == ACL_LINK) {
466 struct l2cap_conn_rsp rsp;
467 __u16 result;
468
469 if (bt_sk(sk)->defer_setup)
470 result = L2CAP_CR_SEC_BLOCK;
471 else
472 result = L2CAP_CR_BAD_PSM;
473 l2cap_state_change(chan, BT_DISCONN);
474
475 rsp.scid = cpu_to_le16(chan->dcid);
476 rsp.dcid = cpu_to_le16(chan->scid);
477 rsp.result = cpu_to_le16(result);
478 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
479 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
480 sizeof(rsp), &rsp);
481 }
482
483 l2cap_chan_del(chan, reason);
484 break;
485
486 case BT_CONNECT:
487 case BT_DISCONN:
488 l2cap_chan_del(chan, reason);
489 break;
490
491 default:
492 sock_set_flag(sk, SOCK_ZAPPED);
493 break;
494 }
495 }
496
497 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
498 {
499 if (chan->chan_type == L2CAP_CHAN_RAW) {
500 switch (chan->sec_level) {
501 case BT_SECURITY_HIGH:
502 return HCI_AT_DEDICATED_BONDING_MITM;
503 case BT_SECURITY_MEDIUM:
504 return HCI_AT_DEDICATED_BONDING;
505 default:
506 return HCI_AT_NO_BONDING;
507 }
508 } else if (chan->psm == cpu_to_le16(0x0001)) {
509 if (chan->sec_level == BT_SECURITY_LOW)
510 chan->sec_level = BT_SECURITY_SDP;
511
512 if (chan->sec_level == BT_SECURITY_HIGH)
513 return HCI_AT_NO_BONDING_MITM;
514 else
515 return HCI_AT_NO_BONDING;
516 } else {
517 switch (chan->sec_level) {
518 case BT_SECURITY_HIGH:
519 return HCI_AT_GENERAL_BONDING_MITM;
520 case BT_SECURITY_MEDIUM:
521 return HCI_AT_GENERAL_BONDING;
522 default:
523 return HCI_AT_NO_BONDING;
524 }
525 }
526 }
527
528 /* Service level security */
529 int l2cap_chan_check_security(struct l2cap_chan *chan)
530 {
531 struct l2cap_conn *conn = chan->conn;
532 __u8 auth_type;
533
534 auth_type = l2cap_get_auth_type(chan);
535
536 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
537 }
538
539 static u8 l2cap_get_ident(struct l2cap_conn *conn)
540 {
541 u8 id;
542
543 /* Get next available identifier.
544 * 1 - 128 are used by kernel.
545 * 129 - 199 are reserved.
546 * 200 - 254 are used by utilities like l2ping, etc.
547 */
548
549 spin_lock(&conn->lock);
550
551 if (++conn->tx_ident > 128)
552 conn->tx_ident = 1;
553
554 id = conn->tx_ident;
555
556 spin_unlock(&conn->lock);
557
558 return id;
559 }
560
561 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
562 {
563 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
564 u8 flags;
565
566 BT_DBG("code 0x%2.2x", code);
567
568 if (!skb)
569 return;
570
571 if (lmp_no_flush_capable(conn->hcon->hdev))
572 flags = ACL_START_NO_FLUSH;
573 else
574 flags = ACL_START;
575
576 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
577 skb->priority = HCI_PRIO_MAX;
578
579 hci_send_acl(conn->hchan, skb, flags);
580 }
581
582 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
583 {
584 struct hci_conn *hcon = chan->conn->hcon;
585 u16 flags;
586
587 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
588 skb->priority);
589
590 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
591 lmp_no_flush_capable(hcon->hdev))
592 flags = ACL_START_NO_FLUSH;
593 else
594 flags = ACL_START;
595
596 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
597 hci_send_acl(chan->conn->hchan, skb, flags);
598 }
599
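/* Build and send an S-frame (supervisory frame) carrying the given control
 * field.  The header size depends on whether the extended control field is
 * in use, the F and P bits are filled in from the pending
 * CONN_SEND_FBIT/CONN_SEND_PBIT state, and a CRC16 FCS is appended when the
 * channel uses it.
 */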
600 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
601 {
602 struct sk_buff *skb;
603 struct l2cap_hdr *lh;
604 struct l2cap_conn *conn = chan->conn;
605 int count, hlen;
606
607 if (chan->state != BT_CONNECTED)
608 return;
609
610 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
611 hlen = L2CAP_EXT_HDR_SIZE;
612 else
613 hlen = L2CAP_ENH_HDR_SIZE;
614
615 if (chan->fcs == L2CAP_FCS_CRC16)
616 hlen += L2CAP_FCS_SIZE;
617
618 BT_DBG("chan %p, control 0x%8.8x", chan, control);
619
620 count = min_t(unsigned int, conn->mtu, hlen);
621
622 control |= __set_sframe(chan);
623
624 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
625 control |= __set_ctrl_final(chan);
626
627 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
628 control |= __set_ctrl_poll(chan);
629
630 skb = bt_skb_alloc(count, GFP_ATOMIC);
631 if (!skb)
632 return;
633
634 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
635 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
636 lh->cid = cpu_to_le16(chan->dcid);
637
638 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
639
640 if (chan->fcs == L2CAP_FCS_CRC16) {
641 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
642 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
643 }
644
645 skb->priority = HCI_PRIO_MAX;
646 l2cap_do_send(chan, skb);
647 }
648
649 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
650 {
651 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
652 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
653 set_bit(CONN_RNR_SENT, &chan->conn_state);
654 } else
655 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
656
657 control |= __set_reqseq(chan, chan->buffer_seq);
658
659 l2cap_send_sframe(chan, control);
660 }
661
662 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
663 {
664 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
665 }
666
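/* Start establishing a connection-oriented channel.  Once the feature mask
 * exchange for this connection has completed, send an L2CAP Connection
 * Request (provided the security check passes and none is already
 * pending); until then, make sure a feature mask Information Request has
 * been sent and wait for the exchange to finish.
 */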
667 static void l2cap_do_start(struct l2cap_chan *chan)
668 {
669 struct l2cap_conn *conn = chan->conn;
670
671 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
672 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
673 return;
674
675 if (l2cap_chan_check_security(chan) &&
676 __l2cap_no_conn_pending(chan)) {
677 struct l2cap_conn_req req;
678 req.scid = cpu_to_le16(chan->scid);
679 req.psm = chan->psm;
680
681 chan->ident = l2cap_get_ident(conn);
682 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
683
684 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
685 sizeof(req), &req);
686 }
687 } else {
688 struct l2cap_info_req req;
689 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
690
691 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
692 conn->info_ident = l2cap_get_ident(conn);
693
694 schedule_delayed_work(&conn->info_timer,
695 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
696
697 l2cap_send_cmd(conn, conn->info_ident,
698 L2CAP_INFO_REQ, sizeof(req), &req);
699 }
700 }
701
702 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
703 {
704 u32 local_feat_mask = l2cap_feat_mask;
705 if (!disable_ertm)
706 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
707
708 switch (mode) {
709 case L2CAP_MODE_ERTM:
710 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
711 case L2CAP_MODE_STREAMING:
712 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
713 default:
714 return 0x00;
715 }
716 }
717
718 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
719 {
720 struct sock *sk;
721 struct l2cap_disconn_req req;
722
723 if (!conn)
724 return;
725
726 sk = chan->sk;
727
728 if (chan->mode == L2CAP_MODE_ERTM) {
729 __clear_retrans_timer(chan);
730 __clear_monitor_timer(chan);
731 __clear_ack_timer(chan);
732 }
733
734 req.dcid = cpu_to_le16(chan->dcid);
735 req.scid = cpu_to_le16(chan->scid);
736 l2cap_send_cmd(conn, l2cap_get_ident(conn),
737 L2CAP_DISCONN_REQ, sizeof(req), &req);
738
739 l2cap_state_change(chan, BT_DISCONN);
740 sk->sk_err = err;
741 }
742
743 /* ---- L2CAP connections ---- */
744 static void l2cap_conn_start(struct l2cap_conn *conn)
745 {
746 struct l2cap_chan *chan;
747
748 BT_DBG("conn %p", conn);
749
750 rcu_read_lock();
751
752 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
753 struct sock *sk = chan->sk;
754
755 bh_lock_sock(sk);
756
757 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
758 bh_unlock_sock(sk);
759 continue;
760 }
761
762 if (chan->state == BT_CONNECT) {
763 struct l2cap_conn_req req;
764
765 if (!l2cap_chan_check_security(chan) ||
766 !__l2cap_no_conn_pending(chan)) {
767 bh_unlock_sock(sk);
768 continue;
769 }
770
771 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
772 && test_bit(CONF_STATE2_DEVICE,
773 &chan->conf_state)) {
774 /* l2cap_chan_close() removes the channel
775 * from the connection's channel list */
776 l2cap_chan_close(chan, ECONNRESET);
777 bh_unlock_sock(sk);
778 continue;
779 }
780
781 req.scid = cpu_to_le16(chan->scid);
782 req.psm = chan->psm;
783
784 chan->ident = l2cap_get_ident(conn);
785 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
786
787 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
788 sizeof(req), &req);
789
790 } else if (chan->state == BT_CONNECT2) {
791 struct l2cap_conn_rsp rsp;
792 char buf[128];
793 rsp.scid = cpu_to_le16(chan->dcid);
794 rsp.dcid = cpu_to_le16(chan->scid);
795
796 if (l2cap_chan_check_security(chan)) {
797 if (bt_sk(sk)->defer_setup) {
798 struct sock *parent = bt_sk(sk)->parent;
799 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
800 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
801 if (parent)
802 parent->sk_data_ready(parent, 0);
803
804 } else {
805 l2cap_state_change(chan, BT_CONFIG);
806 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
807 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
808 }
809 } else {
810 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
811 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
812 }
813
814 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
815 sizeof(rsp), &rsp);
816
817 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
818 rsp.result != L2CAP_CR_SUCCESS) {
819 bh_unlock_sock(sk);
820 continue;
821 }
822
823 set_bit(CONF_REQ_SENT, &chan->conf_state);
824 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
825 l2cap_build_conf_req(chan, buf), buf);
826 chan->num_conf_req++;
827 }
828
829 bh_unlock_sock(sk);
830 }
831
832 rcu_read_unlock();
833 }
834
835 /* Find channel with given CID and source bdaddr.
836 * Returns closest match.
837 */
838 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
839 {
840 struct l2cap_chan *c, *c1 = NULL;
841
842 read_lock(&chan_list_lock);
843
844 list_for_each_entry(c, &chan_list, global_l) {
845 struct sock *sk = c->sk;
846
847 if (state && c->state != state)
848 continue;
849
850 if (c->scid == cid) {
851 /* Exact match. */
852 if (!bacmp(&bt_sk(sk)->src, src)) {
853 read_unlock(&chan_list_lock);
854 return c;
855 }
856
857 /* Closest match */
858 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
859 c1 = c;
860 }
861 }
862
863 read_unlock(&chan_list_lock);
864
865 return c1;
866 }
867
868 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
869 {
870 struct sock *parent, *sk;
871 struct l2cap_chan *chan, *pchan;
872
873 BT_DBG("");
874
875 /* Check if we have a socket listening on this CID */
876 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
877 conn->src);
878 if (!pchan)
879 return;
880
881 parent = pchan->sk;
882
883 lock_sock(parent);
884
885 /* Check for backlog size */
886 if (sk_acceptq_is_full(parent)) {
887 BT_DBG("backlog full %d", parent->sk_ack_backlog);
888 goto clean;
889 }
890
891 chan = pchan->ops->new_connection(pchan->data);
892 if (!chan)
893 goto clean;
894
895 sk = chan->sk;
896
897 hci_conn_hold(conn->hcon);
898
899 bacpy(&bt_sk(sk)->src, conn->src);
900 bacpy(&bt_sk(sk)->dst, conn->dst);
901
902 bt_accept_enqueue(parent, sk);
903
904 l2cap_chan_add(conn, chan);
905
906 __set_chan_timer(chan, sk->sk_sndtimeo);
907
908 l2cap_state_change(chan, BT_CONNECTED);
909 parent->sk_data_ready(parent, 0);
910
911 clean:
912 release_sock(parent);
913 }
914
915 static void l2cap_chan_ready(struct l2cap_chan *chan)
916 {
917 struct sock *sk = chan->sk;
918 struct sock *parent = bt_sk(sk)->parent;
919
920 BT_DBG("sk %p, parent %p", sk, parent);
921
922 chan->conf_state = 0;
923 __clear_chan_timer(chan);
924
925 l2cap_state_change(chan, BT_CONNECTED);
926 sk->sk_state_change(sk);
927
928 if (parent)
929 parent->sk_data_ready(parent, 0);
930 }
931
932 static void l2cap_conn_ready(struct l2cap_conn *conn)
933 {
934 struct l2cap_chan *chan;
935
936 BT_DBG("conn %p", conn);
937
938 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
939 l2cap_le_conn_ready(conn);
940
941 if (conn->hcon->out && conn->hcon->type == LE_LINK)
942 smp_conn_security(conn, conn->hcon->pending_sec_level);
943
944 rcu_read_lock();
945
946 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
947 struct sock *sk = chan->sk;
948
949 bh_lock_sock(sk);
950
951 if (conn->hcon->type == LE_LINK) {
952 if (smp_conn_security(conn, chan->sec_level))
953 l2cap_chan_ready(chan);
954
955 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
956 __clear_chan_timer(chan);
957 l2cap_state_change(chan, BT_CONNECTED);
958 sk->sk_state_change(sk);
959
960 } else if (chan->state == BT_CONNECT)
961 l2cap_do_start(chan);
962
963 bh_unlock_sock(sk);
964 }
965
966 rcu_read_unlock();
967 }
968
969 /* Notify sockets that we cannot guarantee reliability anymore */
970 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
971 {
972 struct l2cap_chan *chan;
973
974 BT_DBG("conn %p", conn);
975
976 rcu_read_lock();
977
978 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
979 struct sock *sk = chan->sk;
980
981 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
982 sk->sk_err = err;
983 }
984
985 rcu_read_unlock();
986 }
987
988 static void l2cap_info_timeout(struct work_struct *work)
989 {
990 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
991 info_timer.work);
992
993 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
994 conn->info_ident = 0;
995
996 l2cap_conn_start(conn);
997 }
998
999 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1000 {
1001 struct l2cap_conn *conn = hcon->l2cap_data;
1002 struct l2cap_chan *chan, *l;
1003 struct sock *sk;
1004
1005 if (!conn)
1006 return;
1007
1008 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1009
1010 kfree_skb(conn->rx_skb);
1011
1012 /* Kill channels */
1013 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1014 sk = chan->sk;
1015 lock_sock(sk);
1016 l2cap_chan_del(chan, err);
1017 release_sock(sk);
1018 chan->ops->close(chan->data);
1019 }
1020
1021 hci_chan_del(conn->hchan);
1022
1023 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1024 cancel_delayed_work_sync(&conn->info_timer);
1025
1026 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1027 cancel_delayed_work_sync(&conn->security_timer);
1028 smp_chan_destroy(conn);
1029 }
1030
1031 hcon->l2cap_data = NULL;
1032 kfree(conn);
1033 }
1034
1035 static void security_timeout(struct work_struct *work)
1036 {
1037 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1038 security_timer.work);
1039
1040 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1041 }
1042
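/* Allocate and initialise the l2cap_conn object for a new HCI connection:
 * create the HCI channel, choose the MTU from the LE or ACL buffer size,
 * and initialise either the SMP security timeout work (LE links) or the
 * information request timeout work (BR/EDR links).
 */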
1043 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1044 {
1045 struct l2cap_conn *conn = hcon->l2cap_data;
1046 struct hci_chan *hchan;
1047
1048 if (conn || status)
1049 return conn;
1050
1051 hchan = hci_chan_create(hcon);
1052 if (!hchan)
1053 return NULL;
1054
1055 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1056 if (!conn) {
1057 hci_chan_del(hchan);
1058 return NULL;
1059 }
1060
1061 hcon->l2cap_data = conn;
1062 conn->hcon = hcon;
1063 conn->hchan = hchan;
1064
1065 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1066
1067 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1068 conn->mtu = hcon->hdev->le_mtu;
1069 else
1070 conn->mtu = hcon->hdev->acl_mtu;
1071
1072 conn->src = &hcon->hdev->bdaddr;
1073 conn->dst = &hcon->dst;
1074
1075 conn->feat_mask = 0;
1076
1077 spin_lock_init(&conn->lock);
1078
1079 INIT_LIST_HEAD(&conn->chan_l);
1080
1081 if (hcon->type == LE_LINK)
1082 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1083 else
1084 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1085
1086 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1087
1088 return conn;
1089 }
1090
1091 /* ---- Socket interface ---- */
1092
1093 /* Find channel with given PSM and source bdaddr.
1094 * Returns closest match.
1095 */
1096 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1097 {
1098 struct l2cap_chan *c, *c1 = NULL;
1099
1100 read_lock(&chan_list_lock);
1101
1102 list_for_each_entry(c, &chan_list, global_l) {
1103 struct sock *sk = c->sk;
1104
1105 if (state && c->state != state)
1106 continue;
1107
1108 if (c->psm == psm) {
1109 /* Exact match. */
1110 if (!bacmp(&bt_sk(sk)->src, src)) {
1111 read_unlock(&chan_list_lock);
1112 return c;
1113 }
1114
1115 /* Closest match */
1116 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1117 c1 = c;
1118 }
1119 }
1120
1121 read_unlock(&chan_list_lock);
1122
1123 return c1;
1124 }
1125
1126 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1127 {
1128 struct sock *sk = chan->sk;
1129 bdaddr_t *src = &bt_sk(sk)->src;
1130 struct l2cap_conn *conn;
1131 struct hci_conn *hcon;
1132 struct hci_dev *hdev;
1133 __u8 auth_type;
1134 int err;
1135
1136 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1137 chan->psm);
1138
1139 hdev = hci_get_route(dst, src);
1140 if (!hdev)
1141 return -EHOSTUNREACH;
1142
1143 hci_dev_lock(hdev);
1144
1145 lock_sock(sk);
1146
1147 /* PSM must be odd and lsb of upper byte must be 0 */
1148 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1149 chan->chan_type != L2CAP_CHAN_RAW) {
1150 err = -EINVAL;
1151 goto done;
1152 }
1153
1154 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1155 err = -EINVAL;
1156 goto done;
1157 }
1158
1159 switch (chan->mode) {
1160 case L2CAP_MODE_BASIC:
1161 break;
1162 case L2CAP_MODE_ERTM:
1163 case L2CAP_MODE_STREAMING:
1164 if (!disable_ertm)
1165 break;
1166 /* fall through */
1167 default:
1168 err = -ENOTSUPP;
1169 goto done;
1170 }
1171
1172 switch (sk->sk_state) {
1173 case BT_CONNECT:
1174 case BT_CONNECT2:
1175 case BT_CONFIG:
1176 /* Already connecting */
1177 err = 0;
1178 goto done;
1179
1180 case BT_CONNECTED:
1181 /* Already connected */
1182 err = -EISCONN;
1183 goto done;
1184
1185 case BT_OPEN:
1186 case BT_BOUND:
1187 /* Can connect */
1188 break;
1189
1190 default:
1191 err = -EBADFD;
1192 goto done;
1193 }
1194
1195 /* Set destination address and psm */
1196 bacpy(&bt_sk(sk)->dst, dst);
1197 chan->psm = psm;
1198 chan->dcid = cid;
1199
1200 auth_type = l2cap_get_auth_type(chan);
1201
1202 if (chan->dcid == L2CAP_CID_LE_DATA)
1203 hcon = hci_connect(hdev, LE_LINK, dst,
1204 chan->sec_level, auth_type);
1205 else
1206 hcon = hci_connect(hdev, ACL_LINK, dst,
1207 chan->sec_level, auth_type);
1208
1209 if (IS_ERR(hcon)) {
1210 err = PTR_ERR(hcon);
1211 goto done;
1212 }
1213
1214 conn = l2cap_conn_add(hcon, 0);
1215 if (!conn) {
1216 hci_conn_put(hcon);
1217 err = -ENOMEM;
1218 goto done;
1219 }
1220
1221 /* Update source addr of the socket */
1222 bacpy(src, conn->src);
1223
1224 l2cap_chan_add(conn, chan);
1225
1226 l2cap_state_change(chan, BT_CONNECT);
1227 __set_chan_timer(chan, sk->sk_sndtimeo);
1228
1229 if (hcon->state == BT_CONNECTED) {
1230 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1231 __clear_chan_timer(chan);
1232 if (l2cap_chan_check_security(chan))
1233 l2cap_state_change(chan, BT_CONNECTED);
1234 } else
1235 l2cap_do_start(chan);
1236 }
1237
1238 err = 0;
1239
1240 done:
1241 hci_dev_unlock(hdev);
1242 hci_dev_put(hdev);
1243 return err;
1244 }
1245
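/* Sleep (interruptibly, in HZ/5 slices) until all transmitted I-frames on
 * the channel have been acknowledged or the connection is gone, releasing
 * the socket lock while waiting.
 */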
1246 int __l2cap_wait_ack(struct sock *sk)
1247 {
1248 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1249 DECLARE_WAITQUEUE(wait, current);
1250 int err = 0;
1251 int timeo = HZ/5;
1252
1253 add_wait_queue(sk_sleep(sk), &wait);
1254 set_current_state(TASK_INTERRUPTIBLE);
1255 while (chan->unacked_frames > 0 && chan->conn) {
1256 if (!timeo)
1257 timeo = HZ/5;
1258
1259 if (signal_pending(current)) {
1260 err = sock_intr_errno(timeo);
1261 break;
1262 }
1263
1264 release_sock(sk);
1265 timeo = schedule_timeout(timeo);
1266 lock_sock(sk);
1267 set_current_state(TASK_INTERRUPTIBLE);
1268
1269 err = sock_error(sk);
1270 if (err)
1271 break;
1272 }
1273 set_current_state(TASK_RUNNING);
1274 remove_wait_queue(sk_sleep(sk), &wait);
1275 return err;
1276 }
1277
1278 static void l2cap_monitor_timeout(struct work_struct *work)
1279 {
1280 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1281 monitor_timer.work);
1282 struct sock *sk = chan->sk;
1283
1284 BT_DBG("chan %p", chan);
1285
1286 lock_sock(sk);
1287 if (chan->retry_count >= chan->remote_max_tx) {
1288 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1289 release_sock(sk);
1290 return;
1291 }
1292
1293 chan->retry_count++;
1294 __set_monitor_timer(chan);
1295
1296 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1297 release_sock(sk);
1298 }
1299
1300 static void l2cap_retrans_timeout(struct work_struct *work)
1301 {
1302 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1303 retrans_timer.work);
1304 struct sock *sk = chan->sk;
1305
1306 BT_DBG("chan %p", chan);
1307
1308 lock_sock(sk);
1309 chan->retry_count = 1;
1310 __set_monitor_timer(chan);
1311
1312 set_bit(CONN_WAIT_F, &chan->conn_state);
1313
1314 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1315 release_sock(sk);
1316 }
1317
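/* Free frames at the head of the transmit queue that the peer has already
 * acknowledged (everything before expected_ack_seq) and stop the
 * retransmission timer once no unacked frames remain.
 */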
1318 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1319 {
1320 struct sk_buff *skb;
1321
1322 while ((skb = skb_peek(&chan->tx_q)) &&
1323 chan->unacked_frames) {
1324 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1325 break;
1326
1327 skb = skb_dequeue(&chan->tx_q);
1328 kfree_skb(skb);
1329
1330 chan->unacked_frames--;
1331 }
1332
1333 if (!chan->unacked_frames)
1334 __clear_retrans_timer(chan);
1335 }
1336
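/* Streaming mode transmit: stamp each queued PDU with the next TxSeq,
 * recompute the FCS if enabled, and send it immediately.  Nothing is kept
 * for retransmission and no acknowledgements are expected.
 */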
1337 static void l2cap_streaming_send(struct l2cap_chan *chan)
1338 {
1339 struct sk_buff *skb;
1340 u32 control;
1341 u16 fcs;
1342
1343 while ((skb = skb_dequeue(&chan->tx_q))) {
1344 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1345 control |= __set_txseq(chan, chan->next_tx_seq);
1346 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1347
1348 if (chan->fcs == L2CAP_FCS_CRC16) {
1349 fcs = crc16(0, (u8 *)skb->data,
1350 skb->len - L2CAP_FCS_SIZE);
1351 put_unaligned_le16(fcs,
1352 skb->data + skb->len - L2CAP_FCS_SIZE);
1353 }
1354
1355 l2cap_do_send(chan, skb);
1356
1357 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1358 }
1359 }
1360
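/* Retransmit the single I-frame with the given TxSeq: locate it in the
 * transmit queue, clone it, refresh its ReqSeq, F bit and FCS, and send
 * the clone.  If the frame has already been transmitted remote_max_tx
 * times the channel is disconnected instead.
 */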
1361 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1362 {
1363 struct sk_buff *skb, *tx_skb;
1364 u16 fcs;
1365 u32 control;
1366
1367 skb = skb_peek(&chan->tx_q);
1368 if (!skb)
1369 return;
1370
1371 while (bt_cb(skb)->tx_seq != tx_seq) {
1372 if (skb_queue_is_last(&chan->tx_q, skb))
1373 return;
1374
1375 skb = skb_queue_next(&chan->tx_q, skb);
1376 }
1377
1378 if (chan->remote_max_tx &&
1379 bt_cb(skb)->retries == chan->remote_max_tx) {
1380 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1381 return;
1382 }
1383
1384 tx_skb = skb_clone(skb, GFP_ATOMIC);
1385 bt_cb(skb)->retries++;
1386
1387 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1388 control &= __get_sar_mask(chan);
1389
1390 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1391 control |= __set_ctrl_final(chan);
1392
1393 control |= __set_reqseq(chan, chan->buffer_seq);
1394 control |= __set_txseq(chan, tx_seq);
1395
1396 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1397
1398 if (chan->fcs == L2CAP_FCS_CRC16) {
1399 fcs = crc16(0, (u8 *)tx_skb->data,
1400 tx_skb->len - L2CAP_FCS_SIZE);
1401 put_unaligned_le16(fcs,
1402 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1403 }
1404
1405 l2cap_do_send(chan, tx_skb);
1406 }
1407
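/* ERTM transmit path: send I-frames from tx_send_head while the transmit
 * window is not full.  Each frame is cloned (the original stays queued
 * until acknowledged), stamped with TxSeq, ReqSeq and an optional FCS, and
 * the retransmission timer is (re)started.  Returns the number of frames
 * sent for the first time, or a negative error if not connected.
 */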
1408 static int l2cap_ertm_send(struct l2cap_chan *chan)
1409 {
1410 struct sk_buff *skb, *tx_skb;
1411 u16 fcs;
1412 u32 control;
1413 int nsent = 0;
1414
1415 if (chan->state != BT_CONNECTED)
1416 return -ENOTCONN;
1417
1418 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1419
1420 if (chan->remote_max_tx &&
1421 bt_cb(skb)->retries == chan->remote_max_tx) {
1422 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1423 break;
1424 }
1425
1426 tx_skb = skb_clone(skb, GFP_ATOMIC);
1427
1428 bt_cb(skb)->retries++;
1429
1430 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1431 control &= __get_sar_mask(chan);
1432
1433 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1434 control |= __set_ctrl_final(chan);
1435
1436 control |= __set_reqseq(chan, chan->buffer_seq);
1437 control |= __set_txseq(chan, chan->next_tx_seq);
1438
1439 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1440
1441 if (chan->fcs == L2CAP_FCS_CRC16) {
1442 fcs = crc16(0, (u8 *)skb->data,
1443 tx_skb->len - L2CAP_FCS_SIZE);
1444 put_unaligned_le16(fcs, skb->data +
1445 tx_skb->len - L2CAP_FCS_SIZE);
1446 }
1447
1448 l2cap_do_send(chan, tx_skb);
1449
1450 __set_retrans_timer(chan);
1451
1452 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1453
1454 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1455
1456 if (bt_cb(skb)->retries == 1) {
1457 chan->unacked_frames++;
1458 nsent++;
1459 }
1460
1461 chan->frames_sent++;
1462
1463 if (skb_queue_is_last(&chan->tx_q, skb))
1464 chan->tx_send_head = NULL;
1465 else
1466 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1467 }
1468
1469 return nsent;
1470 }
1471
1472 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1473 {
1474 int ret;
1475
1476 if (!skb_queue_empty(&chan->tx_q))
1477 chan->tx_send_head = chan->tx_q.next;
1478
1479 chan->next_tx_seq = chan->expected_ack_seq;
1480 ret = l2cap_ertm_send(chan);
1481 return ret;
1482 }
1483
1484 static void __l2cap_send_ack(struct l2cap_chan *chan)
1485 {
1486 u32 control = 0;
1487
1488 control |= __set_reqseq(chan, chan->buffer_seq);
1489
1490 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1491 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1492 set_bit(CONN_RNR_SENT, &chan->conn_state);
1493 l2cap_send_sframe(chan, control);
1494 return;
1495 }
1496
1497 if (l2cap_ertm_send(chan) > 0)
1498 return;
1499
1500 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1501 l2cap_send_sframe(chan, control);
1502 }
1503
1504 static void l2cap_send_ack(struct l2cap_chan *chan)
1505 {
1506 __clear_ack_timer(chan);
1507 __l2cap_send_ack(chan);
1508 }
1509
1510 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1511 {
1512 struct srej_list *tail;
1513 u32 control;
1514
1515 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1516 control |= __set_ctrl_final(chan);
1517
1518 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1519 control |= __set_reqseq(chan, tail->tx_seq);
1520
1521 l2cap_send_sframe(chan, control);
1522 }
1523
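/* Copy user data from the message iovec into the skb, placing whatever
 * does not fit in the first buffer into continuation fragments (at most
 * conn->mtu bytes each) chained on the frag_list.  Returns the number of
 * bytes consumed or a negative error.
 */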
1524 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1525 {
1526 struct l2cap_conn *conn = chan->conn;
1527 struct sk_buff **frag;
1528 int err, sent = 0;
1529
1530 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1531 return -EFAULT;
1532
1533 sent += count;
1534 len -= count;
1535
1536 /* Continuation fragments (no L2CAP header) */
1537 frag = &skb_shinfo(skb)->frag_list;
1538 while (len) {
1539 count = min_t(unsigned int, conn->mtu, len);
1540
1541 *frag = chan->ops->alloc_skb(chan, count,
1542 msg->msg_flags & MSG_DONTWAIT, &err);
1543
1544 if (!*frag)
1545 return err;
1546 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1547 return -EFAULT;
1548
1549 (*frag)->priority = skb->priority;
1550
1551 sent += count;
1552 len -= count;
1553
1554 frag = &(*frag)->next;
1555 }
1556
1557 return sent;
1558 }
1559
1560 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1561 struct msghdr *msg, size_t len,
1562 u32 priority)
1563 {
1564 struct l2cap_conn *conn = chan->conn;
1565 struct sk_buff *skb;
1566 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1567 struct l2cap_hdr *lh;
1568
1569 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1570
1571 count = min_t(unsigned int, (conn->mtu - hlen), len);
1572
1573 skb = chan->ops->alloc_skb(chan, count + hlen,
1574 msg->msg_flags & MSG_DONTWAIT, &err);
1575
1576 if (!skb)
1577 return ERR_PTR(err);
1578
1579 skb->priority = priority;
1580
1581 /* Create L2CAP header */
1582 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1583 lh->cid = cpu_to_le16(chan->dcid);
1584 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1585 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1586
1587 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1588 if (unlikely(err < 0)) {
1589 kfree_skb(skb);
1590 return ERR_PTR(err);
1591 }
1592 return skb;
1593 }
1594
1595 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1596 struct msghdr *msg, size_t len,
1597 u32 priority)
1598 {
1599 struct l2cap_conn *conn = chan->conn;
1600 struct sk_buff *skb;
1601 int err, count, hlen = L2CAP_HDR_SIZE;
1602 struct l2cap_hdr *lh;
1603
1604 BT_DBG("chan %p len %d", chan, (int)len);
1605
1606 count = min_t(unsigned int, (conn->mtu - hlen), len);
1607
1608 skb = chan->ops->alloc_skb(chan, count + hlen,
1609 msg->msg_flags & MSG_DONTWAIT, &err);
1610
1611 if (!skb)
1612 return ERR_PTR(err);
1613
1614 skb->priority = priority;
1615
1616 /* Create L2CAP header */
1617 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1618 lh->cid = cpu_to_le16(chan->dcid);
1619 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1620
1621 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1622 if (unlikely(err < 0)) {
1623 kfree_skb(skb);
1624 return ERR_PTR(err);
1625 }
1626 return skb;
1627 }
1628
1629 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1630 struct msghdr *msg, size_t len,
1631 u32 control, u16 sdulen)
1632 {
1633 struct l2cap_conn *conn = chan->conn;
1634 struct sk_buff *skb;
1635 int err, count, hlen;
1636 struct l2cap_hdr *lh;
1637
1638 BT_DBG("chan %p len %d", chan, (int)len);
1639
1640 if (!conn)
1641 return ERR_PTR(-ENOTCONN);
1642
1643 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1644 hlen = L2CAP_EXT_HDR_SIZE;
1645 else
1646 hlen = L2CAP_ENH_HDR_SIZE;
1647
1648 if (sdulen)
1649 hlen += L2CAP_SDULEN_SIZE;
1650
1651 if (chan->fcs == L2CAP_FCS_CRC16)
1652 hlen += L2CAP_FCS_SIZE;
1653
1654 count = min_t(unsigned int, (conn->mtu - hlen), len);
1655
1656 skb = chan->ops->alloc_skb(chan, count + hlen,
1657 msg->msg_flags & MSG_DONTWAIT, &err);
1658
1659 if (!skb)
1660 return ERR_PTR(err);
1661
1662 /* Create L2CAP header */
1663 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1664 lh->cid = cpu_to_le16(chan->dcid);
1665 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1666
1667 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1668
1669 if (sdulen)
1670 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1671
1672 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1673 if (unlikely(err < 0)) {
1674 kfree_skb(skb);
1675 return ERR_PTR(err);
1676 }
1677
1678 if (chan->fcs == L2CAP_FCS_CRC16)
1679 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1680
1681 bt_cb(skb)->retries = 0;
1682 return skb;
1683 }
1684
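/* Segment an SDU larger than the remote MPS into a SAR sequence of I-frame
 * PDUs: a START PDU carrying the total SDU length, zero or more CONTINUE
 * PDUs and a final END PDU.  The PDUs are appended to the channel's
 * transmit queue; the caller decides when they get sent.
 */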
1685 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1686 {
1687 struct sk_buff *skb;
1688 struct sk_buff_head sar_queue;
1689 u32 control;
1690 size_t size = 0;
1691
1692 skb_queue_head_init(&sar_queue);
1693 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1694 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1695 if (IS_ERR(skb))
1696 return PTR_ERR(skb);
1697
1698 __skb_queue_tail(&sar_queue, skb);
1699 len -= chan->remote_mps;
1700 size += chan->remote_mps;
1701
1702 while (len > 0) {
1703 size_t buflen;
1704
1705 if (len > chan->remote_mps) {
1706 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1707 buflen = chan->remote_mps;
1708 } else {
1709 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1710 buflen = len;
1711 }
1712
1713 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1714 if (IS_ERR(skb)) {
1715 skb_queue_purge(&sar_queue);
1716 return PTR_ERR(skb);
1717 }
1718
1719 __skb_queue_tail(&sar_queue, skb);
1720 len -= buflen;
1721 size += buflen;
1722 }
1723 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1724 if (chan->tx_send_head == NULL)
1725 chan->tx_send_head = sar_queue.next;
1726
1727 return size;
1728 }
1729
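/* Entry point for outbound data on a channel.  Connectionless channels
 * transmit a single PDU right away; basic mode checks the outgoing MTU and
 * sends one PDU; ERTM and streaming mode queue the SDU (segmenting it if
 * it exceeds the remote MPS) and then either stream it out or run the ERTM
 * transmit machinery.  Returns the number of bytes accepted or a negative
 * error.
 */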
1730 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1731 u32 priority)
1732 {
1733 struct sk_buff *skb;
1734 u32 control;
1735 int err;
1736
1737 /* Connectionless channel */
1738 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1739 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1740 if (IS_ERR(skb))
1741 return PTR_ERR(skb);
1742
1743 l2cap_do_send(chan, skb);
1744 return len;
1745 }
1746
1747 switch (chan->mode) {
1748 case L2CAP_MODE_BASIC:
1749 /* Check outgoing MTU */
1750 if (len > chan->omtu)
1751 return -EMSGSIZE;
1752
1753 /* Create a basic PDU */
1754 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1755 if (IS_ERR(skb))
1756 return PTR_ERR(skb);
1757
1758 l2cap_do_send(chan, skb);
1759 err = len;
1760 break;
1761
1762 case L2CAP_MODE_ERTM:
1763 case L2CAP_MODE_STREAMING:
1764 /* Entire SDU fits into one PDU */
1765 if (len <= chan->remote_mps) {
1766 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1767 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1768 0);
1769 if (IS_ERR(skb))
1770 return PTR_ERR(skb);
1771
1772 __skb_queue_tail(&chan->tx_q, skb);
1773
1774 if (chan->tx_send_head == NULL)
1775 chan->tx_send_head = skb;
1776
1777 } else {
1778 /* Segment the SDU into multiple PDUs */
1779 err = l2cap_sar_segment_sdu(chan, msg, len);
1780 if (err < 0)
1781 return err;
1782 }
1783
1784 if (chan->mode == L2CAP_MODE_STREAMING) {
1785 l2cap_streaming_send(chan);
1786 err = len;
1787 break;
1788 }
1789
1790 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1791 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1792 err = len;
1793 break;
1794 }
1795
1796 err = l2cap_ertm_send(chan);
1797 if (err >= 0)
1798 err = len;
1799
1800 break;
1801
1802 default:
1803 BT_DBG("bad state %1.1x", chan->mode);
1804 err = -EBADFD;
1805 }
1806
1807 return err;
1808 }
1809
1810 /* Copy frame to all raw sockets on that connection */
1811 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1812 {
1813 struct sk_buff *nskb;
1814 struct l2cap_chan *chan;
1815
1816 BT_DBG("conn %p", conn);
1817
1818 rcu_read_lock();
1819
1820 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
1821 struct sock *sk = chan->sk;
1822 if (chan->chan_type != L2CAP_CHAN_RAW)
1823 continue;
1824
1825 /* Don't send frame to the socket it came from */
1826 if (skb->sk == sk)
1827 continue;
1828 nskb = skb_clone(skb, GFP_ATOMIC);
1829 if (!nskb)
1830 continue;
1831
1832 if (chan->ops->recv(chan->data, nskb))
1833 kfree_skb(nskb);
1834 }
1835
1836 rcu_read_unlock();
1837 }
1838
1839 /* ---- L2CAP signalling commands ---- */
1840 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1841 u8 code, u8 ident, u16 dlen, void *data)
1842 {
1843 struct sk_buff *skb, **frag;
1844 struct l2cap_cmd_hdr *cmd;
1845 struct l2cap_hdr *lh;
1846 int len, count;
1847
1848 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1849 conn, code, ident, dlen);
1850
1851 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1852 count = min_t(unsigned int, conn->mtu, len);
1853
1854 skb = bt_skb_alloc(count, GFP_ATOMIC);
1855 if (!skb)
1856 return NULL;
1857
1858 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1859 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1860
1861 if (conn->hcon->type == LE_LINK)
1862 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1863 else
1864 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1865
1866 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1867 cmd->code = code;
1868 cmd->ident = ident;
1869 cmd->len = cpu_to_le16(dlen);
1870
1871 if (dlen) {
1872 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1873 memcpy(skb_put(skb, count), data, count);
1874 data += count;
1875 }
1876
1877 len -= skb->len;
1878
1879 /* Continuation fragments (no L2CAP header) */
1880 frag = &skb_shinfo(skb)->frag_list;
1881 while (len) {
1882 count = min_t(unsigned int, conn->mtu, len);
1883
1884 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1885 if (!*frag)
1886 goto fail;
1887
1888 memcpy(skb_put(*frag, count), data, count);
1889
1890 len -= count;
1891 data += count;
1892
1893 frag = &(*frag)->next;
1894 }
1895
1896 return skb;
1897
1898 fail:
1899 kfree_skb(skb);
1900 return NULL;
1901 }
1902
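/* Configuration option helpers.  Every option is encoded as
 * { u8 type; u8 len; u8 val[len] }: l2cap_get_conf_opt() decodes the option
 * at *ptr (1, 2 and 4 byte values are returned by value, larger ones by
 * pointer) and advances *ptr past it, while l2cap_add_conf_opt() appends an
 * option in the same format.  As an illustration, an MTU option of 672
 * bytes would be emitted roughly as 0x01 0x02 0xa0 0x02 (type, length,
 * little-endian value).
 */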
1903 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1904 {
1905 struct l2cap_conf_opt *opt = *ptr;
1906 int len;
1907
1908 len = L2CAP_CONF_OPT_SIZE + opt->len;
1909 *ptr += len;
1910
1911 *type = opt->type;
1912 *olen = opt->len;
1913
1914 switch (opt->len) {
1915 case 1:
1916 *val = *((u8 *) opt->val);
1917 break;
1918
1919 case 2:
1920 *val = get_unaligned_le16(opt->val);
1921 break;
1922
1923 case 4:
1924 *val = get_unaligned_le32(opt->val);
1925 break;
1926
1927 default:
1928 *val = (unsigned long) opt->val;
1929 break;
1930 }
1931
1932 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1933 return len;
1934 }
1935
1936 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1937 {
1938 struct l2cap_conf_opt *opt = *ptr;
1939
1940 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1941
1942 opt->type = type;
1943 opt->len = len;
1944
1945 switch (len) {
1946 case 1:
1947 *((u8 *) opt->val) = val;
1948 break;
1949
1950 case 2:
1951 put_unaligned_le16(val, opt->val);
1952 break;
1953
1954 case 4:
1955 put_unaligned_le32(val, opt->val);
1956 break;
1957
1958 default:
1959 memcpy(opt->val, (void *) val, len);
1960 break;
1961 }
1962
1963 *ptr += L2CAP_CONF_OPT_SIZE + len;
1964 }
1965
1966 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1967 {
1968 struct l2cap_conf_efs efs;
1969
1970 switch (chan->mode) {
1971 case L2CAP_MODE_ERTM:
1972 efs.id = chan->local_id;
1973 efs.stype = chan->local_stype;
1974 efs.msdu = cpu_to_le16(chan->local_msdu);
1975 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1976 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1977 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1978 break;
1979
1980 case L2CAP_MODE_STREAMING:
1981 efs.id = 1;
1982 efs.stype = L2CAP_SERV_BESTEFFORT;
1983 efs.msdu = cpu_to_le16(chan->local_msdu);
1984 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1985 efs.acc_lat = 0;
1986 efs.flush_to = 0;
1987 break;
1988
1989 default:
1990 return;
1991 }
1992
1993 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1994 (unsigned long) &efs);
1995 }
1996
1997 static void l2cap_ack_timeout(struct work_struct *work)
1998 {
1999 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2000 ack_timer.work);
2001
2002 BT_DBG("chan %p", chan);
2003
2004 lock_sock(chan->sk);
2005 __l2cap_send_ack(chan);
2006 release_sock(chan->sk);
2007
2008 l2cap_chan_put(chan);
2009 }
2010
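/* Reset the ERTM sequence number state and initialise the delayed work
 * items for the retransmission, monitor and acknowledgement timers as well
 * as the SREJ queue and list used for out-of-sequence recovery.
 */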
2011 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2012 {
2013 chan->expected_ack_seq = 0;
2014 chan->unacked_frames = 0;
2015 chan->buffer_seq = 0;
2016 chan->num_acked = 0;
2017 chan->frames_sent = 0;
2018
2019 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2020 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2021 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2022
2023 skb_queue_head_init(&chan->srej_q);
2024
2025 INIT_LIST_HEAD(&chan->srej_l);
2026 }
2027
2028 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2029 {
2030 switch (mode) {
2031 case L2CAP_MODE_STREAMING:
2032 case L2CAP_MODE_ERTM:
2033 if (l2cap_mode_supported(mode, remote_feat_mask))
2034 return mode;
2035 /* fall through */
2036 default:
2037 return L2CAP_MODE_BASIC;
2038 }
2039 }
2040
2041 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2042 {
2043 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2044 }
2045
2046 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2047 {
2048 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2049 }
2050
2051 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2052 {
2053 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2054 __l2cap_ews_supported(chan)) {
2055 /* use extended control field */
2056 set_bit(FLAG_EXT_CTRL, &chan->flags);
2057 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2058 } else {
2059 chan->tx_win = min_t(u16, chan->tx_win,
2060 L2CAP_DEFAULT_TX_WINDOW);
2061 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2062 }
2063 }
2064
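/* Build the option payload of an outgoing Configure Request: an MTU option
 * when it differs from the default, an RFC option describing the selected
 * mode (basic, ERTM or streaming) and, where applicable, FCS, EFS and
 * extended window size options.  Returns the number of bytes written to
 * the request buffer.
 */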
2065 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2066 {
2067 struct l2cap_conf_req *req = data;
2068 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2069 void *ptr = req->data;
2070 u16 size;
2071
2072 BT_DBG("chan %p", chan);
2073
2074 if (chan->num_conf_req || chan->num_conf_rsp)
2075 goto done;
2076
2077 switch (chan->mode) {
2078 case L2CAP_MODE_STREAMING:
2079 case L2CAP_MODE_ERTM:
2080 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2081 break;
2082
2083 if (__l2cap_efs_supported(chan))
2084 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2085
2086 /* fall through */
2087 default:
2088 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2089 break;
2090 }
2091
2092 done:
2093 if (chan->imtu != L2CAP_DEFAULT_MTU)
2094 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2095
2096 switch (chan->mode) {
2097 case L2CAP_MODE_BASIC:
2098 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2099 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2100 break;
2101
2102 rfc.mode = L2CAP_MODE_BASIC;
2103 rfc.txwin_size = 0;
2104 rfc.max_transmit = 0;
2105 rfc.retrans_timeout = 0;
2106 rfc.monitor_timeout = 0;
2107 rfc.max_pdu_size = 0;
2108
2109 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2110 (unsigned long) &rfc);
2111 break;
2112
2113 case L2CAP_MODE_ERTM:
2114 rfc.mode = L2CAP_MODE_ERTM;
2115 rfc.max_transmit = chan->max_tx;
2116 rfc.retrans_timeout = 0;
2117 rfc.monitor_timeout = 0;
2118
2119 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2120 L2CAP_EXT_HDR_SIZE -
2121 L2CAP_SDULEN_SIZE -
2122 L2CAP_FCS_SIZE);
2123 rfc.max_pdu_size = cpu_to_le16(size);
2124
2125 l2cap_txwin_setup(chan);
2126
2127 rfc.txwin_size = min_t(u16, chan->tx_win,
2128 L2CAP_DEFAULT_TX_WINDOW);
2129
2130 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2131 (unsigned long) &rfc);
2132
2133 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2134 l2cap_add_opt_efs(&ptr, chan);
2135
2136 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2137 break;
2138
2139 if (chan->fcs == L2CAP_FCS_NONE ||
2140 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2141 chan->fcs = L2CAP_FCS_NONE;
2142 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2143 }
2144
2145 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2146 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2147 chan->tx_win);
2148 break;
2149
2150 case L2CAP_MODE_STREAMING:
2151 rfc.mode = L2CAP_MODE_STREAMING;
2152 rfc.txwin_size = 0;
2153 rfc.max_transmit = 0;
2154 rfc.retrans_timeout = 0;
2155 rfc.monitor_timeout = 0;
2156
2157 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2158 L2CAP_EXT_HDR_SIZE -
2159 L2CAP_SDULEN_SIZE -
2160 L2CAP_FCS_SIZE);
2161 rfc.max_pdu_size = cpu_to_le16(size);
2162
2163 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2164 (unsigned long) &rfc);
2165
2166 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2167 l2cap_add_opt_efs(&ptr, chan);
2168
2169 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2170 break;
2171
2172 if (chan->fcs == L2CAP_FCS_NONE ||
2173 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2174 chan->fcs = L2CAP_FCS_NONE;
2175 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2176 }
2177 break;
2178 }
2179
2180 req->dcid = cpu_to_le16(chan->dcid);
2181 req->flags = cpu_to_le16(0);
2182
2183 return ptr - data;
2184 }
2185
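/* Parse the options of the Configure Request stored in chan->conf_req and
 * build the matching Configure Response in 'data': record the peer's MTU,
 * flush timeout, RFC, FCS, EFS and extended window settings, collect any
 * unknown non-hint options, and choose a SUCCESS, UNACCEPT, PENDING or
 * UNKNOWN result accordingly.
 */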
2186 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2187 {
2188 struct l2cap_conf_rsp *rsp = data;
2189 void *ptr = rsp->data;
2190 void *req = chan->conf_req;
2191 int len = chan->conf_len;
2192 int type, hint, olen;
2193 unsigned long val;
2194 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2195 struct l2cap_conf_efs efs;
2196 u8 remote_efs = 0;
2197 u16 mtu = L2CAP_DEFAULT_MTU;
2198 u16 result = L2CAP_CONF_SUCCESS;
2199 u16 size;
2200
2201 BT_DBG("chan %p", chan);
2202
2203 while (len >= L2CAP_CONF_OPT_SIZE) {
2204 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2205
2206 hint = type & L2CAP_CONF_HINT;
2207 type &= L2CAP_CONF_MASK;
2208
2209 switch (type) {
2210 case L2CAP_CONF_MTU:
2211 mtu = val;
2212 break;
2213
2214 case L2CAP_CONF_FLUSH_TO:
2215 chan->flush_to = val;
2216 break;
2217
2218 case L2CAP_CONF_QOS:
2219 break;
2220
2221 case L2CAP_CONF_RFC:
2222 if (olen == sizeof(rfc))
2223 memcpy(&rfc, (void *) val, olen);
2224 break;
2225
2226 case L2CAP_CONF_FCS:
2227 if (val == L2CAP_FCS_NONE)
2228 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2229 break;
2230
2231 case L2CAP_CONF_EFS:
2232 remote_efs = 1;
2233 if (olen == sizeof(efs))
2234 memcpy(&efs, (void *) val, olen);
2235 break;
2236
2237 case L2CAP_CONF_EWS:
2238 if (!enable_hs)
2239 return -ECONNREFUSED;
2240
2241 set_bit(FLAG_EXT_CTRL, &chan->flags);
2242 set_bit(CONF_EWS_RECV, &chan->conf_state);
2243 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2244 chan->remote_tx_win = val;
2245 break;
2246
2247 default:
2248 if (hint)
2249 break;
2250
2251 result = L2CAP_CONF_UNKNOWN;
2252 *((u8 *) ptr++) = type;
2253 break;
2254 }
2255 }
2256
2257 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2258 goto done;
2259
2260 switch (chan->mode) {
2261 case L2CAP_MODE_STREAMING:
2262 case L2CAP_MODE_ERTM:
2263 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2264 chan->mode = l2cap_select_mode(rfc.mode,
2265 chan->conn->feat_mask);
2266 break;
2267 }
2268
2269 if (remote_efs) {
2270 if (__l2cap_efs_supported(chan))
2271 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2272 else
2273 return -ECONNREFUSED;
2274 }
2275
2276 if (chan->mode != rfc.mode)
2277 return -ECONNREFUSED;
2278
2279 break;
2280 }
2281
2282 done:
2283 if (chan->mode != rfc.mode) {
2284 result = L2CAP_CONF_UNACCEPT;
2285 rfc.mode = chan->mode;
2286
2287 if (chan->num_conf_rsp == 1)
2288 return -ECONNREFUSED;
2289
2290 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2291 sizeof(rfc), (unsigned long) &rfc);
2292 }
2293
2294 if (result == L2CAP_CONF_SUCCESS) {
2295 /* Configure output options and let the other side know
2296 * which ones we don't like. */
2297
2298 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2299 result = L2CAP_CONF_UNACCEPT;
2300 else {
2301 chan->omtu = mtu;
2302 set_bit(CONF_MTU_DONE, &chan->conf_state);
2303 }
2304 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2305
2306 if (remote_efs) {
2307 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2308 efs.stype != L2CAP_SERV_NOTRAFIC &&
2309 efs.stype != chan->local_stype) {
2310
2311 result = L2CAP_CONF_UNACCEPT;
2312
2313 if (chan->num_conf_req >= 1)
2314 return -ECONNREFUSED;
2315
2316 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2317 sizeof(efs),
2318 (unsigned long) &efs);
2319 } else {
2320 /* Send PENDING Conf Rsp */
2321 result = L2CAP_CONF_PENDING;
2322 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2323 }
2324 }
2325
2326 switch (rfc.mode) {
2327 case L2CAP_MODE_BASIC:
2328 chan->fcs = L2CAP_FCS_NONE;
2329 set_bit(CONF_MODE_DONE, &chan->conf_state);
2330 break;
2331
2332 case L2CAP_MODE_ERTM:
2333 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2334 chan->remote_tx_win = rfc.txwin_size;
2335 else
2336 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2337
2338 chan->remote_max_tx = rfc.max_transmit;
2339
2340 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2341 chan->conn->mtu -
2342 L2CAP_EXT_HDR_SIZE -
2343 L2CAP_SDULEN_SIZE -
2344 L2CAP_FCS_SIZE);
2345 rfc.max_pdu_size = cpu_to_le16(size);
2346 chan->remote_mps = size;
2347
2348 rfc.retrans_timeout =
2349 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2350 rfc.monitor_timeout =
2351 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2352
2353 set_bit(CONF_MODE_DONE, &chan->conf_state);
2354
2355 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2356 sizeof(rfc), (unsigned long) &rfc);
2357
2358 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2359 chan->remote_id = efs.id;
2360 chan->remote_stype = efs.stype;
2361 chan->remote_msdu = le16_to_cpu(efs.msdu);
2362 chan->remote_flush_to =
2363 le32_to_cpu(efs.flush_to);
2364 chan->remote_acc_lat =
2365 le32_to_cpu(efs.acc_lat);
2366 chan->remote_sdu_itime =
2367 le32_to_cpu(efs.sdu_itime);
2368 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2369 sizeof(efs), (unsigned long) &efs);
2370 }
2371 break;
2372
2373 case L2CAP_MODE_STREAMING:
2374 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2375 chan->conn->mtu -
2376 L2CAP_EXT_HDR_SIZE -
2377 L2CAP_SDULEN_SIZE -
2378 L2CAP_FCS_SIZE);
2379 rfc.max_pdu_size = cpu_to_le16(size);
2380 chan->remote_mps = size;
2381
2382 set_bit(CONF_MODE_DONE, &chan->conf_state);
2383
2384 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2385 sizeof(rfc), (unsigned long) &rfc);
2386
2387 break;
2388
2389 default:
2390 result = L2CAP_CONF_UNACCEPT;
2391
2392 memset(&rfc, 0, sizeof(rfc));
2393 rfc.mode = chan->mode;
2394 }
2395
2396 if (result == L2CAP_CONF_SUCCESS)
2397 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2398 }
2399 rsp->scid = cpu_to_le16(chan->dcid);
2400 rsp->result = cpu_to_le16(result);
2401 rsp->flags = cpu_to_le16(0x0000);
2402
2403 return ptr - data;
2404 }
2405
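/* Walk the options of a Configure Response from the remote side,
 * apply the negotiated values to the channel and build a new
 * Configure Request in 'data'.
 * Returns the request length, or a negative error */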
2406 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2407 {
2408 struct l2cap_conf_req *req = data;
2409 void *ptr = req->data;
2410 int type, olen;
2411 unsigned long val;
2412 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2413 struct l2cap_conf_efs efs;
2414
2415 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2416
2417 while (len >= L2CAP_CONF_OPT_SIZE) {
2418 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2419
2420 switch (type) {
2421 case L2CAP_CONF_MTU:
2422 if (val < L2CAP_DEFAULT_MIN_MTU) {
2423 *result = L2CAP_CONF_UNACCEPT;
2424 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2425 } else
2426 chan->imtu = val;
2427 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2428 break;
2429
2430 case L2CAP_CONF_FLUSH_TO:
2431 chan->flush_to = val;
2432 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2433 2, chan->flush_to);
2434 break;
2435
2436 case L2CAP_CONF_RFC:
2437 if (olen == sizeof(rfc))
2438 memcpy(&rfc, (void *)val, olen);
2439
2440 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2441 rfc.mode != chan->mode)
2442 return -ECONNREFUSED;
2443
2444 chan->fcs = 0;
2445
2446 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2447 sizeof(rfc), (unsigned long) &rfc);
2448 break;
2449
2450 case L2CAP_CONF_EWS:
2451 chan->tx_win = min_t(u16, val,
2452 L2CAP_DEFAULT_EXT_WINDOW);
2453 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2454 chan->tx_win);
2455 break;
2456
2457 case L2CAP_CONF_EFS:
2458 if (olen == sizeof(efs))
2459 memcpy(&efs, (void *)val, olen);
2460
2461 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2462 efs.stype != L2CAP_SERV_NOTRAFIC &&
2463 efs.stype != chan->local_stype)
2464 return -ECONNREFUSED;
2465
2466 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2467 sizeof(efs), (unsigned long) &efs);
2468 break;
2469 }
2470 }
2471
2472 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2473 return -ECONNREFUSED;
2474
2475 chan->mode = rfc.mode;
2476
2477 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2478 switch (rfc.mode) {
2479 case L2CAP_MODE_ERTM:
2480 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2481 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2482 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2483
2484 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2485 chan->local_msdu = le16_to_cpu(efs.msdu);
2486 chan->local_sdu_itime =
2487 le32_to_cpu(efs.sdu_itime);
2488 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2489 chan->local_flush_to =
2490 le32_to_cpu(efs.flush_to);
2491 }
2492 break;
2493
2494 case L2CAP_MODE_STREAMING:
2495 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2496 }
2497 }
2498
2499 req->dcid = cpu_to_le16(chan->dcid);
2500 req->flags = cpu_to_le16(0x0000);
2501
2502 return ptr - data;
2503 }
2504
2505 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2506 {
2507 struct l2cap_conf_rsp *rsp = data;
2508 void *ptr = rsp->data;
2509
2510 BT_DBG("chan %p", chan);
2511
2512 rsp->scid = cpu_to_le16(chan->dcid);
2513 rsp->result = cpu_to_le16(result);
2514 rsp->flags = cpu_to_le16(flags);
2515
2516 return ptr - data;
2517 }
2518
2519 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2520 {
2521 struct l2cap_conn_rsp rsp;
2522 struct l2cap_conn *conn = chan->conn;
2523 u8 buf[128];
2524
2525 rsp.scid = cpu_to_le16(chan->dcid);
2526 rsp.dcid = cpu_to_le16(chan->scid);
2527 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2528 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2529 l2cap_send_cmd(conn, chan->ident,
2530 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2531
2532 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2533 return;
2534
2535 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2536 l2cap_build_conf_req(chan, buf), buf);
2537 chan->num_conf_req++;
2538 }
2539
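/* Extract the RFC option from a successful Configure Response and
 * apply the ERTM/streaming timeouts and MPS to the channel, falling
 * back to defaults if the remote did not send an RFC option */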
2540 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2541 {
2542 int type, olen;
2543 unsigned long val;
2544 struct l2cap_conf_rfc rfc;
2545
2546 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2547
2548 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2549 return;
2550
2551 while (len >= L2CAP_CONF_OPT_SIZE) {
2552 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2553
2554 switch (type) {
2555 case L2CAP_CONF_RFC:
2556 if (olen == sizeof(rfc))
2557 memcpy(&rfc, (void *)val, olen);
2558 goto done;
2559 }
2560 }
2561
2562 /* Use sane default values in case a misbehaving remote device
2563 * did not send an RFC option.
2564 */
2565 rfc.mode = chan->mode;
2566 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2567 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2568 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2569
2570 BT_ERR("Expected RFC option was not found, using defaults");
2571
2572 done:
2573 switch (rfc.mode) {
2574 case L2CAP_MODE_ERTM:
2575 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2576 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2577 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2578 break;
2579 case L2CAP_MODE_STREAMING:
2580 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2581 }
2582 }
2583
2584 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2585 {
2586 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2587
2588 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2589 return 0;
2590
2591 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2592 cmd->ident == conn->info_ident) {
2593 cancel_delayed_work(&conn->info_timer);
2594
2595 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2596 conn->info_ident = 0;
2597
2598 l2cap_conn_start(conn);
2599 }
2600
2601 return 0;
2602 }
2603
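/* Handle an incoming Connection Request: find a listening channel
 * for the PSM, run security and backlog checks, create the new
 * channel and send the Connection Response (plus an Information
 * Request if the remote feature mask is not known yet) */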
2604 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2605 {
2606 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2607 struct l2cap_conn_rsp rsp;
2608 struct l2cap_chan *chan = NULL, *pchan;
2609 struct sock *parent, *sk = NULL;
2610 int result, status = L2CAP_CS_NO_INFO;
2611
2612 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2613 __le16 psm = req->psm;
2614
2615 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2616
2617 /* Check if we have a socket listening on this psm */
2618 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2619 if (!pchan) {
2620 result = L2CAP_CR_BAD_PSM;
2621 goto sendresp;
2622 }
2623
2624 parent = pchan->sk;
2625
2626 lock_sock(parent);
2627
2628 /* Check if the ACL is secure enough (if not SDP) */
2629 if (psm != cpu_to_le16(0x0001) &&
2630 !hci_conn_check_link_mode(conn->hcon)) {
2631 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2632 result = L2CAP_CR_SEC_BLOCK;
2633 goto response;
2634 }
2635
2636 result = L2CAP_CR_NO_MEM;
2637
2638 /* Check for backlog size */
2639 if (sk_acceptq_is_full(parent)) {
2640 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2641 goto response;
2642 }
2643
2644 chan = pchan->ops->new_connection(pchan->data);
2645 if (!chan)
2646 goto response;
2647
2648 sk = chan->sk;
2649
2650 /* Check if we already have a channel with that dcid */
2651 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2652 sock_set_flag(sk, SOCK_ZAPPED);
2653 chan->ops->close(chan->data);
2654 goto response;
2655 }
2656
2657 hci_conn_hold(conn->hcon);
2658
2659 bacpy(&bt_sk(sk)->src, conn->src);
2660 bacpy(&bt_sk(sk)->dst, conn->dst);
2661 chan->psm = psm;
2662 chan->dcid = scid;
2663
2664 bt_accept_enqueue(parent, sk);
2665
2666 l2cap_chan_add(conn, chan);
2667
2668 dcid = chan->scid;
2669
2670 __set_chan_timer(chan, sk->sk_sndtimeo);
2671
2672 chan->ident = cmd->ident;
2673
2674 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2675 if (l2cap_chan_check_security(chan)) {
2676 if (bt_sk(sk)->defer_setup) {
2677 l2cap_state_change(chan, BT_CONNECT2);
2678 result = L2CAP_CR_PEND;
2679 status = L2CAP_CS_AUTHOR_PEND;
2680 parent->sk_data_ready(parent, 0);
2681 } else {
2682 l2cap_state_change(chan, BT_CONFIG);
2683 result = L2CAP_CR_SUCCESS;
2684 status = L2CAP_CS_NO_INFO;
2685 }
2686 } else {
2687 l2cap_state_change(chan, BT_CONNECT2);
2688 result = L2CAP_CR_PEND;
2689 status = L2CAP_CS_AUTHEN_PEND;
2690 }
2691 } else {
2692 l2cap_state_change(chan, BT_CONNECT2);
2693 result = L2CAP_CR_PEND;
2694 status = L2CAP_CS_NO_INFO;
2695 }
2696
2697 response:
2698 release_sock(parent);
2699
2700 sendresp:
2701 rsp.scid = cpu_to_le16(scid);
2702 rsp.dcid = cpu_to_le16(dcid);
2703 rsp.result = cpu_to_le16(result);
2704 rsp.status = cpu_to_le16(status);
2705 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2706
2707 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2708 struct l2cap_info_req info;
2709 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2710
2711 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2712 conn->info_ident = l2cap_get_ident(conn);
2713
2714 schedule_delayed_work(&conn->info_timer,
2715 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2716
2717 l2cap_send_cmd(conn, conn->info_ident,
2718 L2CAP_INFO_REQ, sizeof(info), &info);
2719 }
2720
2721 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2722 result == L2CAP_CR_SUCCESS) {
2723 u8 buf[128];
2724 set_bit(CONF_REQ_SENT, &chan->conf_state);
2725 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2726 l2cap_build_conf_req(chan, buf), buf);
2727 chan->num_conf_req++;
2728 }
2729
2730 return 0;
2731 }
2732
2733 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2734 {
2735 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2736 u16 scid, dcid, result, status;
2737 struct l2cap_chan *chan;
2738 struct sock *sk;
2739 u8 req[128];
2740
2741 scid = __le16_to_cpu(rsp->scid);
2742 dcid = __le16_to_cpu(rsp->dcid);
2743 result = __le16_to_cpu(rsp->result);
2744 status = __le16_to_cpu(rsp->status);
2745
2746 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2747
2748 if (scid) {
2749 chan = l2cap_get_chan_by_scid(conn, scid);
2750 if (!chan)
2751 return -EFAULT;
2752 } else {
2753 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2754 if (!chan)
2755 return -EFAULT;
2756 }
2757
2758 sk = chan->sk;
2759
2760 switch (result) {
2761 case L2CAP_CR_SUCCESS:
2762 l2cap_state_change(chan, BT_CONFIG);
2763 chan->ident = 0;
2764 chan->dcid = dcid;
2765 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2766
2767 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2768 break;
2769
2770 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2771 l2cap_build_conf_req(chan, req), req);
2772 chan->num_conf_req++;
2773 break;
2774
2775 case L2CAP_CR_PEND:
2776 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2777 break;
2778
2779 default:
2780 l2cap_chan_del(chan, ECONNREFUSED);
2781 break;
2782 }
2783
2784 release_sock(sk);
2785 return 0;
2786 }
2787
2788 static inline void set_default_fcs(struct l2cap_chan *chan)
2789 {
2790 /* FCS is enabled only in ERTM or streaming mode, if one or both
2791 * sides request it.
2792 */
2793 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2794 chan->fcs = L2CAP_FCS_NONE;
2795 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2796 chan->fcs = L2CAP_FCS_CRC16;
2797 }
2798
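/* Handle an incoming Configure Request. Fragments (continuation
 * flag set) are accumulated in chan->conf_req; a complete request
 * is parsed, answered and may move the channel to BT_CONNECTED once
 * both directions are configured */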
2799 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2800 {
2801 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2802 u16 dcid, flags;
2803 u8 rsp[64];
2804 struct l2cap_chan *chan;
2805 struct sock *sk;
2806 int len;
2807
2808 dcid = __le16_to_cpu(req->dcid);
2809 flags = __le16_to_cpu(req->flags);
2810
2811 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2812
2813 chan = l2cap_get_chan_by_scid(conn, dcid);
2814 if (!chan)
2815 return -ENOENT;
2816
2817 sk = chan->sk;
2818
2819 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2820 struct l2cap_cmd_rej_cid rej;
2821
2822 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2823 rej.scid = cpu_to_le16(chan->scid);
2824 rej.dcid = cpu_to_le16(chan->dcid);
2825
2826 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2827 sizeof(rej), &rej);
2828 goto unlock;
2829 }
2830
2831 /* Reject if the request is malformed or the config buffer would overflow. */
2832 len = cmd_len - sizeof(*req);
2833 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2834 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2835 l2cap_build_conf_rsp(chan, rsp,
2836 L2CAP_CONF_REJECT, flags), rsp);
2837 goto unlock;
2838 }
2839
2840 /* Store config. */
2841 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2842 chan->conf_len += len;
2843
2844 if (flags & 0x0001) {
2845 /* Incomplete config. Send empty response. */
2846 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2847 l2cap_build_conf_rsp(chan, rsp,
2848 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2849 goto unlock;
2850 }
2851
2852 /* Complete config. */
2853 len = l2cap_parse_conf_req(chan, rsp);
2854 if (len < 0) {
2855 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2856 goto unlock;
2857 }
2858
2859 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2860 chan->num_conf_rsp++;
2861
2862 /* Reset config buffer. */
2863 chan->conf_len = 0;
2864
2865 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2866 goto unlock;
2867
2868 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2869 set_default_fcs(chan);
2870
2871 l2cap_state_change(chan, BT_CONNECTED);
2872
2873 chan->next_tx_seq = 0;
2874 chan->expected_tx_seq = 0;
2875 skb_queue_head_init(&chan->tx_q);
2876 if (chan->mode == L2CAP_MODE_ERTM)
2877 l2cap_ertm_init(chan);
2878
2879 l2cap_chan_ready(chan);
2880 goto unlock;
2881 }
2882
2883 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2884 u8 buf[64];
2885 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2886 l2cap_build_conf_req(chan, buf), buf);
2887 chan->num_conf_req++;
2888 }
2889
2890 /* Got Conf Rsp PENDING from remote side and assume we sent
2891 Conf Rsp PENDING in the code above */
2892 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2893 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2894
2895 /* check compatibility */
2896
2897 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2898 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2899
2900 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2901 l2cap_build_conf_rsp(chan, rsp,
2902 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2903 }
2904
2905 unlock:
2906 release_sock(sk);
2907 return 0;
2908 }
2909
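/* Handle a Configure Response to one of our Configure Requests,
 * re-negotiating options on UNACCEPT/PENDING results and finishing
 * the connection setup when input and output are both configured */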
2910 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2911 {
2912 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2913 u16 scid, flags, result;
2914 struct l2cap_chan *chan;
2915 struct sock *sk;
2916 int len = __le16_to_cpu(cmd->len) - sizeof(*rsp);
2917
2918 scid = __le16_to_cpu(rsp->scid);
2919 flags = __le16_to_cpu(rsp->flags);
2920 result = __le16_to_cpu(rsp->result);
2921
2922 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2923 scid, flags, result);
2924
2925 chan = l2cap_get_chan_by_scid(conn, scid);
2926 if (!chan)
2927 return 0;
2928
2929 sk = chan->sk;
2930
2931 switch (result) {
2932 case L2CAP_CONF_SUCCESS:
2933 l2cap_conf_rfc_get(chan, rsp->data, len);
2934 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2935 break;
2936
2937 case L2CAP_CONF_PENDING:
2938 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2939
2940 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2941 char buf[64];
2942
2943 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2944 buf, &result);
2945 if (len < 0) {
2946 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2947 goto done;
2948 }
2949
2950 /* check compatibility */
2951
2952 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2953 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2954
2955 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2956 l2cap_build_conf_rsp(chan, buf,
2957 L2CAP_CONF_SUCCESS, 0x0000), buf);
2958 }
2959 goto done;
2960
2961 case L2CAP_CONF_UNACCEPT:
2962 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2963 char req[64];
2964
2965 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2966 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2967 goto done;
2968 }
2969
2970 /* throw out any old stored conf requests */
2971 result = L2CAP_CONF_SUCCESS;
2972 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2973 req, &result);
2974 if (len < 0) {
2975 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2976 goto done;
2977 }
2978
2979 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2980 L2CAP_CONF_REQ, len, req);
2981 chan->num_conf_req++;
2982 if (result != L2CAP_CONF_SUCCESS)
2983 goto done;
2984 break;
2985 }
2986
2987 default:
2988 sk->sk_err = ECONNRESET;
2989 __set_chan_timer(chan,
2990 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT));
2991 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2992 goto done;
2993 }
2994
2995 if (flags & 0x01)
2996 goto done;
2997
2998 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2999
3000 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3001 set_default_fcs(chan);
3002
3003 l2cap_state_change(chan, BT_CONNECTED);
3004 chan->next_tx_seq = 0;
3005 chan->expected_tx_seq = 0;
3006 skb_queue_head_init(&chan->tx_q);
3007 if (chan->mode == L2CAP_MODE_ERTM)
3008 l2cap_ertm_init(chan);
3009
3010 l2cap_chan_ready(chan);
3011 }
3012
3013 done:
3014 release_sock(sk);
3015 return 0;
3016 }
3017
3018 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3019 {
3020 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3021 struct l2cap_disconn_rsp rsp;
3022 u16 dcid, scid;
3023 struct l2cap_chan *chan;
3024 struct sock *sk;
3025
3026 scid = __le16_to_cpu(req->scid);
3027 dcid = __le16_to_cpu(req->dcid);
3028
3029 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3030
3031 chan = l2cap_get_chan_by_scid(conn, dcid);
3032 if (!chan)
3033 return 0;
3034
3035 sk = chan->sk;
3036
3037 rsp.dcid = cpu_to_le16(chan->scid);
3038 rsp.scid = cpu_to_le16(chan->dcid);
3039 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3040
3041 sk->sk_shutdown = SHUTDOWN_MASK;
3042
3043 l2cap_chan_del(chan, ECONNRESET);
3044 release_sock(sk);
3045
3046 chan->ops->close(chan->data);
3047 return 0;
3048 }
3049
3050 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3051 {
3052 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3053 u16 dcid, scid;
3054 struct l2cap_chan *chan;
3055 struct sock *sk;
3056
3057 scid = __le16_to_cpu(rsp->scid);
3058 dcid = __le16_to_cpu(rsp->dcid);
3059
3060 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3061
3062 chan = l2cap_get_chan_by_scid(conn, scid);
3063 if (!chan)
3064 return 0;
3065
3066 sk = chan->sk;
3067
3068 l2cap_chan_del(chan, 0);
3069 release_sock(sk);
3070
3071 chan->ops->close(chan->data);
3072 return 0;
3073 }
3074
3075 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3076 {
3077 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3078 u16 type;
3079
3080 type = __le16_to_cpu(req->type);
3081
3082 BT_DBG("type 0x%4.4x", type);
3083
3084 if (type == L2CAP_IT_FEAT_MASK) {
3085 u8 buf[8];
3086 u32 feat_mask = l2cap_feat_mask;
3087 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3088 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3089 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3090 if (!disable_ertm)
3091 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3092 | L2CAP_FEAT_FCS;
3093 if (enable_hs)
3094 feat_mask |= L2CAP_FEAT_EXT_FLOW
3095 | L2CAP_FEAT_EXT_WINDOW;
3096
3097 put_unaligned_le32(feat_mask, rsp->data);
3098 l2cap_send_cmd(conn, cmd->ident,
3099 L2CAP_INFO_RSP, sizeof(buf), buf);
3100 } else if (type == L2CAP_IT_FIXED_CHAN) {
3101 u8 buf[12];
3102 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3103
3104 if (enable_hs)
3105 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3106 else
3107 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3108
3109 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3110 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3111 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3112 l2cap_send_cmd(conn, cmd->ident,
3113 L2CAP_INFO_RSP, sizeof(buf), buf);
3114 } else {
3115 struct l2cap_info_rsp rsp;
3116 rsp.type = cpu_to_le16(type);
3117 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3118 l2cap_send_cmd(conn, cmd->ident,
3119 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3120 }
3121
3122 return 0;
3123 }
3124
3125 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3126 {
3127 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3128 u16 type, result;
3129
3130 type = __le16_to_cpu(rsp->type);
3131 result = __le16_to_cpu(rsp->result);
3132
3133 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3134
3135 /* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
3136 if (cmd->ident != conn->info_ident ||
3137 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3138 return 0;
3139
3140 cancel_delayed_work(&conn->info_timer);
3141
3142 if (result != L2CAP_IR_SUCCESS) {
3143 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3144 conn->info_ident = 0;
3145
3146 l2cap_conn_start(conn);
3147
3148 return 0;
3149 }
3150
3151 if (type == L2CAP_IT_FEAT_MASK) {
3152 conn->feat_mask = get_unaligned_le32(rsp->data);
3153
3154 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3155 struct l2cap_info_req req;
3156 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3157
3158 conn->info_ident = l2cap_get_ident(conn);
3159
3160 l2cap_send_cmd(conn, conn->info_ident,
3161 L2CAP_INFO_REQ, sizeof(req), &req);
3162 } else {
3163 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3164 conn->info_ident = 0;
3165
3166 l2cap_conn_start(conn);
3167 }
3168 } else if (type == L2CAP_IT_FIXED_CHAN) {
3169 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3170 conn->info_ident = 0;
3171
3172 l2cap_conn_start(conn);
3173 }
3174
3175 return 0;
3176 }
3177
3178 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3179 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3180 void *data)
3181 {
3182 struct l2cap_create_chan_req *req = data;
3183 struct l2cap_create_chan_rsp rsp;
3184 u16 psm, scid;
3185
3186 if (cmd_len != sizeof(*req))
3187 return -EPROTO;
3188
3189 if (!enable_hs)
3190 return -EINVAL;
3191
3192 psm = le16_to_cpu(req->psm);
3193 scid = le16_to_cpu(req->scid);
3194
3195 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3196
3197 /* Placeholder: Always reject */
3198 rsp.dcid = 0;
3199 rsp.scid = cpu_to_le16(scid);
3200 rsp.result = L2CAP_CR_NO_MEM;
3201 rsp.status = L2CAP_CS_NO_INFO;
3202
3203 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3204 sizeof(rsp), &rsp);
3205
3206 return 0;
3207 }
3208
3209 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3210 struct l2cap_cmd_hdr *cmd, void *data)
3211 {
3212 BT_DBG("conn %p", conn);
3213
3214 return l2cap_connect_rsp(conn, cmd, data);
3215 }
3216
3217 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3218 u16 icid, u16 result)
3219 {
3220 struct l2cap_move_chan_rsp rsp;
3221
3222 BT_DBG("icid %d, result %d", icid, result);
3223
3224 rsp.icid = cpu_to_le16(icid);
3225 rsp.result = cpu_to_le16(result);
3226
3227 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3228 }
3229
3230 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3231 struct l2cap_chan *chan, u16 icid, u16 result)
3232 {
3233 struct l2cap_move_chan_cfm cfm;
3234 u8 ident;
3235
3236 BT_DBG("icid %d, result %d", icid, result);
3237
3238 ident = l2cap_get_ident(conn);
3239 if (chan)
3240 chan->ident = ident;
3241
3242 cfm.icid = cpu_to_le16(icid);
3243 cfm.result = cpu_to_le16(result);
3244
3245 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3246 }
3247
3248 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3249 u16 icid)
3250 {
3251 struct l2cap_move_chan_cfm_rsp rsp;
3252
3253 BT_DBG("icid %d", icid);
3254
3255 rsp.icid = cpu_to_le16(icid);
3256 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3257 }
3258
3259 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3260 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3261 {
3262 struct l2cap_move_chan_req *req = data;
3263 u16 icid = 0;
3264 u16 result = L2CAP_MR_NOT_ALLOWED;
3265
3266 if (cmd_len != sizeof(*req))
3267 return -EPROTO;
3268
3269 icid = le16_to_cpu(req->icid);
3270
3271 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3272
3273 if (!enable_hs)
3274 return -EINVAL;
3275
3276 /* Placeholder: Always refuse */
3277 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3278
3279 return 0;
3280 }
3281
3282 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3283 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3284 {
3285 struct l2cap_move_chan_rsp *rsp = data;
3286 u16 icid, result;
3287
3288 if (cmd_len != sizeof(*rsp))
3289 return -EPROTO;
3290
3291 icid = le16_to_cpu(rsp->icid);
3292 result = le16_to_cpu(rsp->result);
3293
3294 BT_DBG("icid %d, result %d", icid, result);
3295
3296 /* Placeholder: Always unconfirmed */
3297 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3298
3299 return 0;
3300 }
3301
3302 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3303 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3304 {
3305 struct l2cap_move_chan_cfm *cfm = data;
3306 u16 icid, result;
3307
3308 if (cmd_len != sizeof(*cfm))
3309 return -EPROTO;
3310
3311 icid = le16_to_cpu(cfm->icid);
3312 result = le16_to_cpu(cfm->result);
3313
3314 BT_DBG("icid %d, result %d", icid, result);
3315
3316 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3317
3318 return 0;
3319 }
3320
3321 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3322 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3323 {
3324 struct l2cap_move_chan_cfm_rsp *rsp = data;
3325 u16 icid;
3326
3327 if (cmd_len != sizeof(*rsp))
3328 return -EPROTO;
3329
3330 icid = le16_to_cpu(rsp->icid);
3331
3332 BT_DBG("icid %d", icid);
3333
3334 return 0;
3335 }
3336
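/* Validate LE connection parameters: min/max interval in units of
 * 1.25 ms (6..3200), supervision timeout in units of 10 ms
 * (10..3200), and a slave latency (at most 499) low enough that
 * (latency + 1) intervals still fit within the supervision timeout */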
3337 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3338 u16 to_multiplier)
3339 {
3340 u16 max_latency;
3341
3342 if (min > max || min < 6 || max > 3200)
3343 return -EINVAL;
3344
3345 if (to_multiplier < 10 || to_multiplier > 3200)
3346 return -EINVAL;
3347
3348 if (max >= to_multiplier * 8)
3349 return -EINVAL;
3350
3351 max_latency = (to_multiplier * 8 / max) - 1;
3352 if (latency > 499 || latency > max_latency)
3353 return -EINVAL;
3354
3355 return 0;
3356 }
3357
3358 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3359 struct l2cap_cmd_hdr *cmd, u8 *data)
3360 {
3361 struct hci_conn *hcon = conn->hcon;
3362 struct l2cap_conn_param_update_req *req;
3363 struct l2cap_conn_param_update_rsp rsp;
3364 u16 min, max, latency, to_multiplier, cmd_len;
3365 int err;
3366
3367 if (!(hcon->link_mode & HCI_LM_MASTER))
3368 return -EINVAL;
3369
3370 cmd_len = __le16_to_cpu(cmd->len);
3371 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3372 return -EPROTO;
3373
3374 req = (struct l2cap_conn_param_update_req *) data;
3375 min = __le16_to_cpu(req->min);
3376 max = __le16_to_cpu(req->max);
3377 latency = __le16_to_cpu(req->latency);
3378 to_multiplier = __le16_to_cpu(req->to_multiplier);
3379
3380 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3381 min, max, latency, to_multiplier);
3382
3383 memset(&rsp, 0, sizeof(rsp));
3384
3385 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3386 if (err)
3387 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3388 else
3389 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3390
3391 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3392 sizeof(rsp), &rsp);
3393
3394 if (!err)
3395 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3396
3397 return 0;
3398 }
3399
3400 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3401 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3402 {
3403 int err = 0;
3404
3405 switch (cmd->code) {
3406 case L2CAP_COMMAND_REJ:
3407 l2cap_command_rej(conn, cmd, data);
3408 break;
3409
3410 case L2CAP_CONN_REQ:
3411 err = l2cap_connect_req(conn, cmd, data);
3412 break;
3413
3414 case L2CAP_CONN_RSP:
3415 err = l2cap_connect_rsp(conn, cmd, data);
3416 break;
3417
3418 case L2CAP_CONF_REQ:
3419 err = l2cap_config_req(conn, cmd, cmd_len, data);
3420 break;
3421
3422 case L2CAP_CONF_RSP:
3423 err = l2cap_config_rsp(conn, cmd, data);
3424 break;
3425
3426 case L2CAP_DISCONN_REQ:
3427 err = l2cap_disconnect_req(conn, cmd, data);
3428 break;
3429
3430 case L2CAP_DISCONN_RSP:
3431 err = l2cap_disconnect_rsp(conn, cmd, data);
3432 break;
3433
3434 case L2CAP_ECHO_REQ:
3435 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3436 break;
3437
3438 case L2CAP_ECHO_RSP:
3439 break;
3440
3441 case L2CAP_INFO_REQ:
3442 err = l2cap_information_req(conn, cmd, data);
3443 break;
3444
3445 case L2CAP_INFO_RSP:
3446 err = l2cap_information_rsp(conn, cmd, data);
3447 break;
3448
3449 case L2CAP_CREATE_CHAN_REQ:
3450 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3451 break;
3452
3453 case L2CAP_CREATE_CHAN_RSP:
3454 err = l2cap_create_channel_rsp(conn, cmd, data);
3455 break;
3456
3457 case L2CAP_MOVE_CHAN_REQ:
3458 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3459 break;
3460
3461 case L2CAP_MOVE_CHAN_RSP:
3462 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3463 break;
3464
3465 case L2CAP_MOVE_CHAN_CFM:
3466 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3467 break;
3468
3469 case L2CAP_MOVE_CHAN_CFM_RSP:
3470 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3471 break;
3472
3473 default:
3474 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3475 err = -EINVAL;
3476 break;
3477 }
3478
3479 return err;
3480 }
3481
3482 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3483 struct l2cap_cmd_hdr *cmd, u8 *data)
3484 {
3485 switch (cmd->code) {
3486 case L2CAP_COMMAND_REJ:
3487 return 0;
3488
3489 case L2CAP_CONN_PARAM_UPDATE_REQ:
3490 return l2cap_conn_param_update_req(conn, cmd, data);
3491
3492 case L2CAP_CONN_PARAM_UPDATE_RSP:
3493 return 0;
3494
3495 default:
3496 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3497 return -EINVAL;
3498 }
3499 }
3500
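/* Dispatch every signalling command carried in a C-frame to the
 * BR/EDR or LE handler and send a Command Reject for commands the
 * handlers refuse */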
3501 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3502 struct sk_buff *skb)
3503 {
3504 u8 *data = skb->data;
3505 int len = skb->len;
3506 struct l2cap_cmd_hdr cmd;
3507 int err;
3508
3509 l2cap_raw_recv(conn, skb);
3510
3511 while (len >= L2CAP_CMD_HDR_SIZE) {
3512 u16 cmd_len;
3513 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3514 data += L2CAP_CMD_HDR_SIZE;
3515 len -= L2CAP_CMD_HDR_SIZE;
3516
3517 cmd_len = le16_to_cpu(cmd.len);
3518
3519 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3520
3521 if (cmd_len > len || !cmd.ident) {
3522 BT_DBG("corrupted command");
3523 break;
3524 }
3525
3526 if (conn->hcon->type == LE_LINK)
3527 err = l2cap_le_sig_cmd(conn, &cmd, data);
3528 else
3529 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3530
3531 if (err) {
3532 struct l2cap_cmd_rej_unk rej;
3533
3534 BT_ERR("Wrong link type (%d)", err);
3535
3536 /* FIXME: Map err to a valid reason */
3537 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3538 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3539 }
3540
3541 data += cmd_len;
3542 len -= cmd_len;
3543 }
3544
3545 kfree_skb(skb);
3546 }
3547
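/* Verify the CRC-16 FCS of a received frame when FCS is enabled:
 * trim the FCS field and recompute the checksum over header and
 * payload. Returns -EBADMSG on mismatch */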
3548 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3549 {
3550 u16 our_fcs, rcv_fcs;
3551 int hdr_size;
3552
3553 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3554 hdr_size = L2CAP_EXT_HDR_SIZE;
3555 else
3556 hdr_size = L2CAP_ENH_HDR_SIZE;
3557
3558 if (chan->fcs == L2CAP_FCS_CRC16) {
3559 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3560 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3561 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3562
3563 if (our_fcs != rcv_fcs)
3564 return -EBADMSG;
3565 }
3566 return 0;
3567 }
3568
3569 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3570 {
3571 u32 control = 0;
3572
3573 chan->frames_sent = 0;
3574
3575 control |= __set_reqseq(chan, chan->buffer_seq);
3576
3577 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3578 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3579 l2cap_send_sframe(chan, control);
3580 set_bit(CONN_RNR_SENT, &chan->conn_state);
3581 }
3582
3583 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3584 l2cap_retransmit_frames(chan);
3585
3586 l2cap_ertm_send(chan);
3587
3588 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3589 chan->frames_sent == 0) {
3590 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3591 l2cap_send_sframe(chan, control);
3592 }
3593 }
3594
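/* Insert an out-of-sequence I-frame into the SREJ queue, keeping it
 * ordered by tx_seq relative to buffer_seq.
 * Returns -EINVAL if that tx_seq is already queued */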
3595 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3596 {
3597 struct sk_buff *next_skb;
3598 int tx_seq_offset, next_tx_seq_offset;
3599
3600 bt_cb(skb)->tx_seq = tx_seq;
3601 bt_cb(skb)->sar = sar;
3602
3603 next_skb = skb_peek(&chan->srej_q);
3604
3605 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3606
3607 while (next_skb) {
3608 if (bt_cb(next_skb)->tx_seq == tx_seq)
3609 return -EINVAL;
3610
3611 next_tx_seq_offset = __seq_offset(chan,
3612 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3613
3614 if (next_tx_seq_offset > tx_seq_offset) {
3615 __skb_queue_before(&chan->srej_q, next_skb, skb);
3616 return 0;
3617 }
3618
3619 if (skb_queue_is_last(&chan->srej_q, next_skb))
3620 next_skb = NULL;
3621 else
3622 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3623 }
3624
3625 __skb_queue_tail(&chan->srej_q, skb);
3626
3627 return 0;
3628 }
3629
3630 static void append_skb_frag(struct sk_buff *skb,
3631 struct sk_buff *new_frag, struct sk_buff **last_frag)
3632 {
3633 /* skb->len reflects data in skb as well as all fragments
3634 * skb->data_len reflects only data in fragments
3635 */
3636 if (!skb_has_frag_list(skb))
3637 skb_shinfo(skb)->frag_list = new_frag;
3638
3639 new_frag->next = NULL;
3640
3641 (*last_frag)->next = new_frag;
3642 *last_frag = new_frag;
3643
3644 skb->len += new_frag->len;
3645 skb->data_len += new_frag->len;
3646 skb->truesize += new_frag->truesize;
3647 }
3648
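/* Reassemble a segmented SDU from its Start/Continue/End I-frames
 * and pass the complete SDU to the upper layer; unsegmented frames
 * are delivered directly. Any partial SDU is discarded on error */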
3649 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3650 {
3651 int err = -EINVAL;
3652
3653 switch (__get_ctrl_sar(chan, control)) {
3654 case L2CAP_SAR_UNSEGMENTED:
3655 if (chan->sdu)
3656 break;
3657
3658 err = chan->ops->recv(chan->data, skb);
3659 break;
3660
3661 case L2CAP_SAR_START:
3662 if (chan->sdu)
3663 break;
3664
3665 chan->sdu_len = get_unaligned_le16(skb->data);
3666 skb_pull(skb, L2CAP_SDULEN_SIZE);
3667
3668 if (chan->sdu_len > chan->imtu) {
3669 err = -EMSGSIZE;
3670 break;
3671 }
3672
3673 if (skb->len >= chan->sdu_len)
3674 break;
3675
3676 chan->sdu = skb;
3677 chan->sdu_last_frag = skb;
3678
3679 skb = NULL;
3680 err = 0;
3681 break;
3682
3683 case L2CAP_SAR_CONTINUE:
3684 if (!chan->sdu)
3685 break;
3686
3687 append_skb_frag(chan->sdu, skb,
3688 &chan->sdu_last_frag);
3689 skb = NULL;
3690
3691 if (chan->sdu->len >= chan->sdu_len)
3692 break;
3693
3694 err = 0;
3695 break;
3696
3697 case L2CAP_SAR_END:
3698 if (!chan->sdu)
3699 break;
3700
3701 append_skb_frag(chan->sdu, skb,
3702 &chan->sdu_last_frag);
3703 skb = NULL;
3704
3705 if (chan->sdu->len != chan->sdu_len)
3706 break;
3707
3708 err = chan->ops->recv(chan->data, chan->sdu);
3709
3710 if (!err) {
3711 /* Reassembly complete */
3712 chan->sdu = NULL;
3713 chan->sdu_last_frag = NULL;
3714 chan->sdu_len = 0;
3715 }
3716 break;
3717 }
3718
3719 if (err) {
3720 kfree_skb(skb);
3721 kfree_skb(chan->sdu);
3722 chan->sdu = NULL;
3723 chan->sdu_last_frag = NULL;
3724 chan->sdu_len = 0;
3725 }
3726
3727 return err;
3728 }
3729
3730 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3731 {
3732 BT_DBG("chan %p, Enter local busy", chan);
3733
3734 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3735
3736 __set_ack_timer(chan);
3737 }
3738
3739 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3740 {
3741 u32 control;
3742
3743 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3744 goto done;
3745
3746 control = __set_reqseq(chan, chan->buffer_seq);
3747 control |= __set_ctrl_poll(chan);
3748 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3749 l2cap_send_sframe(chan, control);
3750 chan->retry_count = 1;
3751
3752 __clear_retrans_timer(chan);
3753 __set_monitor_timer(chan);
3754
3755 set_bit(CONN_WAIT_F, &chan->conn_state);
3756
3757 done:
3758 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3759 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3760
3761 BT_DBG("chan %p, Exit local busy", chan);
3762 }
3763
3764 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3765 {
3766 if (chan->mode == L2CAP_MODE_ERTM) {
3767 if (busy)
3768 l2cap_ertm_enter_local_busy(chan);
3769 else
3770 l2cap_ertm_exit_local_busy(chan);
3771 }
3772 }
3773
3774 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3775 {
3776 struct sk_buff *skb;
3777 u32 control;
3778
3779 while ((skb = skb_peek(&chan->srej_q)) &&
3780 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3781 int err;
3782
3783 if (bt_cb(skb)->tx_seq != tx_seq)
3784 break;
3785
3786 skb = skb_dequeue(&chan->srej_q);
3787 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3788 err = l2cap_reassemble_sdu(chan, skb, control);
3789
3790 if (err < 0) {
3791 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3792 break;
3793 }
3794
3795 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3796 tx_seq = __next_seq(chan, tx_seq);
3797 }
3798 }
3799
3800 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3801 {
3802 struct srej_list *l, *tmp;
3803 u32 control;
3804
3805 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3806 if (l->tx_seq == tx_seq) {
3807 list_del(&l->list);
3808 kfree(l);
3809 return;
3810 }
3811 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3812 control |= __set_reqseq(chan, l->tx_seq);
3813 l2cap_send_sframe(chan, control);
3814 list_del(&l->list);
3815 list_add_tail(&l->list, &chan->srej_l);
3816 }
3817 }
3818
3819 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3820 {
3821 struct srej_list *new;
3822 u32 control;
3823
3824 while (tx_seq != chan->expected_tx_seq) {
3825 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3826 control |= __set_reqseq(chan, chan->expected_tx_seq);
3827 l2cap_send_sframe(chan, control);
3828
3829 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3830 if (!new)
3831 return -ENOMEM;
3832
3833 new->tx_seq = chan->expected_tx_seq;
3834
3835 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3836
3837 list_add_tail(&new->list, &chan->srej_l);
3838 }
3839
3840 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3841
3842 return 0;
3843 }
3844
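/* ERTM I-frame receive path: acknowledge frames the peer has
 * received, detect missing or duplicate tx_seq values, drive the
 * SREJ recovery state machine and feed in-sequence frames to SDU
 * reassembly */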
3845 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3846 {
3847 u16 tx_seq = __get_txseq(chan, rx_control);
3848 u16 req_seq = __get_reqseq(chan, rx_control);
3849 u8 sar = __get_ctrl_sar(chan, rx_control);
3850 int tx_seq_offset, expected_tx_seq_offset;
3851 int num_to_ack = (chan->tx_win/6) + 1;
3852 int err = 0;
3853
3854 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3855 tx_seq, rx_control);
3856
3857 if (__is_ctrl_final(chan, rx_control) &&
3858 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3859 __clear_monitor_timer(chan);
3860 if (chan->unacked_frames > 0)
3861 __set_retrans_timer(chan);
3862 clear_bit(CONN_WAIT_F, &chan->conn_state);
3863 }
3864
3865 chan->expected_ack_seq = req_seq;
3866 l2cap_drop_acked_frames(chan);
3867
3868 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3869
3870 /* invalid tx_seq */
3871 if (tx_seq_offset >= chan->tx_win) {
3872 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3873 goto drop;
3874 }
3875
3876 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3877 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3878 l2cap_send_ack(chan);
3879 goto drop;
3880 }
3881
3882 if (tx_seq == chan->expected_tx_seq)
3883 goto expected;
3884
3885 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3886 struct srej_list *first;
3887
3888 first = list_first_entry(&chan->srej_l,
3889 struct srej_list, list);
3890 if (tx_seq == first->tx_seq) {
3891 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3892 l2cap_check_srej_gap(chan, tx_seq);
3893
3894 list_del(&first->list);
3895 kfree(first);
3896
3897 if (list_empty(&chan->srej_l)) {
3898 chan->buffer_seq = chan->buffer_seq_srej;
3899 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3900 l2cap_send_ack(chan);
3901 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3902 }
3903 } else {
3904 struct srej_list *l;
3905
3906 /* duplicated tx_seq */
3907 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3908 goto drop;
3909
3910 list_for_each_entry(l, &chan->srej_l, list) {
3911 if (l->tx_seq == tx_seq) {
3912 l2cap_resend_srejframe(chan, tx_seq);
3913 return 0;
3914 }
3915 }
3916
3917 err = l2cap_send_srejframe(chan, tx_seq);
3918 if (err < 0) {
3919 l2cap_send_disconn_req(chan->conn, chan, -err);
3920 return err;
3921 }
3922 }
3923 } else {
3924 expected_tx_seq_offset = __seq_offset(chan,
3925 chan->expected_tx_seq, chan->buffer_seq);
3926
3927 /* duplicated tx_seq */
3928 if (tx_seq_offset < expected_tx_seq_offset)
3929 goto drop;
3930
3931 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3932
3933 BT_DBG("chan %p, Enter SREJ", chan);
3934
3935 INIT_LIST_HEAD(&chan->srej_l);
3936 chan->buffer_seq_srej = chan->buffer_seq;
3937
3938 __skb_queue_head_init(&chan->srej_q);
3939 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3940
3941 /* Set P-bit only if there are some I-frames to ack. */
3942 if (__clear_ack_timer(chan))
3943 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3944
3945 err = l2cap_send_srejframe(chan, tx_seq);
3946 if (err < 0) {
3947 l2cap_send_disconn_req(chan->conn, chan, -err);
3948 return err;
3949 }
3950 }
3951 return 0;
3952
3953 expected:
3954 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3955
3956 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3957 bt_cb(skb)->tx_seq = tx_seq;
3958 bt_cb(skb)->sar = sar;
3959 __skb_queue_tail(&chan->srej_q, skb);
3960 return 0;
3961 }
3962
3963 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3964 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3965
3966 if (err < 0) {
3967 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3968 return err;
3969 }
3970
3971 if (__is_ctrl_final(chan, rx_control)) {
3972 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3973 l2cap_retransmit_frames(chan);
3974 }
3975
3976
3977 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3978 if (chan->num_acked == num_to_ack - 1)
3979 l2cap_send_ack(chan);
3980 else
3981 __set_ack_timer(chan);
3982
3983 return 0;
3984
3985 drop:
3986 kfree_skb(skb);
3987 return 0;
3988 }
3989
3990 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3991 {
3992 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3993 __get_reqseq(chan, rx_control), rx_control);
3994
3995 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3996 l2cap_drop_acked_frames(chan);
3997
3998 if (__is_ctrl_poll(chan, rx_control)) {
3999 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4000 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4001 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4002 (chan->unacked_frames > 0))
4003 __set_retrans_timer(chan);
4004
4005 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4006 l2cap_send_srejtail(chan);
4007 } else {
4008 l2cap_send_i_or_rr_or_rnr(chan);
4009 }
4010
4011 } else if (__is_ctrl_final(chan, rx_control)) {
4012 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4013
4014 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4015 l2cap_retransmit_frames(chan);
4016
4017 } else {
4018 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4019 (chan->unacked_frames > 0))
4020 __set_retrans_timer(chan);
4021
4022 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4023 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4024 l2cap_send_ack(chan);
4025 else
4026 l2cap_ertm_send(chan);
4027 }
4028 }
4029
4030 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4031 {
4032 u16 tx_seq = __get_reqseq(chan, rx_control);
4033
4034 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4035
4036 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4037
4038 chan->expected_ack_seq = tx_seq;
4039 l2cap_drop_acked_frames(chan);
4040
4041 if (__is_ctrl_final(chan, rx_control)) {
4042 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4043 l2cap_retransmit_frames(chan);
4044 } else {
4045 l2cap_retransmit_frames(chan);
4046
4047 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4048 set_bit(CONN_REJ_ACT, &chan->conn_state);
4049 }
4050 }
4051 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4052 {
4053 u16 tx_seq = __get_reqseq(chan, rx_control);
4054
4055 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4056
4057 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4058
4059 if (__is_ctrl_poll(chan, rx_control)) {
4060 chan->expected_ack_seq = tx_seq;
4061 l2cap_drop_acked_frames(chan);
4062
4063 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4064 l2cap_retransmit_one_frame(chan, tx_seq);
4065
4066 l2cap_ertm_send(chan);
4067
4068 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4069 chan->srej_save_reqseq = tx_seq;
4070 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4071 }
4072 } else if (__is_ctrl_final(chan, rx_control)) {
4073 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4074 chan->srej_save_reqseq == tx_seq)
4075 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4076 else
4077 l2cap_retransmit_one_frame(chan, tx_seq);
4078 } else {
4079 l2cap_retransmit_one_frame(chan, tx_seq);
4080 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4081 chan->srej_save_reqseq = tx_seq;
4082 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4083 }
4084 }
4085 }
4086
4087 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4088 {
4089 u16 tx_seq = __get_reqseq(chan, rx_control);
4090
4091 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4092
4093 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4094 chan->expected_ack_seq = tx_seq;
4095 l2cap_drop_acked_frames(chan);
4096
4097 if (__is_ctrl_poll(chan, rx_control))
4098 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4099
4100 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4101 __clear_retrans_timer(chan);
4102 if (__is_ctrl_poll(chan, rx_control))
4103 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4104 return;
4105 }
4106
4107 if (__is_ctrl_poll(chan, rx_control)) {
4108 l2cap_send_srejtail(chan);
4109 } else {
4110 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4111 l2cap_send_sframe(chan, rx_control);
4112 }
4113 }
4114
4115 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4116 {
4117 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4118
4119 if (__is_ctrl_final(chan, rx_control) &&
4120 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4121 __clear_monitor_timer(chan);
4122 if (chan->unacked_frames > 0)
4123 __set_retrans_timer(chan);
4124 clear_bit(CONN_WAIT_F, &chan->conn_state);
4125 }
4126
4127 switch (__get_ctrl_super(chan, rx_control)) {
4128 case L2CAP_SUPER_RR:
4129 l2cap_data_channel_rrframe(chan, rx_control);
4130 break;
4131
4132 case L2CAP_SUPER_REJ:
4133 l2cap_data_channel_rejframe(chan, rx_control);
4134 break;
4135
4136 case L2CAP_SUPER_SREJ:
4137 l2cap_data_channel_srejframe(chan, rx_control);
4138 break;
4139
4140 case L2CAP_SUPER_RNR:
4141 l2cap_data_channel_rnrframe(chan, rx_control);
4142 break;
4143 }
4144
4145 kfree_skb(skb);
4146 return 0;
4147 }
4148
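/* Common ERTM receive entry point: strip the control field, check
 * FCS, length and req_seq, then hand the frame to the I-frame or
 * S-frame handler */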
4149 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4150 {
4151 u32 control;
4152 u16 req_seq;
4153 int len, next_tx_seq_offset, req_seq_offset;
4154
4155 control = __get_control(chan, skb->data);
4156 skb_pull(skb, __ctrl_size(chan));
4157 len = skb->len;
4158
4159 /*
4160 * We can just drop the corrupted I-frame here.
4161 * The receive path will notice the missing tx_seq and start the
4162 * proper recovery procedure to request a retransmission.
4163 */
4164 if (l2cap_check_fcs(chan, skb))
4165 goto drop;
4166
4167 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4168 len -= L2CAP_SDULEN_SIZE;
4169
4170 if (chan->fcs == L2CAP_FCS_CRC16)
4171 len -= L2CAP_FCS_SIZE;
4172
4173 if (len > chan->mps) {
4174 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4175 goto drop;
4176 }
4177
4178 req_seq = __get_reqseq(chan, control);
4179
4180 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4181
4182 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4183 chan->expected_ack_seq);
4184
4185 /* check for invalid req-seq */
4186 if (req_seq_offset > next_tx_seq_offset) {
4187 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4188 goto drop;
4189 }
4190
4191 if (!__is_sframe(chan, control)) {
4192 if (len < 0) {
4193 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4194 goto drop;
4195 }
4196
4197 l2cap_data_channel_iframe(chan, control, skb);
4198 } else {
4199 if (len != 0) {
4200 BT_ERR("Trailing bytes: %d in sframe", len);
4201 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4202 goto drop;
4203 }
4204
4205 l2cap_data_channel_sframe(chan, control, skb);
4206 }
4207
4208 return 0;
4209
4210 drop:
4211 kfree_skb(skb);
4212 return 0;
4213 }
4214
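/* Deliver an incoming data frame to the channel identified by CID,
 * using the basic, ERTM or streaming receive path depending on the
 * channel mode */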
4215 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4216 {
4217 struct l2cap_chan *chan;
4218 struct sock *sk = NULL;
4219 u32 control;
4220 u16 tx_seq;
4221 int len;
4222
4223 chan = l2cap_get_chan_by_scid(conn, cid);
4224 if (!chan) {
4225 BT_DBG("unknown cid 0x%4.4x", cid);
4226 goto drop;
4227 }
4228
4229 sk = chan->sk;
4230
4231 BT_DBG("chan %p, len %d", chan, skb->len);
4232
4233 if (chan->state != BT_CONNECTED)
4234 goto drop;
4235
4236 switch (chan->mode) {
4237 case L2CAP_MODE_BASIC:
4238 /* If the socket receive buffer overflows we drop data here,
4239 * which is *bad* because L2CAP has to be reliable.
4240 * But we don't have any other choice. L2CAP doesn't
4241 * provide a flow control mechanism. */
4242
4243 if (chan->imtu < skb->len)
4244 goto drop;
4245
4246 if (!chan->ops->recv(chan->data, skb))
4247 goto done;
4248 break;
4249
4250 case L2CAP_MODE_ERTM:
4251 l2cap_ertm_data_rcv(chan, skb);
4252
4253 goto done;
4254
4255 case L2CAP_MODE_STREAMING:
4256 control = __get_control(chan, skb->data);
4257 skb_pull(skb, __ctrl_size(chan));
4258 len = skb->len;
4259
4260 if (l2cap_check_fcs(chan, skb))
4261 goto drop;
4262
4263 if (__is_sar_start(chan, control))
4264 len -= L2CAP_SDULEN_SIZE;
4265
4266 if (chan->fcs == L2CAP_FCS_CRC16)
4267 len -= L2CAP_FCS_SIZE;
4268
4269 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4270 goto drop;
4271
4272 tx_seq = __get_txseq(chan, control);
4273
4274 if (chan->expected_tx_seq != tx_seq) {
4275 /* Frame(s) missing - must discard partial SDU */
4276 kfree_skb(chan->sdu);
4277 chan->sdu = NULL;
4278 chan->sdu_last_frag = NULL;
4279 chan->sdu_len = 0;
4280
4281 /* TODO: Notify userland of missing data */
4282 }
4283
4284 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4285
4286 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4287 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4288
4289 goto done;
4290
4291 default:
4292 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4293 break;
4294 }
4295
4296 drop:
4297 kfree_skb(skb);
4298
4299 done:
4300 if (sk)
4301 release_sock(sk);
4302
4303 return 0;
4304 }
4305
4306 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4307 {
4308 struct sock *sk = NULL;
4309 struct l2cap_chan *chan;
4310
4311 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4312 if (!chan)
4313 goto drop;
4314
4315 sk = chan->sk;
4316
4317 lock_sock(sk);
4318
4319 BT_DBG("sk %p, len %d", sk, skb->len);
4320
4321 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4322 goto drop;
4323
4324 if (chan->imtu < skb->len)
4325 goto drop;
4326
4327 if (!chan->ops->recv(chan->data, skb))
4328 goto done;
4329
4330 drop:
4331 kfree_skb(skb);
4332
4333 done:
4334 if (sk)
4335 release_sock(sk);
4336 return 0;
4337 }
4338
4339 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4340 {
4341 struct sock *sk = NULL;
4342 struct l2cap_chan *chan;
4343
4344 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4345 if (!chan)
4346 goto drop;
4347
4348 sk = chan->sk;
4349
4350 lock_sock(sk);
4351
4352 BT_DBG("sk %p, len %d", sk, skb->len);
4353
4354 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4355 goto drop;
4356
4357 if (chan->imtu < skb->len)
4358 goto drop;
4359
4360 if (!chan->ops->recv(chan->data, skb))
4361 goto done;
4362
4363 drop:
4364 kfree_skb(skb);
4365
4366 done:
4367 if (sk)
4368 release_sock(sk);
4369 return 0;
4370 }
4371
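/*
 * Entry point for a complete L2CAP frame: strip the basic header and
 * dispatch on the CID - signalling, connectionless, LE data (ATT), SMP,
 * or a dynamically allocated data channel.
 */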
4372 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4373 {
4374 struct l2cap_hdr *lh = (void *) skb->data;
4375 u16 cid, len;
4376 __le16 psm;
4377
4378 skb_pull(skb, L2CAP_HDR_SIZE);
4379 cid = __le16_to_cpu(lh->cid);
4380 len = __le16_to_cpu(lh->len);
4381
4382 if (len != skb->len) {
4383 kfree_skb(skb);
4384 return;
4385 }
4386
4387 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4388
4389 switch (cid) {
4390 case L2CAP_CID_LE_SIGNALING:
4391 case L2CAP_CID_SIGNALING:
4392 l2cap_sig_channel(conn, skb);
4393 break;
4394
4395 case L2CAP_CID_CONN_LESS:
4396 psm = get_unaligned_le16(skb->data);
4397 skb_pull(skb, 2);
4398 l2cap_conless_channel(conn, psm, skb);
4399 break;
4400
4401 case L2CAP_CID_LE_DATA:
4402 l2cap_att_channel(conn, cid, skb);
4403 break;
4404
4405 case L2CAP_CID_SMP:
4406 if (smp_sig_channel(conn, skb))
4407 l2cap_conn_del(conn->hcon, EACCES);
4408 break;
4409
4410 default:
4411 l2cap_data_channel(conn, cid, skb);
4412 break;
4413 }
4414 }
4415
4416 /* ---- L2CAP interface with lower layer (HCI) ---- */
4417
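/*
 * Called by the HCI core for an incoming ACL connection request. Scan the
 * global channel list for listeners bound to this adapter (or to
 * BDADDR_ANY) and report the accepted link modes back to HCI.
 */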
4418 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4419 {
4420 int exact = 0, lm1 = 0, lm2 = 0;
4421 struct l2cap_chan *c;
4422
4423 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4424
4425 /* Find listening sockets and check their link_mode */
4426 read_lock(&chan_list_lock);
4427 list_for_each_entry(c, &chan_list, global_l) {
4428 struct sock *sk = c->sk;
4429
4430 if (c->state != BT_LISTEN)
4431 continue;
4432
4433 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4434 lm1 |= HCI_LM_ACCEPT;
4435 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4436 lm1 |= HCI_LM_MASTER;
4437 exact++;
4438 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4439 lm2 |= HCI_LM_ACCEPT;
4440 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4441 lm2 |= HCI_LM_MASTER;
4442 }
4443 }
4444 read_unlock(&chan_list_lock);
4445
4446 return exact ? lm1 : lm2;
4447 }
4448
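/*
 * HCI connect-complete callback: on success set up the L2CAP connection
 * and mark it ready, otherwise tear it down with the HCI status mapped to
 * an errno.
 */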
4449 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4450 {
4451 struct l2cap_conn *conn;
4452
4453 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4454
4455 if (!status) {
4456 conn = l2cap_conn_add(hcon, status);
4457 if (conn)
4458 l2cap_conn_ready(conn);
4459 } else
4460 l2cap_conn_del(hcon, bt_to_errno(status));
4461
4462 return 0;
4463 }
4464
4465 int l2cap_disconn_ind(struct hci_conn *hcon)
4466 {
4467 struct l2cap_conn *conn = hcon->l2cap_data;
4468
4469 BT_DBG("hcon %p", hcon);
4470
4471 if (!conn)
4472 return HCI_ERROR_REMOTE_USER_TERM;
4473 return conn->disc_reason;
4474 }
4475
4476 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4477 {
4478 BT_DBG("hcon %p reason %d", hcon, reason);
4479
4480 l2cap_conn_del(hcon, bt_to_errno(reason));
4481 return 0;
4482 }
4483
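/*
 * React to an encryption change on a connection-oriented channel: if
 * encryption was dropped, arm a timer (medium security) or close the
 * channel outright (high security); if it came up, clear any pending
 * encryption timer.
 */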
4484 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4485 {
4486 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4487 return;
4488
4489 if (encrypt == 0x00) {
4490 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4491 __clear_chan_timer(chan);
4492 __set_chan_timer(chan,
4493 msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
4494 } else if (chan->sec_level == BT_SECURITY_HIGH)
4495 l2cap_chan_close(chan, ECONNREFUSED);
4496 } else {
4497 if (chan->sec_level == BT_SECURITY_MEDIUM)
4498 __clear_chan_timer(chan);
4499 }
4500 }
4501
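/*
 * Security (authentication/encryption) result from the HCI core. Walk all
 * channels on the connection: LE data channels become ready once
 * encrypted, channels mid-connect either send their CONN_REQ or get a
 * disconnect timer, and channels in BT_CONNECT2 answer the pending
 * connect request according to the outcome.
 */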
4502 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4503 {
4504 struct l2cap_conn *conn = hcon->l2cap_data;
4505 struct l2cap_chan *chan;
4506
4507 if (!conn)
4508 return 0;
4509
4510 BT_DBG("conn %p", conn);
4511
4512 if (hcon->type == LE_LINK) {
4513 smp_distribute_keys(conn, 0);
4514 cancel_delayed_work(&conn->security_timer);
4515 }
4516
4517 rcu_read_lock();
4518
4519 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
4520 struct sock *sk = chan->sk;
4521
4522 bh_lock_sock(sk);
4523
4524 BT_DBG("chan->scid %d", chan->scid);
4525
4526 if (chan->scid == L2CAP_CID_LE_DATA) {
4527 if (!status && encrypt) {
4528 chan->sec_level = hcon->sec_level;
4529 l2cap_chan_ready(chan);
4530 }
4531
4532 bh_unlock_sock(sk);
4533 continue;
4534 }
4535
4536 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4537 bh_unlock_sock(sk);
4538 continue;
4539 }
4540
4541 if (!status && (chan->state == BT_CONNECTED ||
4542 chan->state == BT_CONFIG)) {
4543 l2cap_check_encryption(chan, encrypt);
4544 bh_unlock_sock(sk);
4545 continue;
4546 }
4547
4548 if (chan->state == BT_CONNECT) {
4549 if (!status) {
4550 struct l2cap_conn_req req;
4551 req.scid = cpu_to_le16(chan->scid);
4552 req.psm = chan->psm;
4553
4554 chan->ident = l2cap_get_ident(conn);
4555 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4556
4557 l2cap_send_cmd(conn, chan->ident,
4558 L2CAP_CONN_REQ, sizeof(req), &req);
4559 } else {
4560 __clear_chan_timer(chan);
4561 __set_chan_timer(chan,
4562 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4563 }
4564 } else if (chan->state == BT_CONNECT2) {
4565 struct l2cap_conn_rsp rsp;
4566 __u16 res, stat;
4567
4568 if (!status) {
4569 if (bt_sk(sk)->defer_setup) {
4570 struct sock *parent = bt_sk(sk)->parent;
4571 res = L2CAP_CR_PEND;
4572 stat = L2CAP_CS_AUTHOR_PEND;
4573 if (parent)
4574 parent->sk_data_ready(parent, 0);
4575 } else {
4576 l2cap_state_change(chan, BT_CONFIG);
4577 res = L2CAP_CR_SUCCESS;
4578 stat = L2CAP_CS_NO_INFO;
4579 }
4580 } else {
4581 l2cap_state_change(chan, BT_DISCONN);
4582 __set_chan_timer(chan,
4583 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4584 res = L2CAP_CR_SEC_BLOCK;
4585 stat = L2CAP_CS_NO_INFO;
4586 }
4587
4588 rsp.scid = cpu_to_le16(chan->dcid);
4589 rsp.dcid = cpu_to_le16(chan->scid);
4590 rsp.result = cpu_to_le16(res);
4591 rsp.status = cpu_to_le16(stat);
4592 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4593 sizeof(rsp), &rsp);
4594 }
4595
4596 bh_unlock_sock(sk);
4597 }
4598
4599 rcu_read_unlock();
4600
4601 return 0;
4602 }
4603
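/*
 * Reassemble L2CAP frames from incoming ACL fragments. A start fragment
 * carries the basic header with the total frame length; complete frames
 * are handed to l2cap_recv_frame() directly, otherwise the data is
 * accumulated in conn->rx_skb until rx_len bytes have arrived. Malformed
 * fragments mark the connection unreliable and are dropped.
 */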
4604 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4605 {
4606 struct l2cap_conn *conn = hcon->l2cap_data;
4607
4608 if (!conn)
4609 conn = l2cap_conn_add(hcon, 0);
4610
4611 if (!conn)
4612 goto drop;
4613
4614 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4615
4616 if (!(flags & ACL_CONT)) {
4617 struct l2cap_hdr *hdr;
4618 struct l2cap_chan *chan;
4619 u16 cid;
4620 int len;
4621
4622 if (conn->rx_len) {
4623 BT_ERR("Unexpected start frame (len %d)", skb->len);
4624 kfree_skb(conn->rx_skb);
4625 conn->rx_skb = NULL;
4626 conn->rx_len = 0;
4627 l2cap_conn_unreliable(conn, ECOMM);
4628 }
4629
4630 		/* A start fragment always begins with the Basic L2CAP header */
4631 if (skb->len < L2CAP_HDR_SIZE) {
4632 BT_ERR("Frame is too short (len %d)", skb->len);
4633 l2cap_conn_unreliable(conn, ECOMM);
4634 goto drop;
4635 }
4636
4637 hdr = (struct l2cap_hdr *) skb->data;
4638 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4639 cid = __le16_to_cpu(hdr->cid);
4640
4641 if (len == skb->len) {
4642 /* Complete frame received */
4643 l2cap_recv_frame(conn, skb);
4644 return 0;
4645 }
4646
4647 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4648
4649 if (skb->len > len) {
4650 BT_ERR("Frame is too long (len %d, expected len %d)",
4651 skb->len, len);
4652 l2cap_conn_unreliable(conn, ECOMM);
4653 goto drop;
4654 }
4655
4656 chan = l2cap_get_chan_by_scid(conn, cid);
4657
4658 if (chan && chan->sk) {
4659 struct sock *sk = chan->sk;
4660
4661 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4662 BT_ERR("Frame exceeding recv MTU (len %d, "
4663 "MTU %d)", len,
4664 chan->imtu);
4665 release_sock(sk);
4666 l2cap_conn_unreliable(conn, ECOMM);
4667 goto drop;
4668 }
4669 release_sock(sk);
4670 }
4671
4672 /* Allocate skb for the complete frame (with header) */
4673 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4674 if (!conn->rx_skb)
4675 goto drop;
4676
4677 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4678 skb->len);
4679 conn->rx_len = len - skb->len;
4680 } else {
4681 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4682
4683 if (!conn->rx_len) {
4684 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4685 l2cap_conn_unreliable(conn, ECOMM);
4686 goto drop;
4687 }
4688
4689 if (skb->len > conn->rx_len) {
4690 BT_ERR("Fragment is too long (len %d, expected %d)",
4691 skb->len, conn->rx_len);
4692 kfree_skb(conn->rx_skb);
4693 conn->rx_skb = NULL;
4694 conn->rx_len = 0;
4695 l2cap_conn_unreliable(conn, ECOMM);
4696 goto drop;
4697 }
4698
4699 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4700 skb->len);
4701 conn->rx_len -= skb->len;
4702
4703 if (!conn->rx_len) {
4704 /* Complete frame received */
4705 l2cap_recv_frame(conn, conn->rx_skb);
4706 conn->rx_skb = NULL;
4707 }
4708 }
4709
4710 drop:
4711 kfree_skb(skb);
4712 return 0;
4713 }
4714
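/*
 * debugfs: dump one line per registered channel - addresses, state, PSM,
 * CIDs, MTUs, security level and mode.
 */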
4715 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4716 {
4717 struct l2cap_chan *c;
4718
4719 read_lock(&chan_list_lock);
4720
4721 list_for_each_entry(c, &chan_list, global_l) {
4722 struct sock *sk = c->sk;
4723
4724 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4725 batostr(&bt_sk(sk)->src),
4726 batostr(&bt_sk(sk)->dst),
4727 c->state, __le16_to_cpu(c->psm),
4728 c->scid, c->dcid, c->imtu, c->omtu,
4729 c->sec_level, c->mode);
4730 }
4731
4732 read_unlock(&chan_list_lock);
4733
4734 return 0;
4735 }
4736
4737 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4738 {
4739 return single_open(file, l2cap_debugfs_show, inode->i_private);
4740 }
4741
4742 static const struct file_operations l2cap_debugfs_fops = {
4743 .open = l2cap_debugfs_open,
4744 .read = seq_read,
4745 .llseek = seq_lseek,
4746 .release = single_release,
4747 };
4748
4749 static struct dentry *l2cap_debugfs;
4750
4751 int __init l2cap_init(void)
4752 {
4753 int err;
4754
4755 err = l2cap_init_sockets();
4756 if (err < 0)
4757 return err;
4758
4759 if (bt_debugfs) {
4760 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4761 bt_debugfs, NULL, &l2cap_debugfs_fops);
4762 if (!l2cap_debugfs)
4763 BT_ERR("Failed to create L2CAP debug file");
4764 }
4765
4766 return 0;
4767 }
4768
4769 void l2cap_exit(void)
4770 {
4771 debugfs_remove(l2cap_debugfs);
4772 l2cap_cleanup_sockets();
4773 }
4774
4775 module_param(disable_ertm, bool, 0644);
4776 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
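/*
 * Since l2cap_core.c is built into the bluetooth module, the parameter is
 * expected to appear as /sys/module/bluetooth/parameters/disable_ertm
 * (path given as an assumption, not verified here); it can also be set at
 * load time, e.g. "modprobe bluetooth disable_ertm=1".
 */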