Bluetooth: Use symbolic names for state in debug
net/bluetooth/l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
13
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
26 */
27
28 /* Bluetooth L2CAP core. */
29
30 #include <linux/module.h>
31
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
50 #include <net/sock.h>
51
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
54
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
59
60 bool disable_ertm;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
67
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76 /* ---- L2CAP channels ---- */
77
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
79 {
80 struct l2cap_chan *c, *r = NULL;
81
82 rcu_read_lock();
83
84 list_for_each_entry_rcu(c, &conn->chan_l, list) {
85 if (c->dcid == cid) {
86 r = c;
87 break;
88 }
89 }
90
91 rcu_read_unlock();
92 return r;
93 }
94
95 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
96 {
97 struct l2cap_chan *c, *r = NULL;
98
99 rcu_read_lock();
100
101 list_for_each_entry_rcu(c, &conn->chan_l, list) {
102 if (c->scid == cid) {
103 r = c;
104 break;
105 }
106 }
107
108 rcu_read_unlock();
109 return r;
110 }
111
112 /* Find channel with given SCID.
113  * Returns the channel with its socket locked */
114 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
115 {
116 struct l2cap_chan *c;
117
118 c = __l2cap_get_chan_by_scid(conn, cid);
119 if (c)
120 lock_sock(c->sk);
121 return c;
122 }
123
124 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
125 {
126 struct l2cap_chan *c, *r = NULL;
127
128 rcu_read_lock();
129
130 list_for_each_entry_rcu(c, &conn->chan_l, list) {
131 if (c->ident == ident) {
132 r = c;
133 break;
134 }
135 }
136
137 rcu_read_unlock();
138 return r;
139 }
140
141 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
142 {
143 struct l2cap_chan *c;
144
145 c = __l2cap_get_chan_by_ident(conn, ident);
146 if (c)
147 lock_sock(c->sk);
148 return c;
149 }
150
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
152 {
153 struct l2cap_chan *c;
154
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
157 return c;
158 }
159 return NULL;
160 }
161
162 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
163 {
164 int err;
165
166 write_lock(&chan_list_lock);
167
168 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
169 err = -EADDRINUSE;
170 goto done;
171 }
172
173 if (psm) {
174 chan->psm = psm;
175 chan->sport = psm;
176 err = 0;
177 } else {
178 u16 p;
179
180 err = -EINVAL;
181 for (p = 0x1001; p < 0x1100; p += 2)
182 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
183 chan->psm = cpu_to_le16(p);
184 chan->sport = cpu_to_le16(p);
185 err = 0;
186 break;
187 }
188 }
189
190 done:
191 write_unlock(&chan_list_lock);
192 return err;
193 }
194
195 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
196 {
197 write_lock(&chan_list_lock);
198
199 chan->scid = scid;
200
201 write_unlock(&chan_list_lock);
202
203 return 0;
204 }
205
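/* Allocate the first unused dynamic CID on this connection,
 * or 0 if the dynamic range is exhausted.
 */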
206 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
207 {
208 u16 cid = L2CAP_CID_DYN_START;
209
210 for (; cid < L2CAP_CID_DYN_END; cid++) {
211 if (!__l2cap_get_chan_by_scid(conn, cid))
212 return cid;
213 }
214
215 return 0;
216 }
217
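/* Update the channel state and notify the channel owner, logging
 * the transition with symbolic state names.
 */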
218 static void l2cap_state_change(struct l2cap_chan *chan, int state)
219 {
220 BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
221 state_to_string(state));
222
223 chan->state = state;
224 chan->ops->state_change(chan->data, state);
225 }
226
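/* Channel timer expired: close the channel with an error code
 * derived from its current state.
 */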
227 static void l2cap_chan_timeout(struct work_struct *work)
228 {
229 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
230 chan_timer.work);
231 struct sock *sk = chan->sk;
232 int reason;
233
234 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
235
236 lock_sock(sk);
237
238 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
239 reason = ECONNREFUSED;
240 else if (chan->state == BT_CONNECT &&
241 chan->sec_level != BT_SECURITY_SDP)
242 reason = ECONNREFUSED;
243 else
244 reason = ETIMEDOUT;
245
246 l2cap_chan_close(chan, reason);
247
248 release_sock(sk);
249
250 chan->ops->close(chan->data);
251 l2cap_chan_put(chan);
252 }
253
254 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
255 {
256 struct l2cap_chan *chan;
257
258 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
259 if (!chan)
260 return NULL;
261
262 chan->sk = sk;
263
264 write_lock(&chan_list_lock);
265 list_add(&chan->global_l, &chan_list);
266 write_unlock(&chan_list_lock);
267
268 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
269
270 chan->state = BT_OPEN;
271
272 atomic_set(&chan->refcnt, 1);
273
274 BT_DBG("sk %p chan %p", sk, chan);
275
276 return chan;
277 }
278
279 void l2cap_chan_destroy(struct l2cap_chan *chan)
280 {
281 write_lock(&chan_list_lock);
282 list_del(&chan->global_l);
283 write_unlock(&chan_list_lock);
284
285 l2cap_chan_put(chan);
286 }
287
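/* Attach a channel to a connection: assign CIDs and default MTU
 * according to the channel type and add it to the connection's
 * channel list.
 */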
288 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
289 {
290 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
291 chan->psm, chan->dcid);
292
293 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
294
295 chan->conn = conn;
296
297 switch (chan->chan_type) {
298 case L2CAP_CHAN_CONN_ORIENTED:
299 if (conn->hcon->type == LE_LINK) {
300 /* LE connection */
301 chan->omtu = L2CAP_LE_DEFAULT_MTU;
302 chan->scid = L2CAP_CID_LE_DATA;
303 chan->dcid = L2CAP_CID_LE_DATA;
304 } else {
305 /* Alloc CID for connection-oriented socket */
306 chan->scid = l2cap_alloc_cid(conn);
307 chan->omtu = L2CAP_DEFAULT_MTU;
308 }
309 break;
310
311 case L2CAP_CHAN_CONN_LESS:
312 /* Connectionless socket */
313 chan->scid = L2CAP_CID_CONN_LESS;
314 chan->dcid = L2CAP_CID_CONN_LESS;
315 chan->omtu = L2CAP_DEFAULT_MTU;
316 break;
317
318 default:
319 /* Raw socket can send/recv signalling messages only */
320 chan->scid = L2CAP_CID_SIGNALING;
321 chan->dcid = L2CAP_CID_SIGNALING;
322 chan->omtu = L2CAP_DEFAULT_MTU;
323 }
324
325 chan->local_id = L2CAP_BESTEFFORT_ID;
326 chan->local_stype = L2CAP_SERV_BESTEFFORT;
327 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
328 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
329 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
330 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
331
332 l2cap_chan_hold(chan);
333
334 list_add_rcu(&chan->list, &conn->chan_l);
335 }
336
337 /* Delete channel.
338  * Must be called with the socket locked. */
339 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
340 {
341 struct sock *sk = chan->sk;
342 struct l2cap_conn *conn = chan->conn;
343 struct sock *parent = bt_sk(sk)->parent;
344
345 __clear_chan_timer(chan);
346
347 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
348
349 if (conn) {
350 /* Delete from channel list */
351 list_del_rcu(&chan->list);
352 synchronize_rcu();
353
354 l2cap_chan_put(chan);
355
356 chan->conn = NULL;
357 hci_conn_put(conn->hcon);
358 }
359
360 l2cap_state_change(chan, BT_CLOSED);
361 sock_set_flag(sk, SOCK_ZAPPED);
362
363 if (err)
364 sk->sk_err = err;
365
366 if (parent) {
367 bt_accept_unlink(sk);
368 parent->sk_data_ready(parent, 0);
369 } else
370 sk->sk_state_change(sk);
371
372 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
373 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
374 return;
375
376 skb_queue_purge(&chan->tx_q);
377
378 if (chan->mode == L2CAP_MODE_ERTM) {
379 struct srej_list *l, *tmp;
380
381 __clear_retrans_timer(chan);
382 __clear_monitor_timer(chan);
383 __clear_ack_timer(chan);
384
385 skb_queue_purge(&chan->srej_q);
386
387 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
388 list_del(&l->list);
389 kfree(l);
390 }
391 }
392 }
393
394 static void l2cap_chan_cleanup_listen(struct sock *parent)
395 {
396 struct sock *sk;
397
398 BT_DBG("parent %p", parent);
399
400 /* Close not yet accepted channels */
401 while ((sk = bt_accept_dequeue(parent, NULL))) {
402 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
403 __clear_chan_timer(chan);
404 lock_sock(sk);
405 l2cap_chan_close(chan, ECONNRESET);
406 release_sock(sk);
407 chan->ops->close(chan->data);
408 }
409 }
410
411 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
412 {
413 struct l2cap_conn *conn = chan->conn;
414 struct sock *sk = chan->sk;
415
416 BT_DBG("chan %p state %s sk %p", chan,
417 state_to_string(chan->state), sk);
418
419 switch (chan->state) {
420 case BT_LISTEN:
421 l2cap_chan_cleanup_listen(sk);
422
423 l2cap_state_change(chan, BT_CLOSED);
424 sock_set_flag(sk, SOCK_ZAPPED);
425 break;
426
427 case BT_CONNECTED:
428 case BT_CONFIG:
429 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
430 conn->hcon->type == ACL_LINK) {
431 __clear_chan_timer(chan);
432 __set_chan_timer(chan, sk->sk_sndtimeo);
433 l2cap_send_disconn_req(conn, chan, reason);
434 } else
435 l2cap_chan_del(chan, reason);
436 break;
437
438 case BT_CONNECT2:
439 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
440 conn->hcon->type == ACL_LINK) {
441 struct l2cap_conn_rsp rsp;
442 __u16 result;
443
444 if (bt_sk(sk)->defer_setup)
445 result = L2CAP_CR_SEC_BLOCK;
446 else
447 result = L2CAP_CR_BAD_PSM;
448 l2cap_state_change(chan, BT_DISCONN);
449
450 rsp.scid = cpu_to_le16(chan->dcid);
451 rsp.dcid = cpu_to_le16(chan->scid);
452 rsp.result = cpu_to_le16(result);
453 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
454 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
455 sizeof(rsp), &rsp);
456 }
457
458 l2cap_chan_del(chan, reason);
459 break;
460
461 case BT_CONNECT:
462 case BT_DISCONN:
463 l2cap_chan_del(chan, reason);
464 break;
465
466 default:
467 sock_set_flag(sk, SOCK_ZAPPED);
468 break;
469 }
470 }
471
472 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
473 {
474 if (chan->chan_type == L2CAP_CHAN_RAW) {
475 switch (chan->sec_level) {
476 case BT_SECURITY_HIGH:
477 return HCI_AT_DEDICATED_BONDING_MITM;
478 case BT_SECURITY_MEDIUM:
479 return HCI_AT_DEDICATED_BONDING;
480 default:
481 return HCI_AT_NO_BONDING;
482 }
483 } else if (chan->psm == cpu_to_le16(0x0001)) {
484 if (chan->sec_level == BT_SECURITY_LOW)
485 chan->sec_level = BT_SECURITY_SDP;
486
487 if (chan->sec_level == BT_SECURITY_HIGH)
488 return HCI_AT_NO_BONDING_MITM;
489 else
490 return HCI_AT_NO_BONDING;
491 } else {
492 switch (chan->sec_level) {
493 case BT_SECURITY_HIGH:
494 return HCI_AT_GENERAL_BONDING_MITM;
495 case BT_SECURITY_MEDIUM:
496 return HCI_AT_GENERAL_BONDING;
497 default:
498 return HCI_AT_NO_BONDING;
499 }
500 }
501 }
502
503 /* Service level security */
504 int l2cap_chan_check_security(struct l2cap_chan *chan)
505 {
506 struct l2cap_conn *conn = chan->conn;
507 __u8 auth_type;
508
509 auth_type = l2cap_get_auth_type(chan);
510
511 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
512 }
513
514 static u8 l2cap_get_ident(struct l2cap_conn *conn)
515 {
516 u8 id;
517
518 	/* Get next available identifier.
519 * 1 - 128 are used by kernel.
520 * 129 - 199 are reserved.
521 * 200 - 254 are used by utilities like l2ping, etc.
522 */
523
524 spin_lock(&conn->lock);
525
526 if (++conn->tx_ident > 128)
527 conn->tx_ident = 1;
528
529 id = conn->tx_ident;
530
531 spin_unlock(&conn->lock);
532
533 return id;
534 }
535
536 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
537 {
538 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
539 u8 flags;
540
541 BT_DBG("code 0x%2.2x", code);
542
543 if (!skb)
544 return;
545
546 if (lmp_no_flush_capable(conn->hcon->hdev))
547 flags = ACL_START_NO_FLUSH;
548 else
549 flags = ACL_START;
550
551 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
552 skb->priority = HCI_PRIO_MAX;
553
554 hci_send_acl(conn->hchan, skb, flags);
555 }
556
557 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
558 {
559 struct hci_conn *hcon = chan->conn->hcon;
560 u16 flags;
561
562 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
563 skb->priority);
564
565 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
566 lmp_no_flush_capable(hcon->hdev))
567 flags = ACL_START_NO_FLUSH;
568 else
569 flags = ACL_START;
570
571 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
572 hci_send_acl(chan->conn->hchan, skb, flags);
573 }
574
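/* Build and send a single S-frame with the given control field on a
 * connected channel, adding the final/poll bits and FCS as configured.
 */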
575 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
576 {
577 struct sk_buff *skb;
578 struct l2cap_hdr *lh;
579 struct l2cap_conn *conn = chan->conn;
580 int count, hlen;
581
582 if (chan->state != BT_CONNECTED)
583 return;
584
585 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
586 hlen = L2CAP_EXT_HDR_SIZE;
587 else
588 hlen = L2CAP_ENH_HDR_SIZE;
589
590 if (chan->fcs == L2CAP_FCS_CRC16)
591 hlen += L2CAP_FCS_SIZE;
592
593 BT_DBG("chan %p, control 0x%8.8x", chan, control);
594
595 count = min_t(unsigned int, conn->mtu, hlen);
596
597 control |= __set_sframe(chan);
598
599 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
600 control |= __set_ctrl_final(chan);
601
602 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
603 control |= __set_ctrl_poll(chan);
604
605 skb = bt_skb_alloc(count, GFP_ATOMIC);
606 if (!skb)
607 return;
608
609 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
610 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
611 lh->cid = cpu_to_le16(chan->dcid);
612
613 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
614
615 if (chan->fcs == L2CAP_FCS_CRC16) {
616 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
617 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
618 }
619
620 skb->priority = HCI_PRIO_MAX;
621 l2cap_do_send(chan, skb);
622 }
623
624 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
625 {
626 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
627 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
628 set_bit(CONN_RNR_SENT, &chan->conn_state);
629 } else
630 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
631
632 control |= __set_reqseq(chan, chan->buffer_seq);
633
634 l2cap_send_sframe(chan, control);
635 }
636
637 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
638 {
639 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
640 }
641
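/* Start channel establishment: query the remote feature mask if that
 * has not been done yet, and send a connect request once it is known
 * and security is satisfied.
 */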
642 static void l2cap_do_start(struct l2cap_chan *chan)
643 {
644 struct l2cap_conn *conn = chan->conn;
645
646 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
647 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
648 return;
649
650 if (l2cap_chan_check_security(chan) &&
651 __l2cap_no_conn_pending(chan)) {
652 struct l2cap_conn_req req;
653 req.scid = cpu_to_le16(chan->scid);
654 req.psm = chan->psm;
655
656 chan->ident = l2cap_get_ident(conn);
657 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
658
659 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
660 sizeof(req), &req);
661 }
662 } else {
663 struct l2cap_info_req req;
664 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
665
666 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
667 conn->info_ident = l2cap_get_ident(conn);
668
669 schedule_delayed_work(&conn->info_timer,
670 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
671
672 l2cap_send_cmd(conn, conn->info_ident,
673 L2CAP_INFO_REQ, sizeof(req), &req);
674 }
675 }
676
677 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
678 {
679 u32 local_feat_mask = l2cap_feat_mask;
680 if (!disable_ertm)
681 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
682
683 switch (mode) {
684 case L2CAP_MODE_ERTM:
685 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
686 case L2CAP_MODE_STREAMING:
687 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
688 default:
689 return 0x00;
690 }
691 }
692
693 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
694 {
695 struct sock *sk;
696 struct l2cap_disconn_req req;
697
698 if (!conn)
699 return;
700
701 sk = chan->sk;
702
703 if (chan->mode == L2CAP_MODE_ERTM) {
704 __clear_retrans_timer(chan);
705 __clear_monitor_timer(chan);
706 __clear_ack_timer(chan);
707 }
708
709 req.dcid = cpu_to_le16(chan->dcid);
710 req.scid = cpu_to_le16(chan->scid);
711 l2cap_send_cmd(conn, l2cap_get_ident(conn),
712 L2CAP_DISCONN_REQ, sizeof(req), &req);
713
714 l2cap_state_change(chan, BT_DISCONN);
715 sk->sk_err = err;
716 }
717
718 /* ---- L2CAP connections ---- */
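/* Walk all channels on the connection and move pending ones forward:
 * send connect requests for channels in BT_CONNECT and connect
 * responses (plus an initial config request) for those in BT_CONNECT2.
 */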
719 static void l2cap_conn_start(struct l2cap_conn *conn)
720 {
721 struct l2cap_chan *chan;
722
723 BT_DBG("conn %p", conn);
724
725 rcu_read_lock();
726
727 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
728 struct sock *sk = chan->sk;
729
730 bh_lock_sock(sk);
731
732 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
733 bh_unlock_sock(sk);
734 continue;
735 }
736
737 if (chan->state == BT_CONNECT) {
738 struct l2cap_conn_req req;
739
740 if (!l2cap_chan_check_security(chan) ||
741 !__l2cap_no_conn_pending(chan)) {
742 bh_unlock_sock(sk);
743 continue;
744 }
745
746 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
747 && test_bit(CONF_STATE2_DEVICE,
748 &chan->conf_state)) {
749 /* l2cap_chan_close() calls list_del(chan)
750 * so release the lock */
751 l2cap_chan_close(chan, ECONNRESET);
752 bh_unlock_sock(sk);
753 continue;
754 }
755
756 req.scid = cpu_to_le16(chan->scid);
757 req.psm = chan->psm;
758
759 chan->ident = l2cap_get_ident(conn);
760 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
761
762 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
763 sizeof(req), &req);
764
765 } else if (chan->state == BT_CONNECT2) {
766 struct l2cap_conn_rsp rsp;
767 char buf[128];
768 rsp.scid = cpu_to_le16(chan->dcid);
769 rsp.dcid = cpu_to_le16(chan->scid);
770
771 if (l2cap_chan_check_security(chan)) {
772 if (bt_sk(sk)->defer_setup) {
773 struct sock *parent = bt_sk(sk)->parent;
774 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
775 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
776 if (parent)
777 parent->sk_data_ready(parent, 0);
778
779 } else {
780 l2cap_state_change(chan, BT_CONFIG);
781 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
782 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
783 }
784 } else {
785 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
786 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
787 }
788
789 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
790 sizeof(rsp), &rsp);
791
792 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
793 rsp.result != L2CAP_CR_SUCCESS) {
794 bh_unlock_sock(sk);
795 continue;
796 }
797
798 set_bit(CONF_REQ_SENT, &chan->conf_state);
799 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
800 l2cap_build_conf_req(chan, buf), buf);
801 chan->num_conf_req++;
802 }
803
804 bh_unlock_sock(sk);
805 }
806
807 rcu_read_unlock();
808 }
809
810 /* Find channel with given CID and source bdaddr.
811  * Returns closest match.
812 */
813 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
814 {
815 struct l2cap_chan *c, *c1 = NULL;
816
817 read_lock(&chan_list_lock);
818
819 list_for_each_entry(c, &chan_list, global_l) {
820 struct sock *sk = c->sk;
821
822 if (state && c->state != state)
823 continue;
824
825 if (c->scid == cid) {
826 /* Exact match. */
827 if (!bacmp(&bt_sk(sk)->src, src)) {
828 read_unlock(&chan_list_lock);
829 return c;
830 }
831
832 /* Closest match */
833 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
834 c1 = c;
835 }
836 }
837
838 read_unlock(&chan_list_lock);
839
840 return c1;
841 }
842
843 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
844 {
845 struct sock *parent, *sk;
846 struct l2cap_chan *chan, *pchan;
847
848 BT_DBG("");
849
850 /* Check if we have socket listening on cid */
851 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
852 conn->src);
853 if (!pchan)
854 return;
855
856 parent = pchan->sk;
857
858 lock_sock(parent);
859
860 /* Check for backlog size */
861 if (sk_acceptq_is_full(parent)) {
862 BT_DBG("backlog full %d", parent->sk_ack_backlog);
863 goto clean;
864 }
865
866 chan = pchan->ops->new_connection(pchan->data);
867 if (!chan)
868 goto clean;
869
870 sk = chan->sk;
871
872 hci_conn_hold(conn->hcon);
873
874 bacpy(&bt_sk(sk)->src, conn->src);
875 bacpy(&bt_sk(sk)->dst, conn->dst);
876
877 bt_accept_enqueue(parent, sk);
878
879 l2cap_chan_add(conn, chan);
880
881 __set_chan_timer(chan, sk->sk_sndtimeo);
882
883 l2cap_state_change(chan, BT_CONNECTED);
884 parent->sk_data_ready(parent, 0);
885
886 clean:
887 release_sock(parent);
888 }
889
890 static void l2cap_chan_ready(struct l2cap_chan *chan)
891 {
892 struct sock *sk = chan->sk;
893 struct sock *parent = bt_sk(sk)->parent;
894
895 BT_DBG("sk %p, parent %p", sk, parent);
896
897 chan->conf_state = 0;
898 __clear_chan_timer(chan);
899
900 l2cap_state_change(chan, BT_CONNECTED);
901 sk->sk_state_change(sk);
902
903 if (parent)
904 parent->sk_data_ready(parent, 0);
905 }
906
907 static void l2cap_conn_ready(struct l2cap_conn *conn)
908 {
909 struct l2cap_chan *chan;
910
911 BT_DBG("conn %p", conn);
912
913 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
914 l2cap_le_conn_ready(conn);
915
916 if (conn->hcon->out && conn->hcon->type == LE_LINK)
917 smp_conn_security(conn, conn->hcon->pending_sec_level);
918
919 rcu_read_lock();
920
921 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
922 struct sock *sk = chan->sk;
923
924 bh_lock_sock(sk);
925
926 if (conn->hcon->type == LE_LINK) {
927 if (smp_conn_security(conn, chan->sec_level))
928 l2cap_chan_ready(chan);
929
930 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
931 __clear_chan_timer(chan);
932 l2cap_state_change(chan, BT_CONNECTED);
933 sk->sk_state_change(sk);
934
935 } else if (chan->state == BT_CONNECT)
936 l2cap_do_start(chan);
937
938 bh_unlock_sock(sk);
939 }
940
941 rcu_read_unlock();
942 }
943
944 /* Notify sockets that we cannot guarantee reliability anymore */
945 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
946 {
947 struct l2cap_chan *chan;
948
949 BT_DBG("conn %p", conn);
950
951 rcu_read_lock();
952
953 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
954 struct sock *sk = chan->sk;
955
956 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
957 sk->sk_err = err;
958 }
959
960 rcu_read_unlock();
961 }
962
963 static void l2cap_info_timeout(struct work_struct *work)
964 {
965 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
966 info_timer.work);
967
968 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
969 conn->info_ident = 0;
970
971 l2cap_conn_start(conn);
972 }
973
974 static void l2cap_conn_del(struct hci_conn *hcon, int err)
975 {
976 struct l2cap_conn *conn = hcon->l2cap_data;
977 struct l2cap_chan *chan, *l;
978 struct sock *sk;
979
980 if (!conn)
981 return;
982
983 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
984
985 kfree_skb(conn->rx_skb);
986
987 /* Kill channels */
988 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
989 sk = chan->sk;
990 lock_sock(sk);
991 l2cap_chan_del(chan, err);
992 release_sock(sk);
993 chan->ops->close(chan->data);
994 }
995
996 hci_chan_del(conn->hchan);
997
998 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
999 cancel_delayed_work_sync(&conn->info_timer);
1000
1001 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1002 cancel_delayed_work_sync(&conn->security_timer);
1003 smp_chan_destroy(conn);
1004 }
1005
1006 hcon->l2cap_data = NULL;
1007 kfree(conn);
1008 }
1009
1010 static void security_timeout(struct work_struct *work)
1011 {
1012 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1013 security_timer.work);
1014
1015 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1016 }
1017
1018 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1019 {
1020 struct l2cap_conn *conn = hcon->l2cap_data;
1021 struct hci_chan *hchan;
1022
1023 if (conn || status)
1024 return conn;
1025
1026 hchan = hci_chan_create(hcon);
1027 if (!hchan)
1028 return NULL;
1029
1030 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1031 if (!conn) {
1032 hci_chan_del(hchan);
1033 return NULL;
1034 }
1035
1036 hcon->l2cap_data = conn;
1037 conn->hcon = hcon;
1038 conn->hchan = hchan;
1039
1040 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1041
1042 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1043 conn->mtu = hcon->hdev->le_mtu;
1044 else
1045 conn->mtu = hcon->hdev->acl_mtu;
1046
1047 conn->src = &hcon->hdev->bdaddr;
1048 conn->dst = &hcon->dst;
1049
1050 conn->feat_mask = 0;
1051
1052 spin_lock_init(&conn->lock);
1053
1054 INIT_LIST_HEAD(&conn->chan_l);
1055
1056 if (hcon->type == LE_LINK)
1057 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1058 else
1059 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1060
1061 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1062
1063 return conn;
1064 }
1065
1066 /* ---- Socket interface ---- */
1067
1068 /* Find channel with given PSM and source bdaddr.
1069 * Returns closest match.
1070 */
1071 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1072 {
1073 struct l2cap_chan *c, *c1 = NULL;
1074
1075 read_lock(&chan_list_lock);
1076
1077 list_for_each_entry(c, &chan_list, global_l) {
1078 struct sock *sk = c->sk;
1079
1080 if (state && c->state != state)
1081 continue;
1082
1083 if (c->psm == psm) {
1084 /* Exact match. */
1085 if (!bacmp(&bt_sk(sk)->src, src)) {
1086 read_unlock(&chan_list_lock);
1087 return c;
1088 }
1089
1090 /* Closest match */
1091 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1092 c1 = c;
1093 }
1094 }
1095
1096 read_unlock(&chan_list_lock);
1097
1098 return c1;
1099 }
1100
1101 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1102 {
1103 struct sock *sk = chan->sk;
1104 bdaddr_t *src = &bt_sk(sk)->src;
1105 struct l2cap_conn *conn;
1106 struct hci_conn *hcon;
1107 struct hci_dev *hdev;
1108 __u8 auth_type;
1109 int err;
1110
1111 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1112 chan->psm);
1113
1114 hdev = hci_get_route(dst, src);
1115 if (!hdev)
1116 return -EHOSTUNREACH;
1117
1118 hci_dev_lock(hdev);
1119
1120 lock_sock(sk);
1121
1122 /* PSM must be odd and lsb of upper byte must be 0 */
1123 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1124 chan->chan_type != L2CAP_CHAN_RAW) {
1125 err = -EINVAL;
1126 goto done;
1127 }
1128
1129 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1130 err = -EINVAL;
1131 goto done;
1132 }
1133
1134 switch (chan->mode) {
1135 case L2CAP_MODE_BASIC:
1136 break;
1137 case L2CAP_MODE_ERTM:
1138 case L2CAP_MODE_STREAMING:
1139 if (!disable_ertm)
1140 break;
1141 /* fall through */
1142 default:
1143 err = -ENOTSUPP;
1144 goto done;
1145 }
1146
1147 switch (sk->sk_state) {
1148 case BT_CONNECT:
1149 case BT_CONNECT2:
1150 case BT_CONFIG:
1151 /* Already connecting */
1152 err = 0;
1153 goto done;
1154
1155 case BT_CONNECTED:
1156 /* Already connected */
1157 err = -EISCONN;
1158 goto done;
1159
1160 case BT_OPEN:
1161 case BT_BOUND:
1162 /* Can connect */
1163 break;
1164
1165 default:
1166 err = -EBADFD;
1167 goto done;
1168 }
1169
1170 /* Set destination address and psm */
1171 bacpy(&bt_sk(sk)->dst, dst);
1172 chan->psm = psm;
1173 chan->dcid = cid;
1174
1175 auth_type = l2cap_get_auth_type(chan);
1176
1177 if (chan->dcid == L2CAP_CID_LE_DATA)
1178 hcon = hci_connect(hdev, LE_LINK, dst,
1179 chan->sec_level, auth_type);
1180 else
1181 hcon = hci_connect(hdev, ACL_LINK, dst,
1182 chan->sec_level, auth_type);
1183
1184 if (IS_ERR(hcon)) {
1185 err = PTR_ERR(hcon);
1186 goto done;
1187 }
1188
1189 conn = l2cap_conn_add(hcon, 0);
1190 if (!conn) {
1191 hci_conn_put(hcon);
1192 err = -ENOMEM;
1193 goto done;
1194 }
1195
1196 /* Update source addr of the socket */
1197 bacpy(src, conn->src);
1198
1199 l2cap_chan_add(conn, chan);
1200
1201 l2cap_state_change(chan, BT_CONNECT);
1202 __set_chan_timer(chan, sk->sk_sndtimeo);
1203
1204 if (hcon->state == BT_CONNECTED) {
1205 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1206 __clear_chan_timer(chan);
1207 if (l2cap_chan_check_security(chan))
1208 l2cap_state_change(chan, BT_CONNECTED);
1209 } else
1210 l2cap_do_start(chan);
1211 }
1212
1213 err = 0;
1214
1215 done:
1216 hci_dev_unlock(hdev);
1217 hci_dev_put(hdev);
1218 return err;
1219 }
1220
1221 int __l2cap_wait_ack(struct sock *sk)
1222 {
1223 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1224 DECLARE_WAITQUEUE(wait, current);
1225 int err = 0;
1226 int timeo = HZ/5;
1227
1228 add_wait_queue(sk_sleep(sk), &wait);
1229 set_current_state(TASK_INTERRUPTIBLE);
1230 while (chan->unacked_frames > 0 && chan->conn) {
1231 if (!timeo)
1232 timeo = HZ/5;
1233
1234 if (signal_pending(current)) {
1235 err = sock_intr_errno(timeo);
1236 break;
1237 }
1238
1239 release_sock(sk);
1240 timeo = schedule_timeout(timeo);
1241 lock_sock(sk);
1242 set_current_state(TASK_INTERRUPTIBLE);
1243
1244 err = sock_error(sk);
1245 if (err)
1246 break;
1247 }
1248 set_current_state(TASK_RUNNING);
1249 remove_wait_queue(sk_sleep(sk), &wait);
1250 return err;
1251 }
1252
1253 static void l2cap_monitor_timeout(struct work_struct *work)
1254 {
1255 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1256 monitor_timer.work);
1257 struct sock *sk = chan->sk;
1258
1259 BT_DBG("chan %p", chan);
1260
1261 lock_sock(sk);
1262 if (chan->retry_count >= chan->remote_max_tx) {
1263 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1264 release_sock(sk);
1265 return;
1266 }
1267
1268 chan->retry_count++;
1269 __set_monitor_timer(chan);
1270
1271 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1272 release_sock(sk);
1273 }
1274
1275 static void l2cap_retrans_timeout(struct work_struct *work)
1276 {
1277 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1278 retrans_timer.work);
1279 struct sock *sk = chan->sk;
1280
1281 BT_DBG("chan %p", chan);
1282
1283 lock_sock(sk);
1284 chan->retry_count = 1;
1285 __set_monitor_timer(chan);
1286
1287 set_bit(CONN_WAIT_F, &chan->conn_state);
1288
1289 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1290 release_sock(sk);
1291 }
1292
1293 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1294 {
1295 struct sk_buff *skb;
1296
1297 while ((skb = skb_peek(&chan->tx_q)) &&
1298 chan->unacked_frames) {
1299 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1300 break;
1301
1302 skb = skb_dequeue(&chan->tx_q);
1303 kfree_skb(skb);
1304
1305 chan->unacked_frames--;
1306 }
1307
1308 if (!chan->unacked_frames)
1309 __clear_retrans_timer(chan);
1310 }
1311
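/* Flush the transmit queue in streaming mode: stamp TxSeq and FCS on
 * each frame and send it without keeping retransmission state.
 */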
1312 static void l2cap_streaming_send(struct l2cap_chan *chan)
1313 {
1314 struct sk_buff *skb;
1315 u32 control;
1316 u16 fcs;
1317
1318 while ((skb = skb_dequeue(&chan->tx_q))) {
1319 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1320 control |= __set_txseq(chan, chan->next_tx_seq);
1321 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1322
1323 if (chan->fcs == L2CAP_FCS_CRC16) {
1324 fcs = crc16(0, (u8 *)skb->data,
1325 skb->len - L2CAP_FCS_SIZE);
1326 put_unaligned_le16(fcs,
1327 skb->data + skb->len - L2CAP_FCS_SIZE);
1328 }
1329
1330 l2cap_do_send(chan, skb);
1331
1332 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1333 }
1334 }
1335
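/* Retransmit the queued I-frame with the given TxSeq, rebuilding its
 * control field and FCS.
 */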
1336 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1337 {
1338 struct sk_buff *skb, *tx_skb;
1339 u16 fcs;
1340 u32 control;
1341
1342 skb = skb_peek(&chan->tx_q);
1343 if (!skb)
1344 return;
1345
1346 while (bt_cb(skb)->tx_seq != tx_seq) {
1347 if (skb_queue_is_last(&chan->tx_q, skb))
1348 return;
1349
1350 skb = skb_queue_next(&chan->tx_q, skb);
1351 }
1352
1353 if (chan->remote_max_tx &&
1354 bt_cb(skb)->retries == chan->remote_max_tx) {
1355 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1356 return;
1357 }
1358
1359 tx_skb = skb_clone(skb, GFP_ATOMIC);
1360 bt_cb(skb)->retries++;
1361
1362 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1363 control &= __get_sar_mask(chan);
1364
1365 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1366 control |= __set_ctrl_final(chan);
1367
1368 control |= __set_reqseq(chan, chan->buffer_seq);
1369 control |= __set_txseq(chan, tx_seq);
1370
1371 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1372
1373 if (chan->fcs == L2CAP_FCS_CRC16) {
1374 fcs = crc16(0, (u8 *)tx_skb->data,
1375 tx_skb->len - L2CAP_FCS_SIZE);
1376 put_unaligned_le16(fcs,
1377 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1378 }
1379
1380 l2cap_do_send(chan, tx_skb);
1381 }
1382
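/* Send queued I-frames while the transmit window has room, updating
 * sequence numbers, the unacked count and the retransmission timer.
 * Returns the number of frames sent for the first time, or -ENOTCONN.
 */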
1383 static int l2cap_ertm_send(struct l2cap_chan *chan)
1384 {
1385 struct sk_buff *skb, *tx_skb;
1386 u16 fcs;
1387 u32 control;
1388 int nsent = 0;
1389
1390 if (chan->state != BT_CONNECTED)
1391 return -ENOTCONN;
1392
1393 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1394
1395 if (chan->remote_max_tx &&
1396 bt_cb(skb)->retries == chan->remote_max_tx) {
1397 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1398 break;
1399 }
1400
1401 tx_skb = skb_clone(skb, GFP_ATOMIC);
1402
1403 bt_cb(skb)->retries++;
1404
1405 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1406 control &= __get_sar_mask(chan);
1407
1408 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1409 control |= __set_ctrl_final(chan);
1410
1411 control |= __set_reqseq(chan, chan->buffer_seq);
1412 control |= __set_txseq(chan, chan->next_tx_seq);
1413
1414 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1415
1416 if (chan->fcs == L2CAP_FCS_CRC16) {
1417 fcs = crc16(0, (u8 *)skb->data,
1418 tx_skb->len - L2CAP_FCS_SIZE);
1419 put_unaligned_le16(fcs, skb->data +
1420 tx_skb->len - L2CAP_FCS_SIZE);
1421 }
1422
1423 l2cap_do_send(chan, tx_skb);
1424
1425 __set_retrans_timer(chan);
1426
1427 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1428
1429 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1430
1431 if (bt_cb(skb)->retries == 1) {
1432 chan->unacked_frames++;
1433
1434 if (!nsent++)
1435 __clear_ack_timer(chan);
1436 }
1437
1438 chan->frames_sent++;
1439
1440 if (skb_queue_is_last(&chan->tx_q, skb))
1441 chan->tx_send_head = NULL;
1442 else
1443 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1444 }
1445
1446 return nsent;
1447 }
1448
1449 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1450 {
1451 int ret;
1452
1453 if (!skb_queue_empty(&chan->tx_q))
1454 chan->tx_send_head = chan->tx_q.next;
1455
1456 chan->next_tx_seq = chan->expected_ack_seq;
1457 ret = l2cap_ertm_send(chan);
1458 return ret;
1459 }
1460
1461 static void __l2cap_send_ack(struct l2cap_chan *chan)
1462 {
1463 u32 control = 0;
1464
1465 control |= __set_reqseq(chan, chan->buffer_seq);
1466
1467 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1468 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1469 set_bit(CONN_RNR_SENT, &chan->conn_state);
1470 l2cap_send_sframe(chan, control);
1471 return;
1472 }
1473
1474 if (l2cap_ertm_send(chan) > 0)
1475 return;
1476
1477 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1478 l2cap_send_sframe(chan, control);
1479 }
1480
1481 static void l2cap_send_ack(struct l2cap_chan *chan)
1482 {
1483 __clear_ack_timer(chan);
1484 __l2cap_send_ack(chan);
1485 }
1486
1487 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1488 {
1489 struct srej_list *tail;
1490 u32 control;
1491
1492 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1493 control |= __set_ctrl_final(chan);
1494
1495 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1496 control |= __set_reqseq(chan, tail->tx_seq);
1497
1498 l2cap_send_sframe(chan, control);
1499 }
1500
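/* Copy user data from the iovec into the skb and, for anything beyond
 * the first fragment, into a chain of continuation skbs sized to the
 * connection MTU. Returns the number of bytes copied or a negative error.
 */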
1501 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1502 {
1503 struct l2cap_conn *conn = chan->conn;
1504 struct sk_buff **frag;
1505 int err, sent = 0;
1506
1507 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1508 return -EFAULT;
1509
1510 sent += count;
1511 len -= count;
1512
1513 /* Continuation fragments (no L2CAP header) */
1514 frag = &skb_shinfo(skb)->frag_list;
1515 while (len) {
1516 count = min_t(unsigned int, conn->mtu, len);
1517
1518 *frag = chan->ops->alloc_skb(chan, count,
1519 msg->msg_flags & MSG_DONTWAIT, &err);
1520
1521 if (!*frag)
1522 return err;
1523 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1524 return -EFAULT;
1525
1526 (*frag)->priority = skb->priority;
1527
1528 sent += count;
1529 len -= count;
1530
1531 frag = &(*frag)->next;
1532 }
1533
1534 return sent;
1535 }
1536
1537 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1538 struct msghdr *msg, size_t len,
1539 u32 priority)
1540 {
1541 struct l2cap_conn *conn = chan->conn;
1542 struct sk_buff *skb;
1543 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1544 struct l2cap_hdr *lh;
1545
1546 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1547
1548 count = min_t(unsigned int, (conn->mtu - hlen), len);
1549
1550 skb = chan->ops->alloc_skb(chan, count + hlen,
1551 msg->msg_flags & MSG_DONTWAIT, &err);
1552
1553 if (!skb)
1554 return ERR_PTR(err);
1555
1556 skb->priority = priority;
1557
1558 /* Create L2CAP header */
1559 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1560 lh->cid = cpu_to_le16(chan->dcid);
1561 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1562 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1563
1564 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1565 if (unlikely(err < 0)) {
1566 kfree_skb(skb);
1567 return ERR_PTR(err);
1568 }
1569 return skb;
1570 }
1571
1572 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1573 struct msghdr *msg, size_t len,
1574 u32 priority)
1575 {
1576 struct l2cap_conn *conn = chan->conn;
1577 struct sk_buff *skb;
1578 int err, count, hlen = L2CAP_HDR_SIZE;
1579 struct l2cap_hdr *lh;
1580
1581 BT_DBG("chan %p len %d", chan, (int)len);
1582
1583 count = min_t(unsigned int, (conn->mtu - hlen), len);
1584
1585 skb = chan->ops->alloc_skb(chan, count + hlen,
1586 msg->msg_flags & MSG_DONTWAIT, &err);
1587
1588 if (!skb)
1589 return ERR_PTR(err);
1590
1591 skb->priority = priority;
1592
1593 /* Create L2CAP header */
1594 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1595 lh->cid = cpu_to_le16(chan->dcid);
1596 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1597
1598 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1599 if (unlikely(err < 0)) {
1600 kfree_skb(skb);
1601 return ERR_PTR(err);
1602 }
1603 return skb;
1604 }
1605
1606 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1607 struct msghdr *msg, size_t len,
1608 u32 control, u16 sdulen)
1609 {
1610 struct l2cap_conn *conn = chan->conn;
1611 struct sk_buff *skb;
1612 int err, count, hlen;
1613 struct l2cap_hdr *lh;
1614
1615 BT_DBG("chan %p len %d", chan, (int)len);
1616
1617 if (!conn)
1618 return ERR_PTR(-ENOTCONN);
1619
1620 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1621 hlen = L2CAP_EXT_HDR_SIZE;
1622 else
1623 hlen = L2CAP_ENH_HDR_SIZE;
1624
1625 if (sdulen)
1626 hlen += L2CAP_SDULEN_SIZE;
1627
1628 if (chan->fcs == L2CAP_FCS_CRC16)
1629 hlen += L2CAP_FCS_SIZE;
1630
1631 count = min_t(unsigned int, (conn->mtu - hlen), len);
1632
1633 skb = chan->ops->alloc_skb(chan, count + hlen,
1634 msg->msg_flags & MSG_DONTWAIT, &err);
1635
1636 if (!skb)
1637 return ERR_PTR(err);
1638
1639 /* Create L2CAP header */
1640 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1641 lh->cid = cpu_to_le16(chan->dcid);
1642 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1643
1644 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1645
1646 if (sdulen)
1647 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1648
1649 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1650 if (unlikely(err < 0)) {
1651 kfree_skb(skb);
1652 return ERR_PTR(err);
1653 }
1654
1655 if (chan->fcs == L2CAP_FCS_CRC16)
1656 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1657
1658 bt_cb(skb)->retries = 0;
1659 return skb;
1660 }
1661
1662 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1663 {
1664 struct sk_buff *skb;
1665 struct sk_buff_head sar_queue;
1666 u32 control;
1667 size_t size = 0;
1668
1669 skb_queue_head_init(&sar_queue);
1670 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1671 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1672 if (IS_ERR(skb))
1673 return PTR_ERR(skb);
1674
1675 __skb_queue_tail(&sar_queue, skb);
1676 len -= chan->remote_mps;
1677 size += chan->remote_mps;
1678
1679 while (len > 0) {
1680 size_t buflen;
1681
1682 if (len > chan->remote_mps) {
1683 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1684 buflen = chan->remote_mps;
1685 } else {
1686 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1687 buflen = len;
1688 }
1689
1690 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1691 if (IS_ERR(skb)) {
1692 skb_queue_purge(&sar_queue);
1693 return PTR_ERR(skb);
1694 }
1695
1696 __skb_queue_tail(&sar_queue, skb);
1697 len -= buflen;
1698 size += buflen;
1699 }
1700 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1701 if (chan->tx_send_head == NULL)
1702 chan->tx_send_head = sar_queue.next;
1703
1704 return size;
1705 }
1706
1707 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1708 u32 priority)
1709 {
1710 struct sk_buff *skb;
1711 u32 control;
1712 int err;
1713
1714 /* Connectionless channel */
1715 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1716 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1717 if (IS_ERR(skb))
1718 return PTR_ERR(skb);
1719
1720 l2cap_do_send(chan, skb);
1721 return len;
1722 }
1723
1724 switch (chan->mode) {
1725 case L2CAP_MODE_BASIC:
1726 /* Check outgoing MTU */
1727 if (len > chan->omtu)
1728 return -EMSGSIZE;
1729
1730 /* Create a basic PDU */
1731 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1732 if (IS_ERR(skb))
1733 return PTR_ERR(skb);
1734
1735 l2cap_do_send(chan, skb);
1736 err = len;
1737 break;
1738
1739 case L2CAP_MODE_ERTM:
1740 case L2CAP_MODE_STREAMING:
1741 /* Entire SDU fits into one PDU */
1742 if (len <= chan->remote_mps) {
1743 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1744 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1745 0);
1746 if (IS_ERR(skb))
1747 return PTR_ERR(skb);
1748
1749 __skb_queue_tail(&chan->tx_q, skb);
1750
1751 if (chan->tx_send_head == NULL)
1752 chan->tx_send_head = skb;
1753
1754 } else {
1755 			/* Segment SDU into multiple PDUs */
1756 err = l2cap_sar_segment_sdu(chan, msg, len);
1757 if (err < 0)
1758 return err;
1759 }
1760
1761 if (chan->mode == L2CAP_MODE_STREAMING) {
1762 l2cap_streaming_send(chan);
1763 err = len;
1764 break;
1765 }
1766
1767 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1768 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1769 err = len;
1770 break;
1771 }
1772
1773 err = l2cap_ertm_send(chan);
1774 if (err >= 0)
1775 err = len;
1776
1777 break;
1778
1779 default:
1780 		BT_DBG("bad mode %1.1x", chan->mode);
1781 err = -EBADFD;
1782 }
1783
1784 return err;
1785 }
1786
1787 /* Copy frame to all raw sockets on that connection */
1788 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1789 {
1790 struct sk_buff *nskb;
1791 struct l2cap_chan *chan;
1792
1793 BT_DBG("conn %p", conn);
1794
1795 rcu_read_lock();
1796
1797 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
1798 struct sock *sk = chan->sk;
1799 if (chan->chan_type != L2CAP_CHAN_RAW)
1800 continue;
1801
1802 /* Don't send frame to the socket it came from */
1803 if (skb->sk == sk)
1804 continue;
1805 nskb = skb_clone(skb, GFP_ATOMIC);
1806 if (!nskb)
1807 continue;
1808
1809 if (chan->ops->recv(chan->data, nskb))
1810 kfree_skb(nskb);
1811 }
1812
1813 rcu_read_unlock();
1814 }
1815
1816 /* ---- L2CAP signalling commands ---- */
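/* Allocate and fill a signalling PDU (L2CAP header, command header and
 * payload) on the signalling CID, fragmenting the payload to the
 * connection MTU.
 */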
1817 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1818 u8 code, u8 ident, u16 dlen, void *data)
1819 {
1820 struct sk_buff *skb, **frag;
1821 struct l2cap_cmd_hdr *cmd;
1822 struct l2cap_hdr *lh;
1823 int len, count;
1824
1825 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1826 conn, code, ident, dlen);
1827
1828 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1829 count = min_t(unsigned int, conn->mtu, len);
1830
1831 skb = bt_skb_alloc(count, GFP_ATOMIC);
1832 if (!skb)
1833 return NULL;
1834
1835 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1836 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1837
1838 if (conn->hcon->type == LE_LINK)
1839 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1840 else
1841 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1842
1843 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1844 cmd->code = code;
1845 cmd->ident = ident;
1846 cmd->len = cpu_to_le16(dlen);
1847
1848 if (dlen) {
1849 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1850 memcpy(skb_put(skb, count), data, count);
1851 data += count;
1852 }
1853
1854 len -= skb->len;
1855
1856 /* Continuation fragments (no L2CAP header) */
1857 frag = &skb_shinfo(skb)->frag_list;
1858 while (len) {
1859 count = min_t(unsigned int, conn->mtu, len);
1860
1861 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1862 if (!*frag)
1863 goto fail;
1864
1865 memcpy(skb_put(*frag, count), data, count);
1866
1867 len -= count;
1868 data += count;
1869
1870 frag = &(*frag)->next;
1871 }
1872
1873 return skb;
1874
1875 fail:
1876 kfree_skb(skb);
1877 return NULL;
1878 }
1879
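/* Read one configuration option, returning its total encoded length
 * and advancing the parse pointer past it.
 */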
1880 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1881 {
1882 struct l2cap_conf_opt *opt = *ptr;
1883 int len;
1884
1885 len = L2CAP_CONF_OPT_SIZE + opt->len;
1886 *ptr += len;
1887
1888 *type = opt->type;
1889 *olen = opt->len;
1890
1891 switch (opt->len) {
1892 case 1:
1893 *val = *((u8 *) opt->val);
1894 break;
1895
1896 case 2:
1897 *val = get_unaligned_le16(opt->val);
1898 break;
1899
1900 case 4:
1901 *val = get_unaligned_le32(opt->val);
1902 break;
1903
1904 default:
1905 *val = (unsigned long) opt->val;
1906 break;
1907 }
1908
1909 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1910 return len;
1911 }
1912
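/* Append one type/length/value configuration option at *ptr and
 * advance the pointer past it.
 */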
1913 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1914 {
1915 struct l2cap_conf_opt *opt = *ptr;
1916
1917 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1918
1919 opt->type = type;
1920 opt->len = len;
1921
1922 switch (len) {
1923 case 1:
1924 *((u8 *) opt->val) = val;
1925 break;
1926
1927 case 2:
1928 put_unaligned_le16(val, opt->val);
1929 break;
1930
1931 case 4:
1932 put_unaligned_le32(val, opt->val);
1933 break;
1934
1935 default:
1936 memcpy(opt->val, (void *) val, len);
1937 break;
1938 }
1939
1940 *ptr += L2CAP_CONF_OPT_SIZE + len;
1941 }
1942
1943 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1944 {
1945 struct l2cap_conf_efs efs;
1946
1947 switch (chan->mode) {
1948 case L2CAP_MODE_ERTM:
1949 efs.id = chan->local_id;
1950 efs.stype = chan->local_stype;
1951 efs.msdu = cpu_to_le16(chan->local_msdu);
1952 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1953 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1954 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1955 break;
1956
1957 case L2CAP_MODE_STREAMING:
1958 efs.id = 1;
1959 efs.stype = L2CAP_SERV_BESTEFFORT;
1960 efs.msdu = cpu_to_le16(chan->local_msdu);
1961 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1962 efs.acc_lat = 0;
1963 efs.flush_to = 0;
1964 break;
1965
1966 default:
1967 return;
1968 }
1969
1970 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1971 (unsigned long) &efs);
1972 }
1973
1974 static void l2cap_ack_timeout(struct work_struct *work)
1975 {
1976 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1977 ack_timer.work);
1978
1979 BT_DBG("chan %p", chan);
1980
1981 lock_sock(chan->sk);
1982 __l2cap_send_ack(chan);
1983 release_sock(chan->sk);
1984
1985 l2cap_chan_put(chan);
1986 }
1987
1988 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1989 {
1990 chan->expected_ack_seq = 0;
1991 chan->unacked_frames = 0;
1992 chan->buffer_seq = 0;
1993 chan->num_acked = 0;
1994 chan->frames_sent = 0;
1995
1996 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
1997 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
1998 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
1999
2000 skb_queue_head_init(&chan->srej_q);
2001
2002 INIT_LIST_HEAD(&chan->srej_l);
2003 }
2004
2005 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2006 {
2007 switch (mode) {
2008 case L2CAP_MODE_STREAMING:
2009 case L2CAP_MODE_ERTM:
2010 if (l2cap_mode_supported(mode, remote_feat_mask))
2011 return mode;
2012 /* fall through */
2013 default:
2014 return L2CAP_MODE_BASIC;
2015 }
2016 }
2017
2018 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2019 {
2020 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2021 }
2022
2023 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2024 {
2025 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2026 }
2027
2028 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2029 {
2030 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2031 __l2cap_ews_supported(chan)) {
2032 /* use extended control field */
2033 set_bit(FLAG_EXT_CTRL, &chan->flags);
2034 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2035 } else {
2036 chan->tx_win = min_t(u16, chan->tx_win,
2037 L2CAP_DEFAULT_TX_WINDOW);
2038 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2039 }
2040 }
2041
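/* Build the local configuration request: select the channel mode, then
 * add MTU, RFC, FCS, EFS and extended-window options as applicable.
 * Returns the length of the request written to data.
 */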
2042 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2043 {
2044 struct l2cap_conf_req *req = data;
2045 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2046 void *ptr = req->data;
2047 u16 size;
2048
2049 BT_DBG("chan %p", chan);
2050
2051 if (chan->num_conf_req || chan->num_conf_rsp)
2052 goto done;
2053
2054 switch (chan->mode) {
2055 case L2CAP_MODE_STREAMING:
2056 case L2CAP_MODE_ERTM:
2057 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2058 break;
2059
2060 if (__l2cap_efs_supported(chan))
2061 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2062
2063 /* fall through */
2064 default:
2065 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2066 break;
2067 }
2068
2069 done:
2070 if (chan->imtu != L2CAP_DEFAULT_MTU)
2071 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2072
2073 switch (chan->mode) {
2074 case L2CAP_MODE_BASIC:
2075 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2076 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2077 break;
2078
2079 rfc.mode = L2CAP_MODE_BASIC;
2080 rfc.txwin_size = 0;
2081 rfc.max_transmit = 0;
2082 rfc.retrans_timeout = 0;
2083 rfc.monitor_timeout = 0;
2084 rfc.max_pdu_size = 0;
2085
2086 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2087 (unsigned long) &rfc);
2088 break;
2089
2090 case L2CAP_MODE_ERTM:
2091 rfc.mode = L2CAP_MODE_ERTM;
2092 rfc.max_transmit = chan->max_tx;
2093 rfc.retrans_timeout = 0;
2094 rfc.monitor_timeout = 0;
2095
2096 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2097 L2CAP_EXT_HDR_SIZE -
2098 L2CAP_SDULEN_SIZE -
2099 L2CAP_FCS_SIZE);
2100 rfc.max_pdu_size = cpu_to_le16(size);
2101
2102 l2cap_txwin_setup(chan);
2103
2104 rfc.txwin_size = min_t(u16, chan->tx_win,
2105 L2CAP_DEFAULT_TX_WINDOW);
2106
2107 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2108 (unsigned long) &rfc);
2109
2110 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2111 l2cap_add_opt_efs(&ptr, chan);
2112
2113 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2114 break;
2115
2116 if (chan->fcs == L2CAP_FCS_NONE ||
2117 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2118 chan->fcs = L2CAP_FCS_NONE;
2119 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2120 }
2121
2122 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2123 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2124 chan->tx_win);
2125 break;
2126
2127 case L2CAP_MODE_STREAMING:
2128 rfc.mode = L2CAP_MODE_STREAMING;
2129 rfc.txwin_size = 0;
2130 rfc.max_transmit = 0;
2131 rfc.retrans_timeout = 0;
2132 rfc.monitor_timeout = 0;
2133
2134 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2135 L2CAP_EXT_HDR_SIZE -
2136 L2CAP_SDULEN_SIZE -
2137 L2CAP_FCS_SIZE);
2138 rfc.max_pdu_size = cpu_to_le16(size);
2139
2140 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2141 (unsigned long) &rfc);
2142
2143 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2144 l2cap_add_opt_efs(&ptr, chan);
2145
2146 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2147 break;
2148
2149 if (chan->fcs == L2CAP_FCS_NONE ||
2150 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2151 chan->fcs = L2CAP_FCS_NONE;
2152 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2153 }
2154 break;
2155 }
2156
2157 req->dcid = cpu_to_le16(chan->dcid);
2158 req->flags = cpu_to_le16(0);
2159
2160 return ptr - data;
2161 }
2162
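/* Parse the peer's configuration request stored in chan->conf_req and
 * build the response in data, accepting, adjusting or rejecting each
 * option.
 */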
2163 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2164 {
2165 struct l2cap_conf_rsp *rsp = data;
2166 void *ptr = rsp->data;
2167 void *req = chan->conf_req;
2168 int len = chan->conf_len;
2169 int type, hint, olen;
2170 unsigned long val;
2171 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2172 struct l2cap_conf_efs efs;
2173 u8 remote_efs = 0;
2174 u16 mtu = L2CAP_DEFAULT_MTU;
2175 u16 result = L2CAP_CONF_SUCCESS;
2176 u16 size;
2177
2178 BT_DBG("chan %p", chan);
2179
2180 while (len >= L2CAP_CONF_OPT_SIZE) {
2181 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2182
2183 hint = type & L2CAP_CONF_HINT;
2184 type &= L2CAP_CONF_MASK;
2185
2186 switch (type) {
2187 case L2CAP_CONF_MTU:
2188 mtu = val;
2189 break;
2190
2191 case L2CAP_CONF_FLUSH_TO:
2192 chan->flush_to = val;
2193 break;
2194
2195 case L2CAP_CONF_QOS:
2196 break;
2197
2198 case L2CAP_CONF_RFC:
2199 if (olen == sizeof(rfc))
2200 memcpy(&rfc, (void *) val, olen);
2201 break;
2202
2203 case L2CAP_CONF_FCS:
2204 if (val == L2CAP_FCS_NONE)
2205 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2206 break;
2207
2208 case L2CAP_CONF_EFS:
2209 remote_efs = 1;
2210 if (olen == sizeof(efs))
2211 memcpy(&efs, (void *) val, olen);
2212 break;
2213
2214 case L2CAP_CONF_EWS:
2215 if (!enable_hs)
2216 return -ECONNREFUSED;
2217
2218 set_bit(FLAG_EXT_CTRL, &chan->flags);
2219 set_bit(CONF_EWS_RECV, &chan->conf_state);
2220 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2221 chan->remote_tx_win = val;
2222 break;
2223
2224 default:
2225 if (hint)
2226 break;
2227
2228 result = L2CAP_CONF_UNKNOWN;
2229 *((u8 *) ptr++) = type;
2230 break;
2231 }
2232 }
2233
2234 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2235 goto done;
2236
2237 switch (chan->mode) {
2238 case L2CAP_MODE_STREAMING:
2239 case L2CAP_MODE_ERTM:
2240 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2241 chan->mode = l2cap_select_mode(rfc.mode,
2242 chan->conn->feat_mask);
2243 break;
2244 }
2245
2246 if (remote_efs) {
2247 if (__l2cap_efs_supported(chan))
2248 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2249 else
2250 return -ECONNREFUSED;
2251 }
2252
2253 if (chan->mode != rfc.mode)
2254 return -ECONNREFUSED;
2255
2256 break;
2257 }
2258
2259 done:
2260 if (chan->mode != rfc.mode) {
2261 result = L2CAP_CONF_UNACCEPT;
2262 rfc.mode = chan->mode;
2263
2264 if (chan->num_conf_rsp == 1)
2265 return -ECONNREFUSED;
2266
2267 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2268 sizeof(rfc), (unsigned long) &rfc);
2269 }
2270
2271 if (result == L2CAP_CONF_SUCCESS) {
2272 /* Configure output options and let the other side know
2273 * which ones we don't like. */
2274
2275 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2276 result = L2CAP_CONF_UNACCEPT;
2277 else {
2278 chan->omtu = mtu;
2279 set_bit(CONF_MTU_DONE, &chan->conf_state);
2280 }
2281 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2282
2283 if (remote_efs) {
2284 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2285 efs.stype != L2CAP_SERV_NOTRAFIC &&
2286 efs.stype != chan->local_stype) {
2287
2288 result = L2CAP_CONF_UNACCEPT;
2289
2290 if (chan->num_conf_req >= 1)
2291 return -ECONNREFUSED;
2292
2293 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2294 sizeof(efs),
2295 (unsigned long) &efs);
2296 } else {
2297 /* Send PENDING Conf Rsp */
2298 result = L2CAP_CONF_PENDING;
2299 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2300 }
2301 }
2302
2303 switch (rfc.mode) {
2304 case L2CAP_MODE_BASIC:
2305 chan->fcs = L2CAP_FCS_NONE;
2306 set_bit(CONF_MODE_DONE, &chan->conf_state);
2307 break;
2308
2309 case L2CAP_MODE_ERTM:
2310 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2311 chan->remote_tx_win = rfc.txwin_size;
2312 else
2313 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2314
2315 chan->remote_max_tx = rfc.max_transmit;
2316
2317 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2318 chan->conn->mtu -
2319 L2CAP_EXT_HDR_SIZE -
2320 L2CAP_SDULEN_SIZE -
2321 L2CAP_FCS_SIZE);
2322 rfc.max_pdu_size = cpu_to_le16(size);
2323 chan->remote_mps = size;
2324
2325 			rfc.retrans_timeout =
2326 				cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2327 			rfc.monitor_timeout =
2328 				cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2329
2330 set_bit(CONF_MODE_DONE, &chan->conf_state);
2331
2332 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2333 sizeof(rfc), (unsigned long) &rfc);
2334
2335 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2336 chan->remote_id = efs.id;
2337 chan->remote_stype = efs.stype;
2338 chan->remote_msdu = le16_to_cpu(efs.msdu);
2339 chan->remote_flush_to =
2340 le32_to_cpu(efs.flush_to);
2341 chan->remote_acc_lat =
2342 le32_to_cpu(efs.acc_lat);
2343 chan->remote_sdu_itime =
2344 le32_to_cpu(efs.sdu_itime);
2345 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2346 sizeof(efs), (unsigned long) &efs);
2347 }
2348 break;
2349
2350 case L2CAP_MODE_STREAMING:
2351 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2352 chan->conn->mtu -
2353 L2CAP_EXT_HDR_SIZE -
2354 L2CAP_SDULEN_SIZE -
2355 L2CAP_FCS_SIZE);
2356 rfc.max_pdu_size = cpu_to_le16(size);
2357 chan->remote_mps = size;
2358
2359 set_bit(CONF_MODE_DONE, &chan->conf_state);
2360
2361 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2362 sizeof(rfc), (unsigned long) &rfc);
2363
2364 break;
2365
2366 default:
2367 result = L2CAP_CONF_UNACCEPT;
2368
2369 memset(&rfc, 0, sizeof(rfc));
2370 rfc.mode = chan->mode;
2371 }
2372
2373 if (result == L2CAP_CONF_SUCCESS)
2374 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2375 }
2376 rsp->scid = cpu_to_le16(chan->dcid);
2377 rsp->result = cpu_to_le16(result);
2378 rsp->flags = cpu_to_le16(0x0000);
2379
2380 return ptr - data;
2381 }
2382
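/* Parse a Configure Response from the remote side and build a follow-up
 * Configure Request in @data, updating *result as needed.  Returns the
 * length of the new request, or a negative error. */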
2383 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2384 {
2385 struct l2cap_conf_req *req = data;
2386 void *ptr = req->data;
2387 int type, olen;
2388 unsigned long val;
2389 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2390 struct l2cap_conf_efs efs;
2391
2392 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2393
2394 while (len >= L2CAP_CONF_OPT_SIZE) {
2395 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2396
2397 switch (type) {
2398 case L2CAP_CONF_MTU:
2399 if (val < L2CAP_DEFAULT_MIN_MTU) {
2400 *result = L2CAP_CONF_UNACCEPT;
2401 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2402 } else
2403 chan->imtu = val;
2404 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2405 break;
2406
2407 case L2CAP_CONF_FLUSH_TO:
2408 chan->flush_to = val;
2409 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2410 2, chan->flush_to);
2411 break;
2412
2413 case L2CAP_CONF_RFC:
2414 if (olen == sizeof(rfc))
2415 memcpy(&rfc, (void *)val, olen);
2416
2417 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2418 rfc.mode != chan->mode)
2419 return -ECONNREFUSED;
2420
2421 chan->fcs = 0;
2422
2423 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2424 sizeof(rfc), (unsigned long) &rfc);
2425 break;
2426
2427 case L2CAP_CONF_EWS:
2428 chan->tx_win = min_t(u16, val,
2429 L2CAP_DEFAULT_EXT_WINDOW);
2430 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2431 chan->tx_win);
2432 break;
2433
2434 case L2CAP_CONF_EFS:
2435 if (olen == sizeof(efs))
2436 memcpy(&efs, (void *)val, olen);
2437
2438 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2439 efs.stype != L2CAP_SERV_NOTRAFIC &&
2440 efs.stype != chan->local_stype)
2441 return -ECONNREFUSED;
2442
2443 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2444 sizeof(efs), (unsigned long) &efs);
2445 break;
2446 }
2447 }
2448
2449 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2450 return -ECONNREFUSED;
2451
2452 chan->mode = rfc.mode;
2453
2454 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2455 switch (rfc.mode) {
2456 case L2CAP_MODE_ERTM:
2457 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2458 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2459 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2460
2461 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2462 chan->local_msdu = le16_to_cpu(efs.msdu);
2463 chan->local_sdu_itime =
2464 le32_to_cpu(efs.sdu_itime);
2465 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2466 chan->local_flush_to =
2467 le32_to_cpu(efs.flush_to);
2468 }
2469 break;
2470
2471 case L2CAP_MODE_STREAMING:
2472 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2473 }
2474 }
2475
2476 req->dcid = cpu_to_le16(chan->dcid);
2477 req->flags = cpu_to_le16(0x0000);
2478
2479 return ptr - data;
2480 }
2481
2482 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2483 {
2484 struct l2cap_conf_rsp *rsp = data;
2485 void *ptr = rsp->data;
2486
2487 BT_DBG("chan %p", chan);
2488
2489 rsp->scid = cpu_to_le16(chan->dcid);
2490 rsp->result = cpu_to_le16(result);
2491 rsp->flags = cpu_to_le16(flags);
2492
2493 return ptr - data;
2494 }
2495
2496 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2497 {
2498 struct l2cap_conn_rsp rsp;
2499 struct l2cap_conn *conn = chan->conn;
2500 u8 buf[128];
2501
2502 rsp.scid = cpu_to_le16(chan->dcid);
2503 rsp.dcid = cpu_to_le16(chan->scid);
2504 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2505 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2506 l2cap_send_cmd(conn, chan->ident,
2507 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2508
2509 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2510 return;
2511
2512 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2513 l2cap_build_conf_req(chan, buf), buf);
2514 chan->num_conf_req++;
2515 }
2516
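/* Pull the RFC option out of a successful Configure Response and apply
 * its timeouts and maximum PDU size to the channel (ERTM and streaming
 * modes only). */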
2517 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2518 {
2519 int type, olen;
2520 unsigned long val;
2521 struct l2cap_conf_rfc rfc;
2522
2523 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2524
2525 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2526 return;
2527
2528 while (len >= L2CAP_CONF_OPT_SIZE) {
2529 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2530
2531 switch (type) {
2532 case L2CAP_CONF_RFC:
2533 if (olen == sizeof(rfc))
2534 memcpy(&rfc, (void *)val, olen);
2535 goto done;
2536 }
2537 }
2538
2539 /* Use sane default values in case a misbehaving remote device
2540 * did not send an RFC option.
2541 */
2542 rfc.mode = chan->mode;
2543 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2544 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2545 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2546
2547 BT_ERR("Expected RFC option was not found, using defaults");
2548
2549 done:
2550 switch (rfc.mode) {
2551 case L2CAP_MODE_ERTM:
2552 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2553 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2554 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2555 break;
2556 case L2CAP_MODE_STREAMING:
2557 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2558 }
2559 }
2560
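/* Handle a Command Reject: if it refers to our outstanding Information
 * Request, treat the feature mask exchange as complete and start any
 * pending channels. */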
2561 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2562 {
2563 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2564
2565 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2566 return 0;
2567
2568 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2569 cmd->ident == conn->info_ident) {
2570 cancel_delayed_work(&conn->info_timer);
2571
2572 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2573 conn->info_ident = 0;
2574
2575 l2cap_conn_start(conn);
2576 }
2577
2578 return 0;
2579 }
2580
2581 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2582 {
2583 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2584 struct l2cap_conn_rsp rsp;
2585 struct l2cap_chan *chan = NULL, *pchan;
2586 struct sock *parent, *sk = NULL;
2587 int result, status = L2CAP_CS_NO_INFO;
2588
2589 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2590 __le16 psm = req->psm;
2591
2592 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2593
2594 	/* Check if we have a socket listening on this PSM */
2595 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2596 if (!pchan) {
2597 result = L2CAP_CR_BAD_PSM;
2598 goto sendresp;
2599 }
2600
2601 parent = pchan->sk;
2602
2603 lock_sock(parent);
2604
2605 /* Check if the ACL is secure enough (if not SDP) */
2606 if (psm != cpu_to_le16(0x0001) &&
2607 !hci_conn_check_link_mode(conn->hcon)) {
2608 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2609 result = L2CAP_CR_SEC_BLOCK;
2610 goto response;
2611 }
2612
2613 result = L2CAP_CR_NO_MEM;
2614
2615 /* Check for backlog size */
2616 if (sk_acceptq_is_full(parent)) {
2617 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2618 goto response;
2619 }
2620
2621 chan = pchan->ops->new_connection(pchan->data);
2622 if (!chan)
2623 goto response;
2624
2625 sk = chan->sk;
2626
2627 	/* Check if we already have a channel with that dcid */
2628 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2629 sock_set_flag(sk, SOCK_ZAPPED);
2630 chan->ops->close(chan->data);
2631 goto response;
2632 }
2633
2634 hci_conn_hold(conn->hcon);
2635
2636 bacpy(&bt_sk(sk)->src, conn->src);
2637 bacpy(&bt_sk(sk)->dst, conn->dst);
2638 chan->psm = psm;
2639 chan->dcid = scid;
2640
2641 bt_accept_enqueue(parent, sk);
2642
2643 l2cap_chan_add(conn, chan);
2644
2645 dcid = chan->scid;
2646
2647 __set_chan_timer(chan, sk->sk_sndtimeo);
2648
2649 chan->ident = cmd->ident;
2650
2651 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2652 if (l2cap_chan_check_security(chan)) {
2653 if (bt_sk(sk)->defer_setup) {
2654 l2cap_state_change(chan, BT_CONNECT2);
2655 result = L2CAP_CR_PEND;
2656 status = L2CAP_CS_AUTHOR_PEND;
2657 parent->sk_data_ready(parent, 0);
2658 } else {
2659 l2cap_state_change(chan, BT_CONFIG);
2660 result = L2CAP_CR_SUCCESS;
2661 status = L2CAP_CS_NO_INFO;
2662 }
2663 } else {
2664 l2cap_state_change(chan, BT_CONNECT2);
2665 result = L2CAP_CR_PEND;
2666 status = L2CAP_CS_AUTHEN_PEND;
2667 }
2668 } else {
2669 l2cap_state_change(chan, BT_CONNECT2);
2670 result = L2CAP_CR_PEND;
2671 status = L2CAP_CS_NO_INFO;
2672 }
2673
2674 response:
2675 release_sock(parent);
2676
2677 sendresp:
2678 rsp.scid = cpu_to_le16(scid);
2679 rsp.dcid = cpu_to_le16(dcid);
2680 rsp.result = cpu_to_le16(result);
2681 rsp.status = cpu_to_le16(status);
2682 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2683
2684 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2685 struct l2cap_info_req info;
2686 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2687
2688 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2689 conn->info_ident = l2cap_get_ident(conn);
2690
2691 schedule_delayed_work(&conn->info_timer,
2692 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2693
2694 l2cap_send_cmd(conn, conn->info_ident,
2695 L2CAP_INFO_REQ, sizeof(info), &info);
2696 }
2697
2698 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2699 result == L2CAP_CR_SUCCESS) {
2700 u8 buf[128];
2701 set_bit(CONF_REQ_SENT, &chan->conf_state);
2702 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2703 l2cap_build_conf_req(chan, buf), buf);
2704 chan->num_conf_req++;
2705 }
2706
2707 return 0;
2708 }
2709
2710 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2711 {
2712 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2713 u16 scid, dcid, result, status;
2714 struct l2cap_chan *chan;
2715 struct sock *sk;
2716 u8 req[128];
2717
2718 scid = __le16_to_cpu(rsp->scid);
2719 dcid = __le16_to_cpu(rsp->dcid);
2720 result = __le16_to_cpu(rsp->result);
2721 status = __le16_to_cpu(rsp->status);
2722
2723 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2724
2725 if (scid) {
2726 chan = l2cap_get_chan_by_scid(conn, scid);
2727 if (!chan)
2728 return -EFAULT;
2729 } else {
2730 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2731 if (!chan)
2732 return -EFAULT;
2733 }
2734
2735 sk = chan->sk;
2736
2737 switch (result) {
2738 case L2CAP_CR_SUCCESS:
2739 l2cap_state_change(chan, BT_CONFIG);
2740 chan->ident = 0;
2741 chan->dcid = dcid;
2742 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2743
2744 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2745 break;
2746
2747 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2748 l2cap_build_conf_req(chan, req), req);
2749 chan->num_conf_req++;
2750 break;
2751
2752 case L2CAP_CR_PEND:
2753 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2754 break;
2755
2756 default:
2757 l2cap_chan_del(chan, ECONNREFUSED);
2758 break;
2759 }
2760
2761 release_sock(sk);
2762 return 0;
2763 }
2764
2765 static inline void set_default_fcs(struct l2cap_chan *chan)
2766 {
2767 /* FCS is enabled only in ERTM or streaming mode, if one or both
2768 * sides request it.
2769 */
2770 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2771 chan->fcs = L2CAP_FCS_NONE;
2772 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2773 chan->fcs = L2CAP_FCS_CRC16;
2774 }
2775
2776 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2777 {
2778 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2779 u16 dcid, flags;
2780 u8 rsp[64];
2781 struct l2cap_chan *chan;
2782 struct sock *sk;
2783 int len;
2784
2785 dcid = __le16_to_cpu(req->dcid);
2786 flags = __le16_to_cpu(req->flags);
2787
2788 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2789
2790 chan = l2cap_get_chan_by_scid(conn, dcid);
2791 if (!chan)
2792 return -ENOENT;
2793
2794 sk = chan->sk;
2795
2796 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2797 struct l2cap_cmd_rej_cid rej;
2798
2799 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2800 rej.scid = cpu_to_le16(chan->scid);
2801 rej.dcid = cpu_to_le16(chan->dcid);
2802
2803 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2804 sizeof(rej), &rej);
2805 goto unlock;
2806 }
2807
2808 /* Reject if config buffer is too small. */
2809 len = cmd_len - sizeof(*req);
2810 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2811 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2812 l2cap_build_conf_rsp(chan, rsp,
2813 L2CAP_CONF_REJECT, flags), rsp);
2814 goto unlock;
2815 }
2816
2817 /* Store config. */
2818 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2819 chan->conf_len += len;
2820
2821 if (flags & 0x0001) {
2822 /* Incomplete config. Send empty response. */
2823 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2824 l2cap_build_conf_rsp(chan, rsp,
2825 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2826 goto unlock;
2827 }
2828
2829 /* Complete config. */
2830 len = l2cap_parse_conf_req(chan, rsp);
2831 if (len < 0) {
2832 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2833 goto unlock;
2834 }
2835
2836 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2837 chan->num_conf_rsp++;
2838
2839 /* Reset config buffer. */
2840 chan->conf_len = 0;
2841
2842 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2843 goto unlock;
2844
2845 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2846 set_default_fcs(chan);
2847
2848 l2cap_state_change(chan, BT_CONNECTED);
2849
2850 chan->next_tx_seq = 0;
2851 chan->expected_tx_seq = 0;
2852 skb_queue_head_init(&chan->tx_q);
2853 if (chan->mode == L2CAP_MODE_ERTM)
2854 l2cap_ertm_init(chan);
2855
2856 l2cap_chan_ready(chan);
2857 goto unlock;
2858 }
2859
2860 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2861 u8 buf[64];
2862 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2863 l2cap_build_conf_req(chan, buf), buf);
2864 chan->num_conf_req++;
2865 }
2866
2867 	/* Got a Conf Rsp PENDING from the remote side and assume we sent
2868 	   a Conf Rsp PENDING in the code above */
2869 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2870 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2871
2872 /* check compatibility */
2873
2874 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2875 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2876
2877 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2878 l2cap_build_conf_rsp(chan, rsp,
2879 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2880 }
2881
2882 unlock:
2883 release_sock(sk);
2884 return 0;
2885 }
2886
2887 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2888 {
2889 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2890 u16 scid, flags, result;
2891 struct l2cap_chan *chan;
2892 struct sock *sk;
2893 int len = cmd->len - sizeof(*rsp);
2894
2895 scid = __le16_to_cpu(rsp->scid);
2896 flags = __le16_to_cpu(rsp->flags);
2897 result = __le16_to_cpu(rsp->result);
2898
2899 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2900 scid, flags, result);
2901
2902 chan = l2cap_get_chan_by_scid(conn, scid);
2903 if (!chan)
2904 return 0;
2905
2906 sk = chan->sk;
2907
2908 switch (result) {
2909 case L2CAP_CONF_SUCCESS:
2910 l2cap_conf_rfc_get(chan, rsp->data, len);
2911 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2912 break;
2913
2914 case L2CAP_CONF_PENDING:
2915 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2916
2917 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2918 char buf[64];
2919
2920 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2921 buf, &result);
2922 if (len < 0) {
2923 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2924 goto done;
2925 }
2926
2927 /* check compatibility */
2928
2929 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2930 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2931
2932 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2933 l2cap_build_conf_rsp(chan, buf,
2934 L2CAP_CONF_SUCCESS, 0x0000), buf);
2935 }
2936 goto done;
2937
2938 case L2CAP_CONF_UNACCEPT:
2939 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2940 char req[64];
2941
2942 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2943 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2944 goto done;
2945 }
2946
2947 /* throw out any old stored conf requests */
2948 result = L2CAP_CONF_SUCCESS;
2949 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2950 req, &result);
2951 if (len < 0) {
2952 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2953 goto done;
2954 }
2955
2956 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2957 L2CAP_CONF_REQ, len, req);
2958 chan->num_conf_req++;
2959 if (result != L2CAP_CONF_SUCCESS)
2960 goto done;
2961 break;
2962 }
2963
2964 default:
2965 sk->sk_err = ECONNRESET;
2966 __set_chan_timer(chan,
2967 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT));
2968 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2969 goto done;
2970 }
2971
2972 if (flags & 0x01)
2973 goto done;
2974
2975 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2976
2977 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2978 set_default_fcs(chan);
2979
2980 l2cap_state_change(chan, BT_CONNECTED);
2981 chan->next_tx_seq = 0;
2982 chan->expected_tx_seq = 0;
2983 skb_queue_head_init(&chan->tx_q);
2984 if (chan->mode == L2CAP_MODE_ERTM)
2985 l2cap_ertm_init(chan);
2986
2987 l2cap_chan_ready(chan);
2988 }
2989
2990 done:
2991 release_sock(sk);
2992 return 0;
2993 }
2994
2995 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2996 {
2997 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2998 struct l2cap_disconn_rsp rsp;
2999 u16 dcid, scid;
3000 struct l2cap_chan *chan;
3001 struct sock *sk;
3002
3003 scid = __le16_to_cpu(req->scid);
3004 dcid = __le16_to_cpu(req->dcid);
3005
3006 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3007
3008 chan = l2cap_get_chan_by_scid(conn, dcid);
3009 if (!chan)
3010 return 0;
3011
3012 sk = chan->sk;
3013
3014 rsp.dcid = cpu_to_le16(chan->scid);
3015 rsp.scid = cpu_to_le16(chan->dcid);
3016 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3017
3018 sk->sk_shutdown = SHUTDOWN_MASK;
3019
3020 l2cap_chan_del(chan, ECONNRESET);
3021 release_sock(sk);
3022
3023 chan->ops->close(chan->data);
3024 return 0;
3025 }
3026
3027 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3028 {
3029 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3030 u16 dcid, scid;
3031 struct l2cap_chan *chan;
3032 struct sock *sk;
3033
3034 scid = __le16_to_cpu(rsp->scid);
3035 dcid = __le16_to_cpu(rsp->dcid);
3036
3037 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3038
3039 chan = l2cap_get_chan_by_scid(conn, scid);
3040 if (!chan)
3041 return 0;
3042
3043 sk = chan->sk;
3044
3045 l2cap_chan_del(chan, 0);
3046 release_sock(sk);
3047
3048 chan->ops->close(chan->data);
3049 return 0;
3050 }
3051
3052 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3053 {
3054 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3055 u16 type;
3056
3057 type = __le16_to_cpu(req->type);
3058
3059 BT_DBG("type 0x%4.4x", type);
3060
3061 if (type == L2CAP_IT_FEAT_MASK) {
3062 u8 buf[8];
3063 u32 feat_mask = l2cap_feat_mask;
3064 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3065 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3066 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3067 if (!disable_ertm)
3068 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3069 | L2CAP_FEAT_FCS;
3070 if (enable_hs)
3071 feat_mask |= L2CAP_FEAT_EXT_FLOW
3072 | L2CAP_FEAT_EXT_WINDOW;
3073
3074 put_unaligned_le32(feat_mask, rsp->data);
3075 l2cap_send_cmd(conn, cmd->ident,
3076 L2CAP_INFO_RSP, sizeof(buf), buf);
3077 } else if (type == L2CAP_IT_FIXED_CHAN) {
3078 u8 buf[12];
3079 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3080
3081 if (enable_hs)
3082 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3083 else
3084 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3085
3086 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3087 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3088 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3089 l2cap_send_cmd(conn, cmd->ident,
3090 L2CAP_INFO_RSP, sizeof(buf), buf);
3091 } else {
3092 struct l2cap_info_rsp rsp;
3093 rsp.type = cpu_to_le16(type);
3094 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3095 l2cap_send_cmd(conn, cmd->ident,
3096 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3097 }
3098
3099 return 0;
3100 }
3101
3102 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3103 {
3104 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3105 u16 type, result;
3106
3107 type = __le16_to_cpu(rsp->type);
3108 result = __le16_to_cpu(rsp->result);
3109
3110 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3111
3112 	/* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
3113 if (cmd->ident != conn->info_ident ||
3114 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3115 return 0;
3116
3117 cancel_delayed_work(&conn->info_timer);
3118
3119 if (result != L2CAP_IR_SUCCESS) {
3120 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3121 conn->info_ident = 0;
3122
3123 l2cap_conn_start(conn);
3124
3125 return 0;
3126 }
3127
3128 if (type == L2CAP_IT_FEAT_MASK) {
3129 conn->feat_mask = get_unaligned_le32(rsp->data);
3130
3131 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3132 struct l2cap_info_req req;
3133 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3134
3135 conn->info_ident = l2cap_get_ident(conn);
3136
3137 l2cap_send_cmd(conn, conn->info_ident,
3138 L2CAP_INFO_REQ, sizeof(req), &req);
3139 } else {
3140 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3141 conn->info_ident = 0;
3142
3143 l2cap_conn_start(conn);
3144 }
3145 } else if (type == L2CAP_IT_FIXED_CHAN) {
3146 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3147 conn->info_ident = 0;
3148
3149 l2cap_conn_start(conn);
3150 }
3151
3152 return 0;
3153 }
3154
3155 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3156 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3157 void *data)
3158 {
3159 struct l2cap_create_chan_req *req = data;
3160 struct l2cap_create_chan_rsp rsp;
3161 u16 psm, scid;
3162
3163 if (cmd_len != sizeof(*req))
3164 return -EPROTO;
3165
3166 if (!enable_hs)
3167 return -EINVAL;
3168
3169 psm = le16_to_cpu(req->psm);
3170 scid = le16_to_cpu(req->scid);
3171
3172 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3173
3174 /* Placeholder: Always reject */
3175 rsp.dcid = 0;
3176 rsp.scid = cpu_to_le16(scid);
3177 rsp.result = L2CAP_CR_NO_MEM;
3178 rsp.status = L2CAP_CS_NO_INFO;
3179
3180 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3181 sizeof(rsp), &rsp);
3182
3183 return 0;
3184 }
3185
3186 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3187 struct l2cap_cmd_hdr *cmd, void *data)
3188 {
3189 BT_DBG("conn %p", conn);
3190
3191 return l2cap_connect_rsp(conn, cmd, data);
3192 }
3193
3194 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3195 u16 icid, u16 result)
3196 {
3197 struct l2cap_move_chan_rsp rsp;
3198
3199 BT_DBG("icid %d, result %d", icid, result);
3200
3201 rsp.icid = cpu_to_le16(icid);
3202 rsp.result = cpu_to_le16(result);
3203
3204 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3205 }
3206
3207 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3208 struct l2cap_chan *chan, u16 icid, u16 result)
3209 {
3210 struct l2cap_move_chan_cfm cfm;
3211 u8 ident;
3212
3213 BT_DBG("icid %d, result %d", icid, result);
3214
3215 ident = l2cap_get_ident(conn);
3216 if (chan)
3217 chan->ident = ident;
3218
3219 cfm.icid = cpu_to_le16(icid);
3220 cfm.result = cpu_to_le16(result);
3221
3222 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3223 }
3224
3225 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3226 u16 icid)
3227 {
3228 struct l2cap_move_chan_cfm_rsp rsp;
3229
3230 BT_DBG("icid %d", icid);
3231
3232 rsp.icid = cpu_to_le16(icid);
3233 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3234 }
3235
3236 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3237 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3238 {
3239 struct l2cap_move_chan_req *req = data;
3240 u16 icid = 0;
3241 u16 result = L2CAP_MR_NOT_ALLOWED;
3242
3243 if (cmd_len != sizeof(*req))
3244 return -EPROTO;
3245
3246 icid = le16_to_cpu(req->icid);
3247
3248 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3249
3250 if (!enable_hs)
3251 return -EINVAL;
3252
3253 /* Placeholder: Always refuse */
3254 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3255
3256 return 0;
3257 }
3258
3259 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3260 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3261 {
3262 struct l2cap_move_chan_rsp *rsp = data;
3263 u16 icid, result;
3264
3265 if (cmd_len != sizeof(*rsp))
3266 return -EPROTO;
3267
3268 icid = le16_to_cpu(rsp->icid);
3269 result = le16_to_cpu(rsp->result);
3270
3271 BT_DBG("icid %d, result %d", icid, result);
3272
3273 /* Placeholder: Always unconfirmed */
3274 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3275
3276 return 0;
3277 }
3278
3279 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3280 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3281 {
3282 struct l2cap_move_chan_cfm *cfm = data;
3283 u16 icid, result;
3284
3285 if (cmd_len != sizeof(*cfm))
3286 return -EPROTO;
3287
3288 icid = le16_to_cpu(cfm->icid);
3289 result = le16_to_cpu(cfm->result);
3290
3291 BT_DBG("icid %d, result %d", icid, result);
3292
3293 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3294
3295 return 0;
3296 }
3297
3298 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3299 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3300 {
3301 struct l2cap_move_chan_cfm_rsp *rsp = data;
3302 u16 icid;
3303
3304 if (cmd_len != sizeof(*rsp))
3305 return -EPROTO;
3306
3307 icid = le16_to_cpu(rsp->icid);
3308
3309 BT_DBG("icid %d", icid);
3310
3311 return 0;
3312 }
3313
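/* Validate LE connection parameters from a Connection Parameter Update
 * Request: connection interval 6-3200 (1.25 ms units), supervision timeout
 * 10-3200 (10 ms units), and a slave latency no greater than 499 that
 * still fits within the requested timeout. */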
3314 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3315 u16 to_multiplier)
3316 {
3317 u16 max_latency;
3318
3319 if (min > max || min < 6 || max > 3200)
3320 return -EINVAL;
3321
3322 if (to_multiplier < 10 || to_multiplier > 3200)
3323 return -EINVAL;
3324
3325 if (max >= to_multiplier * 8)
3326 return -EINVAL;
3327
3328 max_latency = (to_multiplier * 8 / max) - 1;
3329 if (latency > 499 || latency > max_latency)
3330 return -EINVAL;
3331
3332 return 0;
3333 }
3334
3335 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3336 struct l2cap_cmd_hdr *cmd, u8 *data)
3337 {
3338 struct hci_conn *hcon = conn->hcon;
3339 struct l2cap_conn_param_update_req *req;
3340 struct l2cap_conn_param_update_rsp rsp;
3341 u16 min, max, latency, to_multiplier, cmd_len;
3342 int err;
3343
3344 if (!(hcon->link_mode & HCI_LM_MASTER))
3345 return -EINVAL;
3346
3347 cmd_len = __le16_to_cpu(cmd->len);
3348 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3349 return -EPROTO;
3350
3351 req = (struct l2cap_conn_param_update_req *) data;
3352 min = __le16_to_cpu(req->min);
3353 max = __le16_to_cpu(req->max);
3354 latency = __le16_to_cpu(req->latency);
3355 to_multiplier = __le16_to_cpu(req->to_multiplier);
3356
3357 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3358 min, max, latency, to_multiplier);
3359
3360 memset(&rsp, 0, sizeof(rsp));
3361
3362 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3363 if (err)
3364 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3365 else
3366 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3367
3368 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3369 sizeof(rsp), &rsp);
3370
3371 if (!err)
3372 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3373
3374 return 0;
3375 }
3376
3377 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3378 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3379 {
3380 int err = 0;
3381
3382 switch (cmd->code) {
3383 case L2CAP_COMMAND_REJ:
3384 l2cap_command_rej(conn, cmd, data);
3385 break;
3386
3387 case L2CAP_CONN_REQ:
3388 err = l2cap_connect_req(conn, cmd, data);
3389 break;
3390
3391 case L2CAP_CONN_RSP:
3392 err = l2cap_connect_rsp(conn, cmd, data);
3393 break;
3394
3395 case L2CAP_CONF_REQ:
3396 err = l2cap_config_req(conn, cmd, cmd_len, data);
3397 break;
3398
3399 case L2CAP_CONF_RSP:
3400 err = l2cap_config_rsp(conn, cmd, data);
3401 break;
3402
3403 case L2CAP_DISCONN_REQ:
3404 err = l2cap_disconnect_req(conn, cmd, data);
3405 break;
3406
3407 case L2CAP_DISCONN_RSP:
3408 err = l2cap_disconnect_rsp(conn, cmd, data);
3409 break;
3410
3411 case L2CAP_ECHO_REQ:
3412 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3413 break;
3414
3415 case L2CAP_ECHO_RSP:
3416 break;
3417
3418 case L2CAP_INFO_REQ:
3419 err = l2cap_information_req(conn, cmd, data);
3420 break;
3421
3422 case L2CAP_INFO_RSP:
3423 err = l2cap_information_rsp(conn, cmd, data);
3424 break;
3425
3426 case L2CAP_CREATE_CHAN_REQ:
3427 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3428 break;
3429
3430 case L2CAP_CREATE_CHAN_RSP:
3431 err = l2cap_create_channel_rsp(conn, cmd, data);
3432 break;
3433
3434 case L2CAP_MOVE_CHAN_REQ:
3435 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3436 break;
3437
3438 case L2CAP_MOVE_CHAN_RSP:
3439 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3440 break;
3441
3442 case L2CAP_MOVE_CHAN_CFM:
3443 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3444 break;
3445
3446 case L2CAP_MOVE_CHAN_CFM_RSP:
3447 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3448 break;
3449
3450 default:
3451 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3452 err = -EINVAL;
3453 break;
3454 }
3455
3456 return err;
3457 }
3458
3459 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3460 struct l2cap_cmd_hdr *cmd, u8 *data)
3461 {
3462 switch (cmd->code) {
3463 case L2CAP_COMMAND_REJ:
3464 return 0;
3465
3466 case L2CAP_CONN_PARAM_UPDATE_REQ:
3467 return l2cap_conn_param_update_req(conn, cmd, data);
3468
3469 case L2CAP_CONN_PARAM_UPDATE_RSP:
3470 return 0;
3471
3472 default:
3473 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3474 return -EINVAL;
3475 }
3476 }
3477
3478 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3479 struct sk_buff *skb)
3480 {
3481 u8 *data = skb->data;
3482 int len = skb->len;
3483 struct l2cap_cmd_hdr cmd;
3484 int err;
3485
3486 l2cap_raw_recv(conn, skb);
3487
3488 while (len >= L2CAP_CMD_HDR_SIZE) {
3489 u16 cmd_len;
3490 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3491 data += L2CAP_CMD_HDR_SIZE;
3492 len -= L2CAP_CMD_HDR_SIZE;
3493
3494 cmd_len = le16_to_cpu(cmd.len);
3495
3496 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3497
3498 if (cmd_len > len || !cmd.ident) {
3499 BT_DBG("corrupted command");
3500 break;
3501 }
3502
3503 if (conn->hcon->type == LE_LINK)
3504 err = l2cap_le_sig_cmd(conn, &cmd, data);
3505 else
3506 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3507
3508 if (err) {
3509 struct l2cap_cmd_rej_unk rej;
3510
3511 BT_ERR("Wrong link type (%d)", err);
3512
3513 /* FIXME: Map err to a valid reason */
3514 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3515 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3516 }
3517
3518 data += cmd_len;
3519 len -= cmd_len;
3520 }
3521
3522 kfree_skb(skb);
3523 }
3524
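/* Verify the CRC16 FCS on a received frame when FCS is in use and strip
 * it from the skb.  Returns -EBADMSG on a checksum mismatch. */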
3525 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3526 {
3527 u16 our_fcs, rcv_fcs;
3528 int hdr_size;
3529
3530 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3531 hdr_size = L2CAP_EXT_HDR_SIZE;
3532 else
3533 hdr_size = L2CAP_ENH_HDR_SIZE;
3534
3535 if (chan->fcs == L2CAP_FCS_CRC16) {
3536 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3537 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3538 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3539
3540 if (our_fcs != rcv_fcs)
3541 return -EBADMSG;
3542 }
3543 return 0;
3544 }
3545
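/* Acknowledge received I-frames: report local busy with an RNR frame,
 * retransmit if the remote side was busy, then try to send queued
 * I-frames and fall back to an RR frame if nothing went out. */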
3546 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3547 {
3548 u32 control = 0;
3549
3550 chan->frames_sent = 0;
3551
3552 control |= __set_reqseq(chan, chan->buffer_seq);
3553
3554 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3555 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3556 l2cap_send_sframe(chan, control);
3557 set_bit(CONN_RNR_SENT, &chan->conn_state);
3558 }
3559
3560 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3561 l2cap_retransmit_frames(chan);
3562
3563 l2cap_ertm_send(chan);
3564
3565 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3566 chan->frames_sent == 0) {
3567 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3568 l2cap_send_sframe(chan, control);
3569 }
3570 }
3571
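/* Queue an out-of-sequence I-frame for later reassembly, keeping the SREJ
 * queue ordered by tx_seq relative to buffer_seq.  Returns -EINVAL if the
 * sequence number is already queued. */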
3572 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3573 {
3574 struct sk_buff *next_skb;
3575 int tx_seq_offset, next_tx_seq_offset;
3576
3577 bt_cb(skb)->tx_seq = tx_seq;
3578 bt_cb(skb)->sar = sar;
3579
3580 next_skb = skb_peek(&chan->srej_q);
3581
3582 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3583
3584 while (next_skb) {
3585 if (bt_cb(next_skb)->tx_seq == tx_seq)
3586 return -EINVAL;
3587
3588 next_tx_seq_offset = __seq_offset(chan,
3589 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3590
3591 if (next_tx_seq_offset > tx_seq_offset) {
3592 __skb_queue_before(&chan->srej_q, next_skb, skb);
3593 return 0;
3594 }
3595
3596 if (skb_queue_is_last(&chan->srej_q, next_skb))
3597 next_skb = NULL;
3598 else
3599 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3600 }
3601
3602 __skb_queue_tail(&chan->srej_q, skb);
3603
3604 return 0;
3605 }
3606
3607 static void append_skb_frag(struct sk_buff *skb,
3608 struct sk_buff *new_frag, struct sk_buff **last_frag)
3609 {
3610 /* skb->len reflects data in skb as well as all fragments
3611 * skb->data_len reflects only data in fragments
3612 */
3613 if (!skb_has_frag_list(skb))
3614 skb_shinfo(skb)->frag_list = new_frag;
3615
3616 new_frag->next = NULL;
3617
3618 (*last_frag)->next = new_frag;
3619 *last_frag = new_frag;
3620
3621 skb->len += new_frag->len;
3622 skb->data_len += new_frag->len;
3623 skb->truesize += new_frag->truesize;
3624 }
3625
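/* Reassemble SDUs according to the SAR bits in the control field and hand
 * complete SDUs to the channel's recv callback.  On error any partially
 * reassembled SDU is discarded. */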
3626 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3627 {
3628 int err = -EINVAL;
3629
3630 switch (__get_ctrl_sar(chan, control)) {
3631 case L2CAP_SAR_UNSEGMENTED:
3632 if (chan->sdu)
3633 break;
3634
3635 err = chan->ops->recv(chan->data, skb);
3636 break;
3637
3638 case L2CAP_SAR_START:
3639 if (chan->sdu)
3640 break;
3641
3642 chan->sdu_len = get_unaligned_le16(skb->data);
3643 skb_pull(skb, L2CAP_SDULEN_SIZE);
3644
3645 if (chan->sdu_len > chan->imtu) {
3646 err = -EMSGSIZE;
3647 break;
3648 }
3649
3650 if (skb->len >= chan->sdu_len)
3651 break;
3652
3653 chan->sdu = skb;
3654 chan->sdu_last_frag = skb;
3655
3656 skb = NULL;
3657 err = 0;
3658 break;
3659
3660 case L2CAP_SAR_CONTINUE:
3661 if (!chan->sdu)
3662 break;
3663
3664 append_skb_frag(chan->sdu, skb,
3665 &chan->sdu_last_frag);
3666 skb = NULL;
3667
3668 if (chan->sdu->len >= chan->sdu_len)
3669 break;
3670
3671 err = 0;
3672 break;
3673
3674 case L2CAP_SAR_END:
3675 if (!chan->sdu)
3676 break;
3677
3678 append_skb_frag(chan->sdu, skb,
3679 &chan->sdu_last_frag);
3680 skb = NULL;
3681
3682 if (chan->sdu->len != chan->sdu_len)
3683 break;
3684
3685 err = chan->ops->recv(chan->data, chan->sdu);
3686
3687 if (!err) {
3688 /* Reassembly complete */
3689 chan->sdu = NULL;
3690 chan->sdu_last_frag = NULL;
3691 chan->sdu_len = 0;
3692 }
3693 break;
3694 }
3695
3696 if (err) {
3697 kfree_skb(skb);
3698 kfree_skb(chan->sdu);
3699 chan->sdu = NULL;
3700 chan->sdu_last_frag = NULL;
3701 chan->sdu_len = 0;
3702 }
3703
3704 return err;
3705 }
3706
3707 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3708 {
3709 BT_DBG("chan %p, Enter local busy", chan);
3710
3711 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3712
3713 __set_ack_timer(chan);
3714 }
3715
3716 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3717 {
3718 u32 control;
3719
3720 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3721 goto done;
3722
3723 control = __set_reqseq(chan, chan->buffer_seq);
3724 control |= __set_ctrl_poll(chan);
3725 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3726 l2cap_send_sframe(chan, control);
3727 chan->retry_count = 1;
3728
3729 __clear_retrans_timer(chan);
3730 __set_monitor_timer(chan);
3731
3732 set_bit(CONN_WAIT_F, &chan->conn_state);
3733
3734 done:
3735 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3736 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3737
3738 BT_DBG("chan %p, Exit local busy", chan);
3739 }
3740
3741 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3742 {
3743 if (chan->mode == L2CAP_MODE_ERTM) {
3744 if (busy)
3745 l2cap_ertm_enter_local_busy(chan);
3746 else
3747 l2cap_ertm_exit_local_busy(chan);
3748 }
3749 }
3750
3751 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3752 {
3753 struct sk_buff *skb;
3754 u32 control;
3755
3756 while ((skb = skb_peek(&chan->srej_q)) &&
3757 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3758 int err;
3759
3760 if (bt_cb(skb)->tx_seq != tx_seq)
3761 break;
3762
3763 skb = skb_dequeue(&chan->srej_q);
3764 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3765 err = l2cap_reassemble_sdu(chan, skb, control);
3766
3767 if (err < 0) {
3768 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3769 break;
3770 }
3771
3772 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3773 tx_seq = __next_seq(chan, tx_seq);
3774 }
3775 }
3776
3777 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3778 {
3779 struct srej_list *l, *tmp;
3780 u32 control;
3781
3782 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3783 if (l->tx_seq == tx_seq) {
3784 list_del(&l->list);
3785 kfree(l);
3786 return;
3787 }
3788 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3789 control |= __set_reqseq(chan, l->tx_seq);
3790 l2cap_send_sframe(chan, control);
3791 list_del(&l->list);
3792 list_add_tail(&l->list, &chan->srej_l);
3793 }
3794 }
3795
3796 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3797 {
3798 struct srej_list *new;
3799 u32 control;
3800
3801 while (tx_seq != chan->expected_tx_seq) {
3802 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3803 control |= __set_reqseq(chan, chan->expected_tx_seq);
3804 l2cap_send_sframe(chan, control);
3805
3806 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3807 if (!new)
3808 return -ENOMEM;
3809
3810 new->tx_seq = chan->expected_tx_seq;
3811
3812 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3813
3814 list_add_tail(&new->list, &chan->srej_l);
3815 }
3816
3817 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3818
3819 return 0;
3820 }
3821
3822 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3823 {
3824 u16 tx_seq = __get_txseq(chan, rx_control);
3825 u16 req_seq = __get_reqseq(chan, rx_control);
3826 u8 sar = __get_ctrl_sar(chan, rx_control);
3827 int tx_seq_offset, expected_tx_seq_offset;
3828 int num_to_ack = (chan->tx_win/6) + 1;
3829 int err = 0;
3830
3831 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3832 tx_seq, rx_control);
3833
3834 if (__is_ctrl_final(chan, rx_control) &&
3835 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3836 __clear_monitor_timer(chan);
3837 if (chan->unacked_frames > 0)
3838 __set_retrans_timer(chan);
3839 clear_bit(CONN_WAIT_F, &chan->conn_state);
3840 }
3841
3842 chan->expected_ack_seq = req_seq;
3843 l2cap_drop_acked_frames(chan);
3844
3845 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3846
3847 /* invalid tx_seq */
3848 if (tx_seq_offset >= chan->tx_win) {
3849 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3850 goto drop;
3851 }
3852
3853 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3854 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3855 l2cap_send_ack(chan);
3856 goto drop;
3857 }
3858
3859 if (tx_seq == chan->expected_tx_seq)
3860 goto expected;
3861
3862 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3863 struct srej_list *first;
3864
3865 first = list_first_entry(&chan->srej_l,
3866 struct srej_list, list);
3867 if (tx_seq == first->tx_seq) {
3868 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3869 l2cap_check_srej_gap(chan, tx_seq);
3870
3871 list_del(&first->list);
3872 kfree(first);
3873
3874 if (list_empty(&chan->srej_l)) {
3875 chan->buffer_seq = chan->buffer_seq_srej;
3876 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3877 l2cap_send_ack(chan);
3878 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3879 }
3880 } else {
3881 struct srej_list *l;
3882
3883 /* duplicated tx_seq */
3884 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3885 goto drop;
3886
3887 list_for_each_entry(l, &chan->srej_l, list) {
3888 if (l->tx_seq == tx_seq) {
3889 l2cap_resend_srejframe(chan, tx_seq);
3890 return 0;
3891 }
3892 }
3893
3894 err = l2cap_send_srejframe(chan, tx_seq);
3895 if (err < 0) {
3896 l2cap_send_disconn_req(chan->conn, chan, -err);
3897 return err;
3898 }
3899 }
3900 } else {
3901 expected_tx_seq_offset = __seq_offset(chan,
3902 chan->expected_tx_seq, chan->buffer_seq);
3903
3904 /* duplicated tx_seq */
3905 if (tx_seq_offset < expected_tx_seq_offset)
3906 goto drop;
3907
3908 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3909
3910 BT_DBG("chan %p, Enter SREJ", chan);
3911
3912 INIT_LIST_HEAD(&chan->srej_l);
3913 chan->buffer_seq_srej = chan->buffer_seq;
3914
3915 __skb_queue_head_init(&chan->srej_q);
3916 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3917
3918 /* Set P-bit only if there are some I-frames to ack. */
3919 if (__clear_ack_timer(chan))
3920 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3921
3922 err = l2cap_send_srejframe(chan, tx_seq);
3923 if (err < 0) {
3924 l2cap_send_disconn_req(chan->conn, chan, -err);
3925 return err;
3926 }
3927 }
3928 return 0;
3929
3930 expected:
3931 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3932
3933 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3934 bt_cb(skb)->tx_seq = tx_seq;
3935 bt_cb(skb)->sar = sar;
3936 __skb_queue_tail(&chan->srej_q, skb);
3937 return 0;
3938 }
3939
3940 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3941 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3942
3943 if (err < 0) {
3944 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3945 return err;
3946 }
3947
3948 if (__is_ctrl_final(chan, rx_control)) {
3949 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3950 l2cap_retransmit_frames(chan);
3951 }
3952
3953
3954 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3955 if (chan->num_acked == num_to_ack - 1)
3956 l2cap_send_ack(chan);
3957 else
3958 __set_ack_timer(chan);
3959
3960 return 0;
3961
3962 drop:
3963 kfree_skb(skb);
3964 return 0;
3965 }
3966
3967 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3968 {
3969 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3970 __get_reqseq(chan, rx_control), rx_control);
3971
3972 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3973 l2cap_drop_acked_frames(chan);
3974
3975 if (__is_ctrl_poll(chan, rx_control)) {
3976 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3977 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3978 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3979 (chan->unacked_frames > 0))
3980 __set_retrans_timer(chan);
3981
3982 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3983 l2cap_send_srejtail(chan);
3984 } else {
3985 l2cap_send_i_or_rr_or_rnr(chan);
3986 }
3987
3988 } else if (__is_ctrl_final(chan, rx_control)) {
3989 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3990
3991 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3992 l2cap_retransmit_frames(chan);
3993
3994 } else {
3995 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3996 (chan->unacked_frames > 0))
3997 __set_retrans_timer(chan);
3998
3999 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4000 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4001 l2cap_send_ack(chan);
4002 else
4003 l2cap_ertm_send(chan);
4004 }
4005 }
4006
4007 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4008 {
4009 u16 tx_seq = __get_reqseq(chan, rx_control);
4010
4011 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4012
4013 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4014
4015 chan->expected_ack_seq = tx_seq;
4016 l2cap_drop_acked_frames(chan);
4017
4018 if (__is_ctrl_final(chan, rx_control)) {
4019 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4020 l2cap_retransmit_frames(chan);
4021 } else {
4022 l2cap_retransmit_frames(chan);
4023
4024 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4025 set_bit(CONN_REJ_ACT, &chan->conn_state);
4026 }
4027 }
4028 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4029 {
4030 u16 tx_seq = __get_reqseq(chan, rx_control);
4031
4032 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4033
4034 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4035
4036 if (__is_ctrl_poll(chan, rx_control)) {
4037 chan->expected_ack_seq = tx_seq;
4038 l2cap_drop_acked_frames(chan);
4039
4040 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4041 l2cap_retransmit_one_frame(chan, tx_seq);
4042
4043 l2cap_ertm_send(chan);
4044
4045 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4046 chan->srej_save_reqseq = tx_seq;
4047 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4048 }
4049 } else if (__is_ctrl_final(chan, rx_control)) {
4050 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4051 chan->srej_save_reqseq == tx_seq)
4052 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4053 else
4054 l2cap_retransmit_one_frame(chan, tx_seq);
4055 } else {
4056 l2cap_retransmit_one_frame(chan, tx_seq);
4057 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4058 chan->srej_save_reqseq = tx_seq;
4059 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4060 }
4061 }
4062 }
4063
4064 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4065 {
4066 u16 tx_seq = __get_reqseq(chan, rx_control);
4067
4068 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4069
4070 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4071 chan->expected_ack_seq = tx_seq;
4072 l2cap_drop_acked_frames(chan);
4073
4074 if (__is_ctrl_poll(chan, rx_control))
4075 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4076
4077 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4078 __clear_retrans_timer(chan);
4079 if (__is_ctrl_poll(chan, rx_control))
4080 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4081 return;
4082 }
4083
4084 if (__is_ctrl_poll(chan, rx_control)) {
4085 l2cap_send_srejtail(chan);
4086 } else {
4087 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4088 l2cap_send_sframe(chan, rx_control);
4089 }
4090 }
4091
4092 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4093 {
4094 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4095
4096 if (__is_ctrl_final(chan, rx_control) &&
4097 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4098 __clear_monitor_timer(chan);
4099 if (chan->unacked_frames > 0)
4100 __set_retrans_timer(chan);
4101 clear_bit(CONN_WAIT_F, &chan->conn_state);
4102 }
4103
4104 switch (__get_ctrl_super(chan, rx_control)) {
4105 case L2CAP_SUPER_RR:
4106 l2cap_data_channel_rrframe(chan, rx_control);
4107 break;
4108
4109 case L2CAP_SUPER_REJ:
4110 l2cap_data_channel_rejframe(chan, rx_control);
4111 break;
4112
4113 case L2CAP_SUPER_SREJ:
4114 l2cap_data_channel_srejframe(chan, rx_control);
4115 break;
4116
4117 case L2CAP_SUPER_RNR:
4118 l2cap_data_channel_rnrframe(chan, rx_control);
4119 break;
4120 }
4121
4122 kfree_skb(skb);
4123 return 0;
4124 }
4125
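/* Handle a frame received on an ERTM channel: check FCS, length and
 * req_seq, then dispatch to the I-frame or S-frame handler. */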
4126 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4127 {
4128 u32 control;
4129 u16 req_seq;
4130 int len, next_tx_seq_offset, req_seq_offset;
4131
4132 control = __get_control(chan, skb->data);
4133 skb_pull(skb, __ctrl_size(chan));
4134 len = skb->len;
4135
4136 	/*
4137 	 * We can just drop the corrupted I-frame here.
4138 	 * The receive side will notice the missing frame, start the
4139 	 * normal recovery procedure and request a retransmission.
4140 	 */
4141 if (l2cap_check_fcs(chan, skb))
4142 goto drop;
4143
4144 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4145 len -= L2CAP_SDULEN_SIZE;
4146
4147 if (chan->fcs == L2CAP_FCS_CRC16)
4148 len -= L2CAP_FCS_SIZE;
4149
4150 if (len > chan->mps) {
4151 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4152 goto drop;
4153 }
4154
4155 req_seq = __get_reqseq(chan, control);
4156
4157 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4158
4159 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4160 chan->expected_ack_seq);
4161
4162 /* check for invalid req-seq */
4163 if (req_seq_offset > next_tx_seq_offset) {
4164 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4165 goto drop;
4166 }
4167
4168 if (!__is_sframe(chan, control)) {
4169 if (len < 0) {
4170 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4171 goto drop;
4172 }
4173
4174 l2cap_data_channel_iframe(chan, control, skb);
4175 } else {
4176 if (len != 0) {
4177 BT_ERR("%d", len);
4178 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4179 goto drop;
4180 }
4181
4182 l2cap_data_channel_sframe(chan, control, skb);
4183 }
4184
4185 return 0;
4186
4187 drop:
4188 kfree_skb(skb);
4189 return 0;
4190 }
4191
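/* Deliver an incoming data frame to the channel identified by @cid,
 * according to the channel's mode (basic, ERTM or streaming). */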
4192 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4193 {
4194 struct l2cap_chan *chan;
4195 struct sock *sk = NULL;
4196 u32 control;
4197 u16 tx_seq;
4198 int len;
4199
4200 chan = l2cap_get_chan_by_scid(conn, cid);
4201 if (!chan) {
4202 BT_DBG("unknown cid 0x%4.4x", cid);
4203 goto drop;
4204 }
4205
4206 sk = chan->sk;
4207
4208 BT_DBG("chan %p, len %d", chan, skb->len);
4209
4210 if (chan->state != BT_CONNECTED)
4211 goto drop;
4212
4213 switch (chan->mode) {
4214 case L2CAP_MODE_BASIC:
4215 		/* If the socket receive buffer overflows we drop data here,
4216 		 * which is *bad* because L2CAP has to be reliable.
4217 		 * But we don't have any other choice: L2CAP doesn't
4218 		 * provide a flow control mechanism. */
4219
4220 if (chan->imtu < skb->len)
4221 goto drop;
4222
4223 if (!chan->ops->recv(chan->data, skb))
4224 goto done;
4225 break;
4226
4227 case L2CAP_MODE_ERTM:
4228 l2cap_ertm_data_rcv(chan, skb);
4229
4230 goto done;
4231
4232 case L2CAP_MODE_STREAMING:
4233 control = __get_control(chan, skb->data);
4234 skb_pull(skb, __ctrl_size(chan));
4235 len = skb->len;
4236
4237 if (l2cap_check_fcs(chan, skb))
4238 goto drop;
4239
4240 if (__is_sar_start(chan, control))
4241 len -= L2CAP_SDULEN_SIZE;
4242
4243 if (chan->fcs == L2CAP_FCS_CRC16)
4244 len -= L2CAP_FCS_SIZE;
4245
4246 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4247 goto drop;
4248
4249 tx_seq = __get_txseq(chan, control);
4250
4251 if (chan->expected_tx_seq != tx_seq) {
4252 /* Frame(s) missing - must discard partial SDU */
4253 kfree_skb(chan->sdu);
4254 chan->sdu = NULL;
4255 chan->sdu_last_frag = NULL;
4256 chan->sdu_len = 0;
4257
4258 /* TODO: Notify userland of missing data */
4259 }
4260
4261 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4262
4263 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4264 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4265
4266 goto done;
4267
4268 default:
4269 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4270 break;
4271 }
4272
4273 drop:
4274 kfree_skb(skb);
4275
4276 done:
4277 if (sk)
4278 release_sock(sk);
4279
4280 return 0;
4281 }
4282
4283 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4284 {
4285 struct sock *sk = NULL;
4286 struct l2cap_chan *chan;
4287
4288 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4289 if (!chan)
4290 goto drop;
4291
4292 sk = chan->sk;
4293
4294 lock_sock(sk);
4295
4296 BT_DBG("sk %p, len %d", sk, skb->len);
4297
4298 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4299 goto drop;
4300
4301 if (chan->imtu < skb->len)
4302 goto drop;
4303
4304 if (!chan->ops->recv(chan->data, skb))
4305 goto done;
4306
4307 drop:
4308 kfree_skb(skb);
4309
4310 done:
4311 if (sk)
4312 release_sock(sk);
4313 return 0;
4314 }
4315
4316 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4317 {
4318 struct sock *sk = NULL;
4319 struct l2cap_chan *chan;
4320
4321 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4322 if (!chan)
4323 goto drop;
4324
4325 sk = chan->sk;
4326
4327 lock_sock(sk);
4328
4329 BT_DBG("sk %p, len %d", sk, skb->len);
4330
4331 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4332 goto drop;
4333
4334 if (chan->imtu < skb->len)
4335 goto drop;
4336
4337 if (!chan->ops->recv(chan->data, skb))
4338 goto done;
4339
4340 drop:
4341 kfree_skb(skb);
4342
4343 done:
4344 if (sk)
4345 release_sock(sk);
4346 return 0;
4347 }
4348
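/* Dispatch a complete L2CAP PDU by destination CID: the signalling CIDs go
 * to the signalling handler, the connectionless CID carries a PSM prefix,
 * the LE data CID carries ATT traffic, the SMP CID feeds the Security
 * Manager, and every other CID is treated as a connection-oriented data
 * channel. */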
4349 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4350 {
4351 struct l2cap_hdr *lh = (void *) skb->data;
4352 u16 cid, len;
4353 __le16 psm;
4354
4355 skb_pull(skb, L2CAP_HDR_SIZE);
4356 cid = __le16_to_cpu(lh->cid);
4357 len = __le16_to_cpu(lh->len);
4358
4359 if (len != skb->len) {
4360 kfree_skb(skb);
4361 return;
4362 }
4363
4364 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4365
4366 switch (cid) {
4367 case L2CAP_CID_LE_SIGNALING:
4368 case L2CAP_CID_SIGNALING:
4369 l2cap_sig_channel(conn, skb);
4370 break;
4371
4372 case L2CAP_CID_CONN_LESS:
4373 psm = get_unaligned_le16(skb->data);
4374 skb_pull(skb, 2);
4375 l2cap_conless_channel(conn, psm, skb);
4376 break;
4377
4378 case L2CAP_CID_LE_DATA:
4379 l2cap_att_channel(conn, cid, skb);
4380 break;
4381
4382 case L2CAP_CID_SMP:
4383 if (smp_sig_channel(conn, skb))
4384 l2cap_conn_del(conn->hcon, EACCES);
4385 break;
4386
4387 default:
4388 l2cap_data_channel(conn, cid, skb);
4389 break;
4390 }
4391 }
4392
4393 /* ---- L2CAP interface with lower layer (HCI) ---- */
4394
4395 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4396 {
4397 int exact = 0, lm1 = 0, lm2 = 0;
4398 struct l2cap_chan *c;
4399
4400 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4401
4402 /* Find listening sockets and check their link_mode */
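	/* lm1 accumulates the link mode of listeners bound to this adapter's
	 * own address (exact matches), lm2 that of wildcard (BDADDR_ANY)
	 * listeners; exact matches take precedence in the return value. */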
4403 read_lock(&chan_list_lock);
4404 list_for_each_entry(c, &chan_list, global_l) {
4405 struct sock *sk = c->sk;
4406
4407 if (c->state != BT_LISTEN)
4408 continue;
4409
4410 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4411 lm1 |= HCI_LM_ACCEPT;
4412 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4413 lm1 |= HCI_LM_MASTER;
4414 exact++;
4415 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4416 lm2 |= HCI_LM_ACCEPT;
4417 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4418 lm2 |= HCI_LM_MASTER;
4419 }
4420 }
4421 read_unlock(&chan_list_lock);
4422
4423 return exact ? lm1 : lm2;
4424 }
4425
4426 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4427 {
4428 struct l2cap_conn *conn;
4429
4430 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4431
4432 if (!status) {
4433 conn = l2cap_conn_add(hcon, status);
4434 if (conn)
4435 l2cap_conn_ready(conn);
4436 } else
4437 l2cap_conn_del(hcon, bt_to_errno(status));
4438
4439 return 0;
4440 }
4441
4442 int l2cap_disconn_ind(struct hci_conn *hcon)
4443 {
4444 struct l2cap_conn *conn = hcon->l2cap_data;
4445
4446 BT_DBG("hcon %p", hcon);
4447
4448 if (!conn)
4449 return HCI_ERROR_REMOTE_USER_TERM;
4450 return conn->disc_reason;
4451 }
4452
4453 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4454 {
4455 BT_DBG("hcon %p reason %d", hcon, reason);
4456
4457 l2cap_conn_del(hcon, bt_to_errno(reason));
4458 return 0;
4459 }
4460
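/* React to an encryption change on a connection-oriented channel: when
 * encryption is lost, a medium-security channel is given a short grace
 * timer and a high-security channel is closed immediately; when encryption
 * is (re)established, any pending grace timer on a medium-security channel
 * is cleared. */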
4461 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4462 {
4463 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4464 return;
4465
4466 if (encrypt == 0x00) {
4467 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4468 __clear_chan_timer(chan);
4469 __set_chan_timer(chan,
4470 msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
4471 } else if (chan->sec_level == BT_SECURITY_HIGH)
4472 l2cap_chan_close(chan, ECONNREFUSED);
4473 } else {
4474 if (chan->sec_level == BT_SECURITY_MEDIUM)
4475 __clear_chan_timer(chan);
4476 }
4477 }
4478
4479 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4480 {
4481 struct l2cap_conn *conn = hcon->l2cap_data;
4482 struct l2cap_chan *chan;
4483
4484 if (!conn)
4485 return 0;
4486
4487 BT_DBG("conn %p", conn);
4488
4489 if (hcon->type == LE_LINK) {
4490 smp_distribute_keys(conn, 0);
4491 cancel_delayed_work(&conn->security_timer);
4492 }
4493
4494 rcu_read_lock();
4495
4496 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
4497 struct sock *sk = chan->sk;
4498
4499 bh_lock_sock(sk);
4500
4501 		BT_DBG("chan->scid 0x%4.4x", chan->scid);
4502
4503 if (chan->scid == L2CAP_CID_LE_DATA) {
4504 if (!status && encrypt) {
4505 chan->sec_level = hcon->sec_level;
4506 l2cap_chan_ready(chan);
4507 }
4508
4509 bh_unlock_sock(sk);
4510 continue;
4511 }
4512
4513 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4514 bh_unlock_sock(sk);
4515 continue;
4516 }
4517
4518 if (!status && (chan->state == BT_CONNECTED ||
4519 chan->state == BT_CONFIG)) {
4520 l2cap_check_encryption(chan, encrypt);
4521 bh_unlock_sock(sk);
4522 continue;
4523 }
4524
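		/* The channel was waiting on the security procedure before
		 * progressing its connection: in BT_CONNECT a successful
		 * result lets us finally send the Connect Request, while in
		 * BT_CONNECT2 we answer the peer's pending Connect Request
		 * with success, "authorization pending" (deferred setup) or
		 * a security block. */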
4525 if (chan->state == BT_CONNECT) {
4526 if (!status) {
4527 struct l2cap_conn_req req;
4528 req.scid = cpu_to_le16(chan->scid);
4529 req.psm = chan->psm;
4530
4531 chan->ident = l2cap_get_ident(conn);
4532 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4533
4534 l2cap_send_cmd(conn, chan->ident,
4535 L2CAP_CONN_REQ, sizeof(req), &req);
4536 } else {
4537 __clear_chan_timer(chan);
4538 __set_chan_timer(chan,
4539 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4540 }
4541 } else if (chan->state == BT_CONNECT2) {
4542 struct l2cap_conn_rsp rsp;
4543 __u16 res, stat;
4544
4545 if (!status) {
4546 if (bt_sk(sk)->defer_setup) {
4547 struct sock *parent = bt_sk(sk)->parent;
4548 res = L2CAP_CR_PEND;
4549 stat = L2CAP_CS_AUTHOR_PEND;
4550 if (parent)
4551 parent->sk_data_ready(parent, 0);
4552 } else {
4553 l2cap_state_change(chan, BT_CONFIG);
4554 res = L2CAP_CR_SUCCESS;
4555 stat = L2CAP_CS_NO_INFO;
4556 }
4557 } else {
4558 l2cap_state_change(chan, BT_DISCONN);
4559 __set_chan_timer(chan,
4560 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4561 res = L2CAP_CR_SEC_BLOCK;
4562 stat = L2CAP_CS_NO_INFO;
4563 }
4564
4565 rsp.scid = cpu_to_le16(chan->dcid);
4566 rsp.dcid = cpu_to_le16(chan->scid);
4567 rsp.result = cpu_to_le16(res);
4568 rsp.status = cpu_to_le16(stat);
4569 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4570 sizeof(rsp), &rsp);
4571 }
4572
4573 bh_unlock_sock(sk);
4574 }
4575
4576 rcu_read_unlock();
4577
4578 return 0;
4579 }
4580
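/* Reassemble L2CAP PDUs from HCI ACL fragments.  A start fragment
 * (!ACL_CONT) carries the Basic L2CAP header, whose length field tells us
 * how big the complete frame will be; if more data is expected an rx_skb of
 * that size is allocated and continuation fragments are appended to it
 * until rx_len drops to zero, at which point the frame is dispatched. */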
4581 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4582 {
4583 struct l2cap_conn *conn = hcon->l2cap_data;
4584
4585 if (!conn)
4586 conn = l2cap_conn_add(hcon, 0);
4587
4588 if (!conn)
4589 goto drop;
4590
4591 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4592
4593 if (!(flags & ACL_CONT)) {
4594 struct l2cap_hdr *hdr;
4595 struct l2cap_chan *chan;
4596 u16 cid;
4597 int len;
4598
4599 if (conn->rx_len) {
4600 BT_ERR("Unexpected start frame (len %d)", skb->len);
4601 kfree_skb(conn->rx_skb);
4602 conn->rx_skb = NULL;
4603 conn->rx_len = 0;
4604 l2cap_conn_unreliable(conn, ECOMM);
4605 }
4606
4607 		/* A start fragment always begins with the Basic L2CAP header */
4608 if (skb->len < L2CAP_HDR_SIZE) {
4609 BT_ERR("Frame is too short (len %d)", skb->len);
4610 l2cap_conn_unreliable(conn, ECOMM);
4611 goto drop;
4612 }
4613
4614 hdr = (struct l2cap_hdr *) skb->data;
4615 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4616 cid = __le16_to_cpu(hdr->cid);
4617
4618 if (len == skb->len) {
4619 /* Complete frame received */
4620 l2cap_recv_frame(conn, skb);
4621 return 0;
4622 }
4623
4624 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4625
4626 if (skb->len > len) {
4627 BT_ERR("Frame is too long (len %d, expected len %d)",
4628 skb->len, len);
4629 l2cap_conn_unreliable(conn, ECOMM);
4630 goto drop;
4631 }
4632
4633 chan = l2cap_get_chan_by_scid(conn, cid);
4634
4635 if (chan && chan->sk) {
4636 struct sock *sk = chan->sk;
4637
4638 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4639 BT_ERR("Frame exceeding recv MTU (len %d, "
4640 "MTU %d)", len,
4641 chan->imtu);
4642 release_sock(sk);
4643 l2cap_conn_unreliable(conn, ECOMM);
4644 goto drop;
4645 }
4646 release_sock(sk);
4647 }
4648
4649 /* Allocate skb for the complete frame (with header) */
4650 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4651 if (!conn->rx_skb)
4652 goto drop;
4653
4654 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4655 skb->len);
4656 conn->rx_len = len - skb->len;
4657 } else {
4658 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4659
4660 if (!conn->rx_len) {
4661 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4662 l2cap_conn_unreliable(conn, ECOMM);
4663 goto drop;
4664 }
4665
4666 if (skb->len > conn->rx_len) {
4667 BT_ERR("Fragment is too long (len %d, expected %d)",
4668 skb->len, conn->rx_len);
4669 kfree_skb(conn->rx_skb);
4670 conn->rx_skb = NULL;
4671 conn->rx_len = 0;
4672 l2cap_conn_unreliable(conn, ECOMM);
4673 goto drop;
4674 }
4675
4676 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4677 skb->len);
4678 conn->rx_len -= skb->len;
4679
4680 if (!conn->rx_len) {
4681 /* Complete frame received */
4682 l2cap_recv_frame(conn, conn->rx_skb);
4683 conn->rx_skb = NULL;
4684 }
4685 }
4686
4687 drop:
4688 kfree_skb(skb);
4689 return 0;
4690 }
4691
4692 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4693 {
4694 struct l2cap_chan *c;
4695
4696 read_lock(&chan_list_lock);
4697
4698 list_for_each_entry(c, &chan_list, global_l) {
4699 struct sock *sk = c->sk;
4700
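		/* One line per channel: src dst state psm scid dcid imtu
		 * omtu sec_level mode. */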
4701 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4702 batostr(&bt_sk(sk)->src),
4703 batostr(&bt_sk(sk)->dst),
4704 c->state, __le16_to_cpu(c->psm),
4705 c->scid, c->dcid, c->imtu, c->omtu,
4706 c->sec_level, c->mode);
4707 }
4708
4709 read_unlock(&chan_list_lock);
4710
4711 return 0;
4712 }
4713
4714 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4715 {
4716 return single_open(file, l2cap_debugfs_show, inode->i_private);
4717 }
4718
4719 static const struct file_operations l2cap_debugfs_fops = {
4720 .open = l2cap_debugfs_open,
4721 .read = seq_read,
4722 .llseek = seq_lseek,
4723 .release = single_release,
4724 };
4725
4726 static struct dentry *l2cap_debugfs;
4727
4728 int __init l2cap_init(void)
4729 {
4730 int err;
4731
4732 err = l2cap_init_sockets();
4733 if (err < 0)
4734 return err;
4735
4736 if (bt_debugfs) {
4737 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4738 bt_debugfs, NULL, &l2cap_debugfs_fops);
4739 if (!l2cap_debugfs)
4740 BT_ERR("Failed to create L2CAP debug file");
4741 }
4742
4743 return 0;
4744 }
4745
4746 void l2cap_exit(void)
4747 {
4748 debugfs_remove(l2cap_debugfs);
4749 l2cap_cleanup_sockets();
4750 }
4751
4752 module_param(disable_ertm, bool, 0644);
4753 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");