Bluetooth: Change sk to l2cap_chan
net/bluetooth/l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
13
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
26 */
27
28 /* Bluetooth L2CAP core. */
29
30 #include <linux/module.h>
31
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
50 #include <net/sock.h>
51
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
54
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
59
60 bool disable_ertm;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
67
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb);
77
78 /* ---- L2CAP channels ---- */
79
80 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
81 {
82 struct l2cap_chan *c, *r = NULL;
83
84 rcu_read_lock();
85
86 list_for_each_entry_rcu(c, &conn->chan_l, list) {
87 if (c->dcid == cid) {
88 r = c;
89 break;
90 }
91 }
92
93 rcu_read_unlock();
94 return r;
95 }
96
97 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
98 {
99 struct l2cap_chan *c, *r = NULL;
100
101 rcu_read_lock();
102
103 list_for_each_entry_rcu(c, &conn->chan_l, list) {
104 if (c->scid == cid) {
105 r = c;
106 break;
107 }
108 }
109
110 rcu_read_unlock();
111 return r;
112 }
113
114 /* Find channel with given SCID.
115 * Returns the channel with its socket locked */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
117 {
118 struct l2cap_chan *c;
119
120 c = __l2cap_get_chan_by_scid(conn, cid);
121 if (c)
122 lock_sock(c->sk);
123 return c;
124 }
125
126 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
127 {
128 struct l2cap_chan *c, *r = NULL;
129
130 rcu_read_lock();
131
132 list_for_each_entry_rcu(c, &conn->chan_l, list) {
133 if (c->ident == ident) {
134 r = c;
135 break;
136 }
137 }
138
139 rcu_read_unlock();
140 return r;
141 }
142
143 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
144 {
145 struct l2cap_chan *c;
146
147 c = __l2cap_get_chan_by_ident(conn, ident);
148 if (c)
149 lock_sock(c->sk);
150 return c;
151 }
152
153 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
154 {
155 struct l2cap_chan *c;
156
157 list_for_each_entry(c, &chan_list, global_l) {
158 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
159 return c;
160 }
161 return NULL;
162 }
163
164 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
165 {
166 int err;
167
168 write_lock(&chan_list_lock);
169
170 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
171 err = -EADDRINUSE;
172 goto done;
173 }
174
175 if (psm) {
176 chan->psm = psm;
177 chan->sport = psm;
178 err = 0;
179 } else {
180 u16 p;
181
182 err = -EINVAL;
183 for (p = 0x1001; p < 0x1100; p += 2)
184 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
185 chan->psm = cpu_to_le16(p);
186 chan->sport = cpu_to_le16(p);
187 err = 0;
188 break;
189 }
190 }
191
192 done:
193 write_unlock(&chan_list_lock);
194 return err;
195 }
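/* Usage sketch (hypothetical, not part of the original file): the socket
 * layer is expected to call l2cap_add_psm() from its bind() handler.
 * A non-zero PSM claims that exact value; passing 0 asks for a dynamic
 * PSM, and the loop above hands out the first free odd value in the
 * 0x1001-0x10ff range.  A caller might do, roughly,
 *
 *	err = l2cap_add_psm(chan, &bt_sk(sk)->src, la->l2_psm);
 *
 * where -EADDRINUSE means the PSM is already bound on that source address.
 */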
196
197 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
198 {
199 write_lock(&chan_list_lock);
200
201 chan->scid = scid;
202
203 write_unlock(&chan_list_lock);
204
205 return 0;
206 }
207
208 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
209 {
210 u16 cid = L2CAP_CID_DYN_START;
211
212 for (; cid < L2CAP_CID_DYN_END; cid++) {
213 if (!__l2cap_get_chan_by_scid(conn, cid))
214 return cid;
215 }
216
217 return 0;
218 }
219
220 static char *state_to_string(int state)
221 {
222 switch(state) {
223 case BT_CONNECTED:
224 return "BT_CONNECTED";
225 case BT_OPEN:
226 return "BT_OPEN";
227 case BT_BOUND:
228 return "BT_BOUND";
229 case BT_LISTEN:
230 return "BT_LISTEN";
231 case BT_CONNECT:
232 return "BT_CONNECT";
233 case BT_CONNECT2:
234 return "BT_CONNECT2";
235 case BT_CONFIG:
236 return "BT_CONFIG";
237 case BT_DISCONN:
238 return "BT_DISCONN";
239 case BT_CLOSED:
240 return "BT_CLOSED";
241 }
242
243 return "invalid state";
244 }
245
246 static void l2cap_state_change(struct l2cap_chan *chan, int state)
247 {
248 BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
249 state_to_string(state));
250
251 chan->state = state;
252 chan->ops->state_change(chan->data, state);
253 }
254
255 static void l2cap_chan_timeout(struct work_struct *work)
256 {
257 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
258 chan_timer.work);
259 struct sock *sk = chan->sk;
260 int reason;
261
262 BT_DBG("chan %p state %d", chan, chan->state);
263
264 lock_sock(sk);
265
266 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
267 reason = ECONNREFUSED;
268 else if (chan->state == BT_CONNECT &&
269 chan->sec_level != BT_SECURITY_SDP)
270 reason = ECONNREFUSED;
271 else
272 reason = ETIMEDOUT;
273
274 l2cap_chan_close(chan, reason);
275
276 release_sock(sk);
277
278 chan->ops->close(chan->data);
279 l2cap_chan_put(chan);
280 }
281
282 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
283 {
284 struct l2cap_chan *chan;
285
286 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
287 if (!chan)
288 return NULL;
289
290 chan->sk = sk;
291
292 write_lock(&chan_list_lock);
293 list_add(&chan->global_l, &chan_list);
294 write_unlock(&chan_list_lock);
295
296 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
297
298 chan->state = BT_OPEN;
299
300 atomic_set(&chan->refcnt, 1);
301
302 BT_DBG("sk %p chan %p", sk, chan);
303
304 return chan;
305 }
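/* Annotation (not in the original source): a freshly created channel
 * starts in BT_OPEN with a reference count of one and is already linked
 * on the global chan_list, so every l2cap_chan_create() must be balanced
 * by l2cap_chan_destroy(), which unlinks it and drops that initial
 * reference.
 */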
306
307 void l2cap_chan_destroy(struct l2cap_chan *chan)
308 {
309 write_lock(&chan_list_lock);
310 list_del(&chan->global_l);
311 write_unlock(&chan_list_lock);
312
313 l2cap_chan_put(chan);
314 }
315
316 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
317 {
318 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
319 chan->psm, chan->dcid);
320
321 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
322
323 chan->conn = conn;
324
325 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
326 if (conn->hcon->type == LE_LINK) {
327 /* LE connection */
328 chan->omtu = L2CAP_LE_DEFAULT_MTU;
329 chan->scid = L2CAP_CID_LE_DATA;
330 chan->dcid = L2CAP_CID_LE_DATA;
331 } else {
332 /* Alloc CID for connection-oriented socket */
333 chan->scid = l2cap_alloc_cid(conn);
334 chan->omtu = L2CAP_DEFAULT_MTU;
335 }
336 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
337 /* Connectionless socket */
338 chan->scid = L2CAP_CID_CONN_LESS;
339 chan->dcid = L2CAP_CID_CONN_LESS;
340 chan->omtu = L2CAP_DEFAULT_MTU;
341 } else {
342 /* Raw socket can send/recv signalling messages only */
343 chan->scid = L2CAP_CID_SIGNALING;
344 chan->dcid = L2CAP_CID_SIGNALING;
345 chan->omtu = L2CAP_DEFAULT_MTU;
346 }
347
348 chan->local_id = L2CAP_BESTEFFORT_ID;
349 chan->local_stype = L2CAP_SERV_BESTEFFORT;
350 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
351 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
352 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
353 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
354
355 l2cap_chan_hold(chan);
356
357 list_add_rcu(&chan->list, &conn->chan_l);
358 }
359
360 /* Delete channel.
361 * Must be called with the channel's socket locked. */
362 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
363 {
364 struct sock *sk = chan->sk;
365 struct l2cap_conn *conn = chan->conn;
366 struct sock *parent = bt_sk(sk)->parent;
367
368 __clear_chan_timer(chan);
369
370 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
371
372 if (conn) {
373 /* Delete from channel list */
374 list_del_rcu(&chan->list);
375 synchronize_rcu();
376
377 l2cap_chan_put(chan);
378
379 chan->conn = NULL;
380 hci_conn_put(conn->hcon);
381 }
382
383 l2cap_state_change(chan, BT_CLOSED);
384 sock_set_flag(sk, SOCK_ZAPPED);
385
386 if (err)
387 sk->sk_err = err;
388
389 if (parent) {
390 bt_accept_unlink(sk);
391 parent->sk_data_ready(parent, 0);
392 } else
393 sk->sk_state_change(sk);
394
395 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
396 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
397 return;
398
399 skb_queue_purge(&chan->tx_q);
400
401 if (chan->mode == L2CAP_MODE_ERTM) {
402 struct srej_list *l, *tmp;
403
404 __clear_retrans_timer(chan);
405 __clear_monitor_timer(chan);
406 __clear_ack_timer(chan);
407
408 skb_queue_purge(&chan->srej_q);
409
410 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
411 list_del(&l->list);
412 kfree(l);
413 }
414 }
415 }
416
417 static void l2cap_chan_cleanup_listen(struct sock *parent)
418 {
419 struct sock *sk;
420
421 BT_DBG("parent %p", parent);
422
423 /* Close channels that have not yet been accepted */
424 while ((sk = bt_accept_dequeue(parent, NULL))) {
425 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
426 __clear_chan_timer(chan);
427 lock_sock(sk);
428 l2cap_chan_close(chan, ECONNRESET);
429 release_sock(sk);
430 chan->ops->close(chan->data);
431 }
432 }
433
434 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
435 {
436 struct l2cap_conn *conn = chan->conn;
437 struct sock *sk = chan->sk;
438
439 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
440
441 switch (chan->state) {
442 case BT_LISTEN:
443 l2cap_chan_cleanup_listen(sk);
444
445 l2cap_state_change(chan, BT_CLOSED);
446 sock_set_flag(sk, SOCK_ZAPPED);
447 break;
448
449 case BT_CONNECTED:
450 case BT_CONFIG:
451 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
452 conn->hcon->type == ACL_LINK) {
453 __clear_chan_timer(chan);
454 __set_chan_timer(chan, sk->sk_sndtimeo);
455 l2cap_send_disconn_req(conn, chan, reason);
456 } else
457 l2cap_chan_del(chan, reason);
458 break;
459
460 case BT_CONNECT2:
461 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
462 conn->hcon->type == ACL_LINK) {
463 struct l2cap_conn_rsp rsp;
464 __u16 result;
465
466 if (bt_sk(sk)->defer_setup)
467 result = L2CAP_CR_SEC_BLOCK;
468 else
469 result = L2CAP_CR_BAD_PSM;
470 l2cap_state_change(chan, BT_DISCONN);
471
472 rsp.scid = cpu_to_le16(chan->dcid);
473 rsp.dcid = cpu_to_le16(chan->scid);
474 rsp.result = cpu_to_le16(result);
475 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
476 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
477 sizeof(rsp), &rsp);
478 }
479
480 l2cap_chan_del(chan, reason);
481 break;
482
483 case BT_CONNECT:
484 case BT_DISCONN:
485 l2cap_chan_del(chan, reason);
486 break;
487
488 default:
489 sock_set_flag(sk, SOCK_ZAPPED);
490 break;
491 }
492 }
493
494 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
495 {
496 if (chan->chan_type == L2CAP_CHAN_RAW) {
497 switch (chan->sec_level) {
498 case BT_SECURITY_HIGH:
499 return HCI_AT_DEDICATED_BONDING_MITM;
500 case BT_SECURITY_MEDIUM:
501 return HCI_AT_DEDICATED_BONDING;
502 default:
503 return HCI_AT_NO_BONDING;
504 }
505 } else if (chan->psm == cpu_to_le16(0x0001)) {
506 if (chan->sec_level == BT_SECURITY_LOW)
507 chan->sec_level = BT_SECURITY_SDP;
508
509 if (chan->sec_level == BT_SECURITY_HIGH)
510 return HCI_AT_NO_BONDING_MITM;
511 else
512 return HCI_AT_NO_BONDING;
513 } else {
514 switch (chan->sec_level) {
515 case BT_SECURITY_HIGH:
516 return HCI_AT_GENERAL_BONDING_MITM;
517 case BT_SECURITY_MEDIUM:
518 return HCI_AT_GENERAL_BONDING;
519 default:
520 return HCI_AT_NO_BONDING;
521 }
522 }
523 }
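/* Summary of the mapping above (annotation, not in the original source):
 * raw channels ask for dedicated bonding, the SDP PSM (0x0001) never asks
 * for bonding (and quietly upgrades BT_SECURITY_LOW to BT_SECURITY_SDP),
 * and every other channel asks for general bonding.  In each case the
 * MITM-protected variant is requested when sec_level is BT_SECURITY_HIGH.
 */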
524
525 /* Service level security */
526 int l2cap_chan_check_security(struct l2cap_chan *chan)
527 {
528 struct l2cap_conn *conn = chan->conn;
529 __u8 auth_type;
530
531 auth_type = l2cap_get_auth_type(chan);
532
533 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
534 }
535
536 static u8 l2cap_get_ident(struct l2cap_conn *conn)
537 {
538 u8 id;
539
540 /* Get the next available identifier.
541 * 1 - 128 are used by the kernel.
542 * 129 - 199 are reserved.
543 * 200 - 254 are used by utilities such as l2ping.
544 */
545
546 spin_lock(&conn->lock);
547
548 if (++conn->tx_ident > 128)
549 conn->tx_ident = 1;
550
551 id = conn->tx_ident;
552
553 spin_unlock(&conn->lock);
554
555 return id;
556 }
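/* Annotation (not in the original source): tx_ident wraps within the
 * kernel range, so a hypothetical sequence of calls returns
 * ... 127, 128, 1, 2, ... and identifier 0 is never handed out.
 */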
557
558 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
559 {
560 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
561 u8 flags;
562
563 BT_DBG("code 0x%2.2x", code);
564
565 if (!skb)
566 return;
567
568 if (lmp_no_flush_capable(conn->hcon->hdev))
569 flags = ACL_START_NO_FLUSH;
570 else
571 flags = ACL_START;
572
573 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
574 skb->priority = HCI_PRIO_MAX;
575
576 hci_send_acl(conn->hchan, skb, flags);
577 }
578
579 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
580 {
581 struct hci_conn *hcon = chan->conn->hcon;
582 u16 flags;
583
584 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
585 skb->priority);
586
587 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
588 lmp_no_flush_capable(hcon->hdev))
589 flags = ACL_START_NO_FLUSH;
590 else
591 flags = ACL_START;
592
593 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
594 hci_send_acl(chan->conn->hchan, skb, flags);
595 }
596
597 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
598 {
599 struct sk_buff *skb;
600 struct l2cap_hdr *lh;
601 struct l2cap_conn *conn = chan->conn;
602 int count, hlen;
603
604 if (chan->state != BT_CONNECTED)
605 return;
606
607 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
608 hlen = L2CAP_EXT_HDR_SIZE;
609 else
610 hlen = L2CAP_ENH_HDR_SIZE;
611
612 if (chan->fcs == L2CAP_FCS_CRC16)
613 hlen += L2CAP_FCS_SIZE;
614
615 BT_DBG("chan %p, control 0x%8.8x", chan, control);
616
617 count = min_t(unsigned int, conn->mtu, hlen);
618
619 control |= __set_sframe(chan);
620
621 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
622 control |= __set_ctrl_final(chan);
623
624 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
625 control |= __set_ctrl_poll(chan);
626
627 skb = bt_skb_alloc(count, GFP_ATOMIC);
628 if (!skb)
629 return;
630
631 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
632 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
633 lh->cid = cpu_to_le16(chan->dcid);
634
635 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
636
637 if (chan->fcs == L2CAP_FCS_CRC16) {
638 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
639 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
640 }
641
642 skb->priority = HCI_PRIO_MAX;
643 l2cap_do_send(chan, skb);
644 }
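/* Annotation (not in the original source): the supervisory frame built
 * above is just the basic L2CAP header (length + destination CID)
 * followed by a 16-bit enhanced or 32-bit extended control field, plus an
 * optional 2-byte FCS computed over the header and control field.
 * Nothing is sent unless the channel is in BT_CONNECTED state.
 */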
645
646 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
647 {
648 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
649 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
650 set_bit(CONN_RNR_SENT, &chan->conn_state);
651 } else
652 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
653
654 control |= __set_reqseq(chan, chan->buffer_seq);
655
656 l2cap_send_sframe(chan, control);
657 }
658
659 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
660 {
661 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
662 }
663
664 static void l2cap_do_start(struct l2cap_chan *chan)
665 {
666 struct l2cap_conn *conn = chan->conn;
667
668 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
669 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
670 return;
671
672 if (l2cap_chan_check_security(chan) &&
673 __l2cap_no_conn_pending(chan)) {
674 struct l2cap_conn_req req;
675 req.scid = cpu_to_le16(chan->scid);
676 req.psm = chan->psm;
677
678 chan->ident = l2cap_get_ident(conn);
679 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
680
681 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
682 sizeof(req), &req);
683 }
684 } else {
685 struct l2cap_info_req req;
686 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
687
688 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
689 conn->info_ident = l2cap_get_ident(conn);
690
691 schedule_delayed_work(&conn->info_timer,
692 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
693
694 l2cap_send_cmd(conn, conn->info_ident,
695 L2CAP_INFO_REQ, sizeof(req), &req);
696 }
697 }
698
699 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
700 {
701 u32 local_feat_mask = l2cap_feat_mask;
702 if (!disable_ertm)
703 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
704
705 switch (mode) {
706 case L2CAP_MODE_ERTM:
707 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
708 case L2CAP_MODE_STREAMING:
709 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
710 default:
711 return 0x00;
712 }
713 }
714
715 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
716 {
717 struct sock *sk;
718 struct l2cap_disconn_req req;
719
720 if (!conn)
721 return;
722
723 sk = chan->sk;
724
725 if (chan->mode == L2CAP_MODE_ERTM) {
726 __clear_retrans_timer(chan);
727 __clear_monitor_timer(chan);
728 __clear_ack_timer(chan);
729 }
730
731 req.dcid = cpu_to_le16(chan->dcid);
732 req.scid = cpu_to_le16(chan->scid);
733 l2cap_send_cmd(conn, l2cap_get_ident(conn),
734 L2CAP_DISCONN_REQ, sizeof(req), &req);
735
736 l2cap_state_change(chan, BT_DISCONN);
737 sk->sk_err = err;
738 }
739
740 /* ---- L2CAP connections ---- */
741 static void l2cap_conn_start(struct l2cap_conn *conn)
742 {
743 struct l2cap_chan *chan;
744
745 BT_DBG("conn %p", conn);
746
747 rcu_read_lock();
748
749 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
750 struct sock *sk = chan->sk;
751
752 bh_lock_sock(sk);
753
754 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
755 bh_unlock_sock(sk);
756 continue;
757 }
758
759 if (chan->state == BT_CONNECT) {
760 struct l2cap_conn_req req;
761
762 if (!l2cap_chan_check_security(chan) ||
763 !__l2cap_no_conn_pending(chan)) {
764 bh_unlock_sock(sk);
765 continue;
766 }
767
768 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
769 && test_bit(CONF_STATE2_DEVICE,
770 &chan->conf_state)) {
771 /* l2cap_chan_close() calls list_del(chan)
772 * so release the lock */
773 l2cap_chan_close(chan, ECONNRESET);
774 bh_unlock_sock(sk);
775 continue;
776 }
777
778 req.scid = cpu_to_le16(chan->scid);
779 req.psm = chan->psm;
780
781 chan->ident = l2cap_get_ident(conn);
782 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
783
784 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
785 sizeof(req), &req);
786
787 } else if (chan->state == BT_CONNECT2) {
788 struct l2cap_conn_rsp rsp;
789 char buf[128];
790 rsp.scid = cpu_to_le16(chan->dcid);
791 rsp.dcid = cpu_to_le16(chan->scid);
792
793 if (l2cap_chan_check_security(chan)) {
794 if (bt_sk(sk)->defer_setup) {
795 struct sock *parent = bt_sk(sk)->parent;
796 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
797 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
798 if (parent)
799 parent->sk_data_ready(parent, 0);
800
801 } else {
802 l2cap_state_change(chan, BT_CONFIG);
803 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
804 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
805 }
806 } else {
807 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
808 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
809 }
810
811 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
812 sizeof(rsp), &rsp);
813
814 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
815 rsp.result != L2CAP_CR_SUCCESS) {
816 bh_unlock_sock(sk);
817 continue;
818 }
819
820 set_bit(CONF_REQ_SENT, &chan->conf_state);
821 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
822 l2cap_build_conf_req(chan, buf), buf);
823 chan->num_conf_req++;
824 }
825
826 bh_unlock_sock(sk);
827 }
828
829 rcu_read_unlock();
830 }
831
832 /* Find channel with given CID and source bdaddr.
833 * Returns the closest match.
834 */
835 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
836 {
837 struct l2cap_chan *c, *c1 = NULL;
838
839 read_lock(&chan_list_lock);
840
841 list_for_each_entry(c, &chan_list, global_l) {
842 struct sock *sk = c->sk;
843
844 if (state && c->state != state)
845 continue;
846
847 if (c->scid == cid) {
848 /* Exact match. */
849 if (!bacmp(&bt_sk(sk)->src, src)) {
850 read_unlock(&chan_list_lock);
851 return c;
852 }
853
854 /* Closest match */
855 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
856 c1 = c;
857 }
858 }
859
860 read_unlock(&chan_list_lock);
861
862 return c1;
863 }
864
865 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
866 {
867 struct sock *parent, *sk;
868 struct l2cap_chan *chan, *pchan;
869
870 BT_DBG("");
871
872 /* Check if we have a socket listening on this CID */
873 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
874 conn->src);
875 if (!pchan)
876 return;
877
878 parent = pchan->sk;
879
880 lock_sock(parent);
881
882 /* Check for backlog size */
883 if (sk_acceptq_is_full(parent)) {
884 BT_DBG("backlog full %d", parent->sk_ack_backlog);
885 goto clean;
886 }
887
888 chan = pchan->ops->new_connection(pchan->data);
889 if (!chan)
890 goto clean;
891
892 sk = chan->sk;
893
894 hci_conn_hold(conn->hcon);
895
896 bacpy(&bt_sk(sk)->src, conn->src);
897 bacpy(&bt_sk(sk)->dst, conn->dst);
898
899 bt_accept_enqueue(parent, sk);
900
901 l2cap_chan_add(conn, chan);
902
903 __set_chan_timer(chan, sk->sk_sndtimeo);
904
905 l2cap_state_change(chan, BT_CONNECTED);
906 parent->sk_data_ready(parent, 0);
907
908 clean:
909 release_sock(parent);
910 }
911
912 static void l2cap_chan_ready(struct sock *sk)
913 {
914 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
915 struct sock *parent = bt_sk(sk)->parent;
916
917 BT_DBG("sk %p, parent %p", sk, parent);
918
919 chan->conf_state = 0;
920 __clear_chan_timer(chan);
921
922 l2cap_state_change(chan, BT_CONNECTED);
923 sk->sk_state_change(sk);
924
925 if (parent)
926 parent->sk_data_ready(parent, 0);
927 }
928
929 static void l2cap_conn_ready(struct l2cap_conn *conn)
930 {
931 struct l2cap_chan *chan;
932
933 BT_DBG("conn %p", conn);
934
935 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
936 l2cap_le_conn_ready(conn);
937
938 if (conn->hcon->out && conn->hcon->type == LE_LINK)
939 smp_conn_security(conn, conn->hcon->pending_sec_level);
940
941 rcu_read_lock();
942
943 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
944 struct sock *sk = chan->sk;
945
946 bh_lock_sock(sk);
947
948 if (conn->hcon->type == LE_LINK) {
949 if (smp_conn_security(conn, chan->sec_level))
950 l2cap_chan_ready(sk);
951
952 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
953 __clear_chan_timer(chan);
954 l2cap_state_change(chan, BT_CONNECTED);
955 sk->sk_state_change(sk);
956
957 } else if (chan->state == BT_CONNECT)
958 l2cap_do_start(chan);
959
960 bh_unlock_sock(sk);
961 }
962
963 rcu_read_unlock();
964 }
965
966 /* Notify sockets that we cannot guarantee reliability anymore */
967 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
968 {
969 struct l2cap_chan *chan;
970
971 BT_DBG("conn %p", conn);
972
973 rcu_read_lock();
974
975 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
976 struct sock *sk = chan->sk;
977
978 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
979 sk->sk_err = err;
980 }
981
982 rcu_read_unlock();
983 }
984
985 static void l2cap_info_timeout(struct work_struct *work)
986 {
987 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
988 info_timer.work);
989
990 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
991 conn->info_ident = 0;
992
993 l2cap_conn_start(conn);
994 }
995
996 static void l2cap_conn_del(struct hci_conn *hcon, int err)
997 {
998 struct l2cap_conn *conn = hcon->l2cap_data;
999 struct l2cap_chan *chan, *l;
1000 struct sock *sk;
1001
1002 if (!conn)
1003 return;
1004
1005 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1006
1007 kfree_skb(conn->rx_skb);
1008
1009 /* Kill channels */
1010 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1011 sk = chan->sk;
1012 lock_sock(sk);
1013 l2cap_chan_del(chan, err);
1014 release_sock(sk);
1015 chan->ops->close(chan->data);
1016 }
1017
1018 hci_chan_del(conn->hchan);
1019
1020 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1021 __cancel_delayed_work(&conn->info_timer);
1022
1023 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1024 __cancel_delayed_work(&conn->security_timer);
1025 smp_chan_destroy(conn);
1026 }
1027
1028 hcon->l2cap_data = NULL;
1029 kfree(conn);
1030 }
1031
1032 static void security_timeout(struct work_struct *work)
1033 {
1034 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1035 security_timer.work);
1036
1037 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1038 }
1039
1040 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1041 {
1042 struct l2cap_conn *conn = hcon->l2cap_data;
1043 struct hci_chan *hchan;
1044
1045 if (conn || status)
1046 return conn;
1047
1048 hchan = hci_chan_create(hcon);
1049 if (!hchan)
1050 return NULL;
1051
1052 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1053 if (!conn) {
1054 hci_chan_del(hchan);
1055 return NULL;
1056 }
1057
1058 hcon->l2cap_data = conn;
1059 conn->hcon = hcon;
1060 conn->hchan = hchan;
1061
1062 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1063
1064 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1065 conn->mtu = hcon->hdev->le_mtu;
1066 else
1067 conn->mtu = hcon->hdev->acl_mtu;
1068
1069 conn->src = &hcon->hdev->bdaddr;
1070 conn->dst = &hcon->dst;
1071
1072 conn->feat_mask = 0;
1073
1074 spin_lock_init(&conn->lock);
1075
1076 INIT_LIST_HEAD(&conn->chan_l);
1077
1078 if (hcon->type == LE_LINK)
1079 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1080 else
1081 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1082
1083 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1084
1085 return conn;
1086 }
1087
1088 /* ---- Socket interface ---- */
1089
1090 /* Find channel with given PSM and source bdaddr.
1091 * Returns the closest match.
1092 */
1093 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1094 {
1095 struct l2cap_chan *c, *c1 = NULL;
1096
1097 read_lock(&chan_list_lock);
1098
1099 list_for_each_entry(c, &chan_list, global_l) {
1100 struct sock *sk = c->sk;
1101
1102 if (state && c->state != state)
1103 continue;
1104
1105 if (c->psm == psm) {
1106 /* Exact match. */
1107 if (!bacmp(&bt_sk(sk)->src, src)) {
1108 read_unlock(&chan_list_lock);
1109 return c;
1110 }
1111
1112 /* Closest match */
1113 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1114 c1 = c;
1115 }
1116 }
1117
1118 read_unlock(&chan_list_lock);
1119
1120 return c1;
1121 }
1122
1123 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1124 {
1125 struct sock *sk = chan->sk;
1126 bdaddr_t *src = &bt_sk(sk)->src;
1127 struct l2cap_conn *conn;
1128 struct hci_conn *hcon;
1129 struct hci_dev *hdev;
1130 __u8 auth_type;
1131 int err;
1132
1133 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1134 chan->psm);
1135
1136 hdev = hci_get_route(dst, src);
1137 if (!hdev)
1138 return -EHOSTUNREACH;
1139
1140 hci_dev_lock(hdev);
1141
1142 lock_sock(sk);
1143
1144 /* PSM must be odd and lsb of upper byte must be 0 */
1145 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1146 chan->chan_type != L2CAP_CHAN_RAW) {
1147 err = -EINVAL;
1148 goto done;
1149 }
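/* Worked example of the mask check above (annotation, not in the original
 * source): 0x0101 selects bit 0 of each byte, so a valid PSM must have
 * bit 0 of the low byte set and bit 0 of the high byte clear.
 * 0x1001 & 0x0101 == 0x0001 and is accepted, while 0x1002 (even) and
 * 0x0101 (odd upper byte) are both rejected.
 */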
1150
1151 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1152 err = -EINVAL;
1153 goto done;
1154 }
1155
1156 switch (chan->mode) {
1157 case L2CAP_MODE_BASIC:
1158 break;
1159 case L2CAP_MODE_ERTM:
1160 case L2CAP_MODE_STREAMING:
1161 if (!disable_ertm)
1162 break;
1163 /* fall through */
1164 default:
1165 err = -ENOTSUPP;
1166 goto done;
1167 }
1168
1169 switch (sk->sk_state) {
1170 case BT_CONNECT:
1171 case BT_CONNECT2:
1172 case BT_CONFIG:
1173 /* Already connecting */
1174 err = 0;
1175 goto done;
1176
1177 case BT_CONNECTED:
1178 /* Already connected */
1179 err = -EISCONN;
1180 goto done;
1181
1182 case BT_OPEN:
1183 case BT_BOUND:
1184 /* Can connect */
1185 break;
1186
1187 default:
1188 err = -EBADFD;
1189 goto done;
1190 }
1191
1192 /* Set destination address and psm */
1193 bacpy(&bt_sk(sk)->dst, dst);
1194 chan->psm = psm;
1195 chan->dcid = cid;
1196
1197 auth_type = l2cap_get_auth_type(chan);
1198
1199 if (chan->dcid == L2CAP_CID_LE_DATA)
1200 hcon = hci_connect(hdev, LE_LINK, dst,
1201 chan->sec_level, auth_type);
1202 else
1203 hcon = hci_connect(hdev, ACL_LINK, dst,
1204 chan->sec_level, auth_type);
1205
1206 if (IS_ERR(hcon)) {
1207 err = PTR_ERR(hcon);
1208 goto done;
1209 }
1210
1211 conn = l2cap_conn_add(hcon, 0);
1212 if (!conn) {
1213 hci_conn_put(hcon);
1214 err = -ENOMEM;
1215 goto done;
1216 }
1217
1218 /* Update source addr of the socket */
1219 bacpy(src, conn->src);
1220
1221 l2cap_chan_add(conn, chan);
1222
1223 l2cap_state_change(chan, BT_CONNECT);
1224 __set_chan_timer(chan, sk->sk_sndtimeo);
1225
1226 if (hcon->state == BT_CONNECTED) {
1227 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1228 __clear_chan_timer(chan);
1229 if (l2cap_chan_check_security(chan))
1230 l2cap_state_change(chan, BT_CONNECTED);
1231 } else
1232 l2cap_do_start(chan);
1233 }
1234
1235 err = 0;
1236
1237 done:
1238 hci_dev_unlock(hdev);
1239 hci_dev_put(hdev);
1240 return err;
1241 }
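/* Usage sketch (hypothetical, not part of this file): the socket layer's
 * connect() handler would typically resolve the peer address into la and
 * then call, roughly,
 *
 *	err = l2cap_chan_connect(chan, la.l2_psm, la.l2_cid, &la.l2_bdaddr);
 *
 * l2cap_chan_connect() picks an ACL or LE link based on the destination
 * CID, attaches the channel to the resulting l2cap_conn and either starts
 * the connect/configure sequence or waits for the HCI link to come up.
 */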
1242
1243 int __l2cap_wait_ack(struct sock *sk)
1244 {
1245 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1246 DECLARE_WAITQUEUE(wait, current);
1247 int err = 0;
1248 int timeo = HZ/5;
1249
1250 add_wait_queue(sk_sleep(sk), &wait);
1251 set_current_state(TASK_INTERRUPTIBLE);
1252 while (chan->unacked_frames > 0 && chan->conn) {
1253 if (!timeo)
1254 timeo = HZ/5;
1255
1256 if (signal_pending(current)) {
1257 err = sock_intr_errno(timeo);
1258 break;
1259 }
1260
1261 release_sock(sk);
1262 timeo = schedule_timeout(timeo);
1263 lock_sock(sk);
1264 set_current_state(TASK_INTERRUPTIBLE);
1265
1266 err = sock_error(sk);
1267 if (err)
1268 break;
1269 }
1270 set_current_state(TASK_RUNNING);
1271 remove_wait_queue(sk_sleep(sk), &wait);
1272 return err;
1273 }
1274
1275 static void l2cap_monitor_timeout(struct work_struct *work)
1276 {
1277 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1278 monitor_timer.work);
1279 struct sock *sk = chan->sk;
1280
1281 BT_DBG("chan %p", chan);
1282
1283 lock_sock(sk);
1284 if (chan->retry_count >= chan->remote_max_tx) {
1285 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1286 release_sock(sk);
1287 return;
1288 }
1289
1290 chan->retry_count++;
1291 __set_monitor_timer(chan);
1292
1293 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1294 release_sock(sk);
1295 }
1296
1297 static void l2cap_retrans_timeout(struct work_struct *work)
1298 {
1299 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1300 retrans_timer.work);
1301 struct sock *sk = chan->sk;
1302
1303 BT_DBG("chan %p", chan);
1304
1305 lock_sock(sk);
1306 chan->retry_count = 1;
1307 __set_monitor_timer(chan);
1308
1309 set_bit(CONN_WAIT_F, &chan->conn_state);
1310
1311 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1312 release_sock(sk);
1313 }
1314
1315 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1316 {
1317 struct sk_buff *skb;
1318
1319 while ((skb = skb_peek(&chan->tx_q)) &&
1320 chan->unacked_frames) {
1321 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1322 break;
1323
1324 skb = skb_dequeue(&chan->tx_q);
1325 kfree_skb(skb);
1326
1327 chan->unacked_frames--;
1328 }
1329
1330 if (!chan->unacked_frames)
1331 __clear_retrans_timer(chan);
1332 }
1333
1334 static void l2cap_streaming_send(struct l2cap_chan *chan)
1335 {
1336 struct sk_buff *skb;
1337 u32 control;
1338 u16 fcs;
1339
1340 while ((skb = skb_dequeue(&chan->tx_q))) {
1341 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1342 control |= __set_txseq(chan, chan->next_tx_seq);
1343 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1344
1345 if (chan->fcs == L2CAP_FCS_CRC16) {
1346 fcs = crc16(0, (u8 *)skb->data,
1347 skb->len - L2CAP_FCS_SIZE);
1348 put_unaligned_le16(fcs,
1349 skb->data + skb->len - L2CAP_FCS_SIZE);
1350 }
1351
1352 l2cap_do_send(chan, skb);
1353
1354 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1355 }
1356 }
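/* Annotation (not in the original source): streaming mode simply stamps
 * the next tx_seq into each queued frame, appends the FCS when enabled
 * and pushes it to the controller; there is no retransmission and no
 * acknowledgement bookkeeping, unlike the ERTM path below.
 */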
1357
1358 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1359 {
1360 struct sk_buff *skb, *tx_skb;
1361 u16 fcs;
1362 u32 control;
1363
1364 skb = skb_peek(&chan->tx_q);
1365 if (!skb)
1366 return;
1367
1368 while (bt_cb(skb)->tx_seq != tx_seq) {
1369 if (skb_queue_is_last(&chan->tx_q, skb))
1370 return;
1371
1372 skb = skb_queue_next(&chan->tx_q, skb);
1373 }
1374
1375 if (chan->remote_max_tx &&
1376 bt_cb(skb)->retries == chan->remote_max_tx) {
1377 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1378 return;
1379 }
1380
1381 tx_skb = skb_clone(skb, GFP_ATOMIC);
1382 bt_cb(skb)->retries++;
1383
1384 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1385 control &= __get_sar_mask(chan);
1386
1387 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1388 control |= __set_ctrl_final(chan);
1389
1390 control |= __set_reqseq(chan, chan->buffer_seq);
1391 control |= __set_txseq(chan, tx_seq);
1392
1393 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1394
1395 if (chan->fcs == L2CAP_FCS_CRC16) {
1396 fcs = crc16(0, (u8 *)tx_skb->data,
1397 tx_skb->len - L2CAP_FCS_SIZE);
1398 put_unaligned_le16(fcs,
1399 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1400 }
1401
1402 l2cap_do_send(chan, tx_skb);
1403 }
1404
1405 static int l2cap_ertm_send(struct l2cap_chan *chan)
1406 {
1407 struct sk_buff *skb, *tx_skb;
1408 u16 fcs;
1409 u32 control;
1410 int nsent = 0;
1411
1412 if (chan->state != BT_CONNECTED)
1413 return -ENOTCONN;
1414
1415 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1416
1417 if (chan->remote_max_tx &&
1418 bt_cb(skb)->retries == chan->remote_max_tx) {
1419 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1420 break;
1421 }
1422
1423 tx_skb = skb_clone(skb, GFP_ATOMIC);
1424
1425 bt_cb(skb)->retries++;
1426
1427 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1428 control &= __get_sar_mask(chan);
1429
1430 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1431 control |= __set_ctrl_final(chan);
1432
1433 control |= __set_reqseq(chan, chan->buffer_seq);
1434 control |= __set_txseq(chan, chan->next_tx_seq);
1435
1436 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1437
1438 if (chan->fcs == L2CAP_FCS_CRC16) {
1439 fcs = crc16(0, (u8 *)skb->data,
1440 tx_skb->len - L2CAP_FCS_SIZE);
1441 put_unaligned_le16(fcs, skb->data +
1442 tx_skb->len - L2CAP_FCS_SIZE);
1443 }
1444
1445 l2cap_do_send(chan, tx_skb);
1446
1447 __set_retrans_timer(chan);
1448
1449 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1450
1451 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1452
1453 if (bt_cb(skb)->retries == 1)
1454 chan->unacked_frames++;
1455
1456 chan->frames_sent++;
1457
1458 if (skb_queue_is_last(&chan->tx_q, skb))
1459 chan->tx_send_head = NULL;
1460 else
1461 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1462
1463 nsent++;
1464 }
1465
1466 return nsent;
1467 }
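/* Annotation (not in the original source): the ERTM transmit loop clones
 * every frame before sending, so the original stays on tx_q until it is
 * acknowledged and dropped by l2cap_drop_acked_frames().  The first
 * transmission of a frame bumps unacked_frames, each pass re-arms the
 * retransmission timer, and the loop stops once the transmit window is
 * full or tx_send_head runs past the end of the queue.
 */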
1468
1469 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1470 {
1471 int ret;
1472
1473 if (!skb_queue_empty(&chan->tx_q))
1474 chan->tx_send_head = chan->tx_q.next;
1475
1476 chan->next_tx_seq = chan->expected_ack_seq;
1477 ret = l2cap_ertm_send(chan);
1478 return ret;
1479 }
1480
1481 static void __l2cap_send_ack(struct l2cap_chan *chan)
1482 {
1483 u32 control = 0;
1484
1485 control |= __set_reqseq(chan, chan->buffer_seq);
1486
1487 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1488 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1489 set_bit(CONN_RNR_SENT, &chan->conn_state);
1490 l2cap_send_sframe(chan, control);
1491 return;
1492 }
1493
1494 if (l2cap_ertm_send(chan) > 0)
1495 return;
1496
1497 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1498 l2cap_send_sframe(chan, control);
1499 }
1500
1501 static void l2cap_send_ack(struct l2cap_chan *chan)
1502 {
1503 __clear_ack_timer(chan);
1504 __l2cap_send_ack(chan);
1505 }
1506
1507 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1508 {
1509 struct srej_list *tail;
1510 u32 control;
1511
1512 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1513 control |= __set_ctrl_final(chan);
1514
1515 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1516 control |= __set_reqseq(chan, tail->tx_seq);
1517
1518 l2cap_send_sframe(chan, control);
1519 }
1520
1521 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1522 {
1523 struct sock *sk = chan->sk;
1524 struct l2cap_conn *conn = chan->conn;
1525 struct sk_buff **frag;
1526 int err, sent = 0;
1527
1528 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1529 return -EFAULT;
1530
1531 sent += count;
1532 len -= count;
1533
1534 /* Continuation fragments (no L2CAP header) */
1535 frag = &skb_shinfo(skb)->frag_list;
1536 while (len) {
1537 count = min_t(unsigned int, conn->mtu, len);
1538
1539 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1540 if (!*frag)
1541 return err;
1542 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1543 return -EFAULT;
1544
1545 (*frag)->priority = skb->priority;
1546
1547 sent += count;
1548 len -= count;
1549
1550 frag = &(*frag)->next;
1551 }
1552
1553 return sent;
1554 }
1555
1556 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1557 struct msghdr *msg, size_t len,
1558 u32 priority)
1559 {
1560 struct sock *sk = chan->sk;
1561 struct l2cap_conn *conn = chan->conn;
1562 struct sk_buff *skb;
1563 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1564 struct l2cap_hdr *lh;
1565
1566 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
1567
1568 count = min_t(unsigned int, (conn->mtu - hlen), len);
1569 skb = bt_skb_send_alloc(sk, count + hlen,
1570 msg->msg_flags & MSG_DONTWAIT, &err);
1571 if (!skb)
1572 return ERR_PTR(err);
1573
1574 skb->priority = priority;
1575
1576 /* Create L2CAP header */
1577 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1578 lh->cid = cpu_to_le16(chan->dcid);
1579 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1580 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1581
1582 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1583 if (unlikely(err < 0)) {
1584 kfree_skb(skb);
1585 return ERR_PTR(err);
1586 }
1587 return skb;
1588 }
1589
1590 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1591 struct msghdr *msg, size_t len,
1592 u32 priority)
1593 {
1594 struct sock *sk = chan->sk;
1595 struct l2cap_conn *conn = chan->conn;
1596 struct sk_buff *skb;
1597 int err, count, hlen = L2CAP_HDR_SIZE;
1598 struct l2cap_hdr *lh;
1599
1600 BT_DBG("sk %p len %d", sk, (int)len);
1601
1602 count = min_t(unsigned int, (conn->mtu - hlen), len);
1603 skb = bt_skb_send_alloc(sk, count + hlen,
1604 msg->msg_flags & MSG_DONTWAIT, &err);
1605 if (!skb)
1606 return ERR_PTR(err);
1607
1608 skb->priority = priority;
1609
1610 /* Create L2CAP header */
1611 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1612 lh->cid = cpu_to_le16(chan->dcid);
1613 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1614
1615 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1616 if (unlikely(err < 0)) {
1617 kfree_skb(skb);
1618 return ERR_PTR(err);
1619 }
1620 return skb;
1621 }
1622
1623 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1624 struct msghdr *msg, size_t len,
1625 u32 control, u16 sdulen)
1626 {
1627 struct sock *sk = chan->sk;
1628 struct l2cap_conn *conn = chan->conn;
1629 struct sk_buff *skb;
1630 int err, count, hlen;
1631 struct l2cap_hdr *lh;
1632
1633 BT_DBG("sk %p len %d", sk, (int)len);
1634
1635 if (!conn)
1636 return ERR_PTR(-ENOTCONN);
1637
1638 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1639 hlen = L2CAP_EXT_HDR_SIZE;
1640 else
1641 hlen = L2CAP_ENH_HDR_SIZE;
1642
1643 if (sdulen)
1644 hlen += L2CAP_SDULEN_SIZE;
1645
1646 if (chan->fcs == L2CAP_FCS_CRC16)
1647 hlen += L2CAP_FCS_SIZE;
1648
1649 count = min_t(unsigned int, (conn->mtu - hlen), len);
1650 skb = bt_skb_send_alloc(sk, count + hlen,
1651 msg->msg_flags & MSG_DONTWAIT, &err);
1652 if (!skb)
1653 return ERR_PTR(err);
1654
1655 /* Create L2CAP header */
1656 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1657 lh->cid = cpu_to_le16(chan->dcid);
1658 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1659
1660 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1661
1662 if (sdulen)
1663 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1664
1665 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1666 if (unlikely(err < 0)) {
1667 kfree_skb(skb);
1668 return ERR_PTR(err);
1669 }
1670
1671 if (chan->fcs == L2CAP_FCS_CRC16)
1672 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1673
1674 bt_cb(skb)->retries = 0;
1675 return skb;
1676 }
1677
1678 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1679 {
1680 struct sk_buff *skb;
1681 struct sk_buff_head sar_queue;
1682 u32 control;
1683 size_t size = 0;
1684
1685 skb_queue_head_init(&sar_queue);
1686 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1687 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1688 if (IS_ERR(skb))
1689 return PTR_ERR(skb);
1690
1691 __skb_queue_tail(&sar_queue, skb);
1692 len -= chan->remote_mps;
1693 size += chan->remote_mps;
1694
1695 while (len > 0) {
1696 size_t buflen;
1697
1698 if (len > chan->remote_mps) {
1699 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1700 buflen = chan->remote_mps;
1701 } else {
1702 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1703 buflen = len;
1704 }
1705
1706 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1707 if (IS_ERR(skb)) {
1708 skb_queue_purge(&sar_queue);
1709 return PTR_ERR(skb);
1710 }
1711
1712 __skb_queue_tail(&sar_queue, skb);
1713 len -= buflen;
1714 size += buflen;
1715 }
1716 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1717 if (chan->tx_send_head == NULL)
1718 chan->tx_send_head = sar_queue.next;
1719
1720 return size;
1721 }
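/* Worked example (annotation, not in the original source): with
 * remote_mps == 300, a 1000 byte SDU is segmented into four I-frames:
 * a START PDU carrying the 2-byte SDU length plus 300 bytes of payload,
 * two CONTINUE PDUs of 300 bytes each and a final END PDU of 100 bytes.
 * The whole burst is spliced onto tx_q in one go, so partial SDUs are
 * never left on the queue after an allocation failure.
 */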
1722
1723 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1724 u32 priority)
1725 {
1726 struct sk_buff *skb;
1727 u32 control;
1728 int err;
1729
1730 /* Connectionless channel */
1731 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1732 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1733 if (IS_ERR(skb))
1734 return PTR_ERR(skb);
1735
1736 l2cap_do_send(chan, skb);
1737 return len;
1738 }
1739
1740 switch (chan->mode) {
1741 case L2CAP_MODE_BASIC:
1742 /* Check outgoing MTU */
1743 if (len > chan->omtu)
1744 return -EMSGSIZE;
1745
1746 /* Create a basic PDU */
1747 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1748 if (IS_ERR(skb))
1749 return PTR_ERR(skb);
1750
1751 l2cap_do_send(chan, skb);
1752 err = len;
1753 break;
1754
1755 case L2CAP_MODE_ERTM:
1756 case L2CAP_MODE_STREAMING:
1757 /* Entire SDU fits into one PDU */
1758 if (len <= chan->remote_mps) {
1759 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1760 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1761 0);
1762 if (IS_ERR(skb))
1763 return PTR_ERR(skb);
1764
1765 __skb_queue_tail(&chan->tx_q, skb);
1766
1767 if (chan->tx_send_head == NULL)
1768 chan->tx_send_head = skb;
1769
1770 } else {
1771 /* Segment the SDU into multiple PDUs */
1772 err = l2cap_sar_segment_sdu(chan, msg, len);
1773 if (err < 0)
1774 return err;
1775 }
1776
1777 if (chan->mode == L2CAP_MODE_STREAMING) {
1778 l2cap_streaming_send(chan);
1779 err = len;
1780 break;
1781 }
1782
1783 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1784 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1785 err = len;
1786 break;
1787 }
1788
1789 err = l2cap_ertm_send(chan);
1790 if (err >= 0)
1791 err = len;
1792
1793 break;
1794
1795 default:
1796 BT_DBG("bad state %1.1x", chan->mode);
1797 err = -EBADFD;
1798 }
1799
1800 return err;
1801 }
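/* Usage sketch (hypothetical, not part of this file): the socket
 * sendmsg() handler is expected to lock the socket and call
 *
 *	err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
 *
 * Connectionless channels get a single PDU prefixed with the PSM, basic
 * mode sends one PDU of at most omtu bytes, and ERTM/streaming mode
 * segments anything larger than remote_mps before handing the queue to
 * l2cap_streaming_send() or l2cap_ertm_send().
 */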
1802
1803 /* Copy frame to all raw sockets on that connection */
1804 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1805 {
1806 struct sk_buff *nskb;
1807 struct l2cap_chan *chan;
1808
1809 BT_DBG("conn %p", conn);
1810
1811 rcu_read_lock();
1812
1813 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
1814 struct sock *sk = chan->sk;
1815 if (chan->chan_type != L2CAP_CHAN_RAW)
1816 continue;
1817
1818 /* Don't send frame to the socket it came from */
1819 if (skb->sk == sk)
1820 continue;
1821 nskb = skb_clone(skb, GFP_ATOMIC);
1822 if (!nskb)
1823 continue;
1824
1825 if (chan->ops->recv(chan->data, nskb))
1826 kfree_skb(nskb);
1827 }
1828
1829 rcu_read_unlock();
1830 }
1831
1832 /* ---- L2CAP signalling commands ---- */
1833 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1834 u8 code, u8 ident, u16 dlen, void *data)
1835 {
1836 struct sk_buff *skb, **frag;
1837 struct l2cap_cmd_hdr *cmd;
1838 struct l2cap_hdr *lh;
1839 int len, count;
1840
1841 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1842 conn, code, ident, dlen);
1843
1844 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1845 count = min_t(unsigned int, conn->mtu, len);
1846
1847 skb = bt_skb_alloc(count, GFP_ATOMIC);
1848 if (!skb)
1849 return NULL;
1850
1851 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1852 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1853
1854 if (conn->hcon->type == LE_LINK)
1855 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1856 else
1857 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1858
1859 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1860 cmd->code = code;
1861 cmd->ident = ident;
1862 cmd->len = cpu_to_le16(dlen);
1863
1864 if (dlen) {
1865 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1866 memcpy(skb_put(skb, count), data, count);
1867 data += count;
1868 }
1869
1870 len -= skb->len;
1871
1872 /* Continuation fragments (no L2CAP header) */
1873 frag = &skb_shinfo(skb)->frag_list;
1874 while (len) {
1875 count = min_t(unsigned int, conn->mtu, len);
1876
1877 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1878 if (!*frag)
1879 goto fail;
1880
1881 memcpy(skb_put(*frag, count), data, count);
1882
1883 len -= count;
1884 data += count;
1885
1886 frag = &(*frag)->next;
1887 }
1888
1889 return skb;
1890
1891 fail:
1892 kfree_skb(skb);
1893 return NULL;
1894 }
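/* Annotation (not in the original source): a signalling PDU built here is
 * the 4-byte L2CAP header (length, CID 0x0001 on ACL or 0x0005 on LE)
 * followed by the 4-byte command header (code, ident, length) and the
 * command payload, fragmented to conn->mtu.  For example, a connect
 * request with its 4-byte payload yields an L2CAP length field of 8 and
 * 12 bytes in total.
 */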
1895
1896 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1897 {
1898 struct l2cap_conf_opt *opt = *ptr;
1899 int len;
1900
1901 len = L2CAP_CONF_OPT_SIZE + opt->len;
1902 *ptr += len;
1903
1904 *type = opt->type;
1905 *olen = opt->len;
1906
1907 switch (opt->len) {
1908 case 1:
1909 *val = *((u8 *) opt->val);
1910 break;
1911
1912 case 2:
1913 *val = get_unaligned_le16(opt->val);
1914 break;
1915
1916 case 4:
1917 *val = get_unaligned_le32(opt->val);
1918 break;
1919
1920 default:
1921 *val = (unsigned long) opt->val;
1922 break;
1923 }
1924
1925 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1926 return len;
1927 }
1928
1929 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1930 {
1931 struct l2cap_conf_opt *opt = *ptr;
1932
1933 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1934
1935 opt->type = type;
1936 opt->len = len;
1937
1938 switch (len) {
1939 case 1:
1940 *((u8 *) opt->val) = val;
1941 break;
1942
1943 case 2:
1944 put_unaligned_le16(val, opt->val);
1945 break;
1946
1947 case 4:
1948 put_unaligned_le32(val, opt->val);
1949 break;
1950
1951 default:
1952 memcpy(opt->val, (void *) val, len);
1953 break;
1954 }
1955
1956 *ptr += L2CAP_CONF_OPT_SIZE + len;
1957 }
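/* Worked example (annotation, not in the original source): each option is
 * a simple type/length/value triple.  An MTU option for imtu 672
 * (L2CAP_CONF_MTU is type 0x01, length 2) is therefore encoded on the
 * wire as the bytes 01 02 a0 02, with the value in little-endian order.
 */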
1958
1959 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1960 {
1961 struct l2cap_conf_efs efs;
1962
1963 switch (chan->mode) {
1964 case L2CAP_MODE_ERTM:
1965 efs.id = chan->local_id;
1966 efs.stype = chan->local_stype;
1967 efs.msdu = cpu_to_le16(chan->local_msdu);
1968 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1969 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1970 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1971 break;
1972
1973 case L2CAP_MODE_STREAMING:
1974 efs.id = 1;
1975 efs.stype = L2CAP_SERV_BESTEFFORT;
1976 efs.msdu = cpu_to_le16(chan->local_msdu);
1977 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1978 efs.acc_lat = 0;
1979 efs.flush_to = 0;
1980 break;
1981
1982 default:
1983 return;
1984 }
1985
1986 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1987 (unsigned long) &efs);
1988 }
1989
1990 static void l2cap_ack_timeout(struct work_struct *work)
1991 {
1992 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1993 ack_timer.work);
1994
1995 BT_DBG("chan %p", chan);
1996
1997 lock_sock(chan->sk);
1998 __l2cap_send_ack(chan);
1999 release_sock(chan->sk);
2000
2001 l2cap_chan_put(chan);
2002 }
2003
2004 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2005 {
2006 chan->expected_ack_seq = 0;
2007 chan->unacked_frames = 0;
2008 chan->buffer_seq = 0;
2009 chan->num_acked = 0;
2010 chan->frames_sent = 0;
2011
2012 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2013 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2014 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2015
2016 skb_queue_head_init(&chan->srej_q);
2017
2018 INIT_LIST_HEAD(&chan->srej_l);
2019 }
2020
2021 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2022 {
2023 switch (mode) {
2024 case L2CAP_MODE_STREAMING:
2025 case L2CAP_MODE_ERTM:
2026 if (l2cap_mode_supported(mode, remote_feat_mask))
2027 return mode;
2028 /* fall through */
2029 default:
2030 return L2CAP_MODE_BASIC;
2031 }
2032 }
2033
2034 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2035 {
2036 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2037 }
2038
2039 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2040 {
2041 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2042 }
2043
2044 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2045 {
2046 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2047 __l2cap_ews_supported(chan)) {
2048 /* use extended control field */
2049 set_bit(FLAG_EXT_CTRL, &chan->flags);
2050 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2051 } else {
2052 chan->tx_win = min_t(u16, chan->tx_win,
2053 L2CAP_DEFAULT_TX_WINDOW);
2054 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2055 }
2056 }
2057
2058 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2059 {
2060 struct l2cap_conf_req *req = data;
2061 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2062 void *ptr = req->data;
2063 u16 size;
2064
2065 BT_DBG("chan %p", chan);
2066
2067 if (chan->num_conf_req || chan->num_conf_rsp)
2068 goto done;
2069
2070 switch (chan->mode) {
2071 case L2CAP_MODE_STREAMING:
2072 case L2CAP_MODE_ERTM:
2073 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2074 break;
2075
2076 if (__l2cap_efs_supported(chan))
2077 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2078
2079 /* fall through */
2080 default:
2081 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2082 break;
2083 }
2084
2085 done:
2086 if (chan->imtu != L2CAP_DEFAULT_MTU)
2087 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2088
2089 switch (chan->mode) {
2090 case L2CAP_MODE_BASIC:
2091 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2092 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2093 break;
2094
2095 rfc.mode = L2CAP_MODE_BASIC;
2096 rfc.txwin_size = 0;
2097 rfc.max_transmit = 0;
2098 rfc.retrans_timeout = 0;
2099 rfc.monitor_timeout = 0;
2100 rfc.max_pdu_size = 0;
2101
2102 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2103 (unsigned long) &rfc);
2104 break;
2105
2106 case L2CAP_MODE_ERTM:
2107 rfc.mode = L2CAP_MODE_ERTM;
2108 rfc.max_transmit = chan->max_tx;
2109 rfc.retrans_timeout = 0;
2110 rfc.monitor_timeout = 0;
2111
2112 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2113 L2CAP_EXT_HDR_SIZE -
2114 L2CAP_SDULEN_SIZE -
2115 L2CAP_FCS_SIZE);
2116 rfc.max_pdu_size = cpu_to_le16(size);
2117
2118 l2cap_txwin_setup(chan);
2119
2120 rfc.txwin_size = min_t(u16, chan->tx_win,
2121 L2CAP_DEFAULT_TX_WINDOW);
2122
2123 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2124 (unsigned long) &rfc);
2125
2126 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2127 l2cap_add_opt_efs(&ptr, chan);
2128
2129 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2130 break;
2131
2132 if (chan->fcs == L2CAP_FCS_NONE ||
2133 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2134 chan->fcs = L2CAP_FCS_NONE;
2135 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2136 }
2137
2138 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2139 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2140 chan->tx_win);
2141 break;
2142
2143 case L2CAP_MODE_STREAMING:
2144 rfc.mode = L2CAP_MODE_STREAMING;
2145 rfc.txwin_size = 0;
2146 rfc.max_transmit = 0;
2147 rfc.retrans_timeout = 0;
2148 rfc.monitor_timeout = 0;
2149
2150 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2151 L2CAP_EXT_HDR_SIZE -
2152 L2CAP_SDULEN_SIZE -
2153 L2CAP_FCS_SIZE);
2154 rfc.max_pdu_size = cpu_to_le16(size);
2155
2156 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2157 (unsigned long) &rfc);
2158
2159 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2160 l2cap_add_opt_efs(&ptr, chan);
2161
2162 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2163 break;
2164
2165 if (chan->fcs == L2CAP_FCS_NONE ||
2166 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2167 chan->fcs = L2CAP_FCS_NONE;
2168 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2169 }
2170 break;
2171 }
2172
2173 req->dcid = cpu_to_le16(chan->dcid);
2174 req->flags = cpu_to_le16(0);
2175
2176 return ptr - data;
2177 }
2178
2179 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2180 {
2181 struct l2cap_conf_rsp *rsp = data;
2182 void *ptr = rsp->data;
2183 void *req = chan->conf_req;
2184 int len = chan->conf_len;
2185 int type, hint, olen;
2186 unsigned long val;
2187 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2188 struct l2cap_conf_efs efs;
2189 u8 remote_efs = 0;
2190 u16 mtu = L2CAP_DEFAULT_MTU;
2191 u16 result = L2CAP_CONF_SUCCESS;
2192 u16 size;
2193
2194 BT_DBG("chan %p", chan);
2195
2196 while (len >= L2CAP_CONF_OPT_SIZE) {
2197 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2198
2199 hint = type & L2CAP_CONF_HINT;
2200 type &= L2CAP_CONF_MASK;
2201
2202 switch (type) {
2203 case L2CAP_CONF_MTU:
2204 mtu = val;
2205 break;
2206
2207 case L2CAP_CONF_FLUSH_TO:
2208 chan->flush_to = val;
2209 break;
2210
2211 case L2CAP_CONF_QOS:
2212 break;
2213
2214 case L2CAP_CONF_RFC:
2215 if (olen == sizeof(rfc))
2216 memcpy(&rfc, (void *) val, olen);
2217 break;
2218
2219 case L2CAP_CONF_FCS:
2220 if (val == L2CAP_FCS_NONE)
2221 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2222 break;
2223
2224 case L2CAP_CONF_EFS:
2225 remote_efs = 1;
2226 if (olen == sizeof(efs))
2227 memcpy(&efs, (void *) val, olen);
2228 break;
2229
2230 case L2CAP_CONF_EWS:
2231 if (!enable_hs)
2232 return -ECONNREFUSED;
2233
2234 set_bit(FLAG_EXT_CTRL, &chan->flags);
2235 set_bit(CONF_EWS_RECV, &chan->conf_state);
2236 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2237 chan->remote_tx_win = val;
2238 break;
2239
2240 default:
2241 if (hint)
2242 break;
2243
2244 result = L2CAP_CONF_UNKNOWN;
2245 *((u8 *) ptr++) = type;
2246 break;
2247 }
2248 }
2249
2250 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2251 goto done;
2252
2253 switch (chan->mode) {
2254 case L2CAP_MODE_STREAMING:
2255 case L2CAP_MODE_ERTM:
2256 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2257 chan->mode = l2cap_select_mode(rfc.mode,
2258 chan->conn->feat_mask);
2259 break;
2260 }
2261
2262 if (remote_efs) {
2263 if (__l2cap_efs_supported(chan))
2264 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2265 else
2266 return -ECONNREFUSED;
2267 }
2268
2269 if (chan->mode != rfc.mode)
2270 return -ECONNREFUSED;
2271
2272 break;
2273 }
2274
2275 done:
2276 if (chan->mode != rfc.mode) {
2277 result = L2CAP_CONF_UNACCEPT;
2278 rfc.mode = chan->mode;
2279
2280 if (chan->num_conf_rsp == 1)
2281 return -ECONNREFUSED;
2282
2283 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2284 sizeof(rfc), (unsigned long) &rfc);
2285 }
2286
2287 if (result == L2CAP_CONF_SUCCESS) {
2288 /* Configure output options and let the other side know
2289 * which ones we don't like. */
2290
2291 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2292 result = L2CAP_CONF_UNACCEPT;
2293 else {
2294 chan->omtu = mtu;
2295 set_bit(CONF_MTU_DONE, &chan->conf_state);
2296 }
2297 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2298
2299 if (remote_efs) {
2300 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2301 efs.stype != L2CAP_SERV_NOTRAFIC &&
2302 efs.stype != chan->local_stype) {
2303
2304 result = L2CAP_CONF_UNACCEPT;
2305
2306 if (chan->num_conf_req >= 1)
2307 return -ECONNREFUSED;
2308
2309 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2310 sizeof(efs),
2311 (unsigned long) &efs);
2312 } else {
2313 /* Send PENDING Conf Rsp */
2314 result = L2CAP_CONF_PENDING;
2315 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2316 }
2317 }
2318
2319 switch (rfc.mode) {
2320 case L2CAP_MODE_BASIC:
2321 chan->fcs = L2CAP_FCS_NONE;
2322 set_bit(CONF_MODE_DONE, &chan->conf_state);
2323 break;
2324
2325 case L2CAP_MODE_ERTM:
2326 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2327 chan->remote_tx_win = rfc.txwin_size;
2328 else
2329 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2330
2331 chan->remote_max_tx = rfc.max_transmit;
2332
2333 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2334 chan->conn->mtu -
2335 L2CAP_EXT_HDR_SIZE -
2336 L2CAP_SDULEN_SIZE -
2337 L2CAP_FCS_SIZE);
2338 rfc.max_pdu_size = cpu_to_le16(size);
2339 chan->remote_mps = size;
2340
2341 rfc.retrans_timeout =
2342 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2343 rfc.monitor_timeout =
2344 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2345
2346 set_bit(CONF_MODE_DONE, &chan->conf_state);
2347
2348 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2349 sizeof(rfc), (unsigned long) &rfc);
2350
2351 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2352 chan->remote_id = efs.id;
2353 chan->remote_stype = efs.stype;
2354 chan->remote_msdu = le16_to_cpu(efs.msdu);
2355 chan->remote_flush_to =
2356 le32_to_cpu(efs.flush_to);
2357 chan->remote_acc_lat =
2358 le32_to_cpu(efs.acc_lat);
2359 chan->remote_sdu_itime =
2360 le32_to_cpu(efs.sdu_itime);
2361 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2362 sizeof(efs), (unsigned long) &efs);
2363 }
2364 break;
2365
2366 case L2CAP_MODE_STREAMING:
2367 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2368 chan->conn->mtu -
2369 L2CAP_EXT_HDR_SIZE -
2370 L2CAP_SDULEN_SIZE -
2371 L2CAP_FCS_SIZE);
2372 rfc.max_pdu_size = cpu_to_le16(size);
2373 chan->remote_mps = size;
2374
2375 set_bit(CONF_MODE_DONE, &chan->conf_state);
2376
2377 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2378 sizeof(rfc), (unsigned long) &rfc);
2379
2380 break;
2381
2382 default:
2383 result = L2CAP_CONF_UNACCEPT;
2384
2385 memset(&rfc, 0, sizeof(rfc));
2386 rfc.mode = chan->mode;
2387 }
2388
2389 if (result == L2CAP_CONF_SUCCESS)
2390 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2391 }
2392 rsp->scid = cpu_to_le16(chan->dcid);
2393 rsp->result = cpu_to_le16(result);
2394 rsp->flags = cpu_to_le16(0x0000);
2395
2396 return ptr - data;
2397 }
2398
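/* Process the remote's Configure Response: adopt the values it
 * accepted or adjusted (MTU, flush timeout, RFC, EWS, EFS) and
 * build a fresh Configure Request in 'data' to send back.
 * Returns the length of that request or -ECONNREFUSED.
 */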
2399 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2400 {
2401 struct l2cap_conf_req *req = data;
2402 void *ptr = req->data;
2403 int type, olen;
2404 unsigned long val;
2405 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2406 struct l2cap_conf_efs efs;
2407
2408 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2409
2410 while (len >= L2CAP_CONF_OPT_SIZE) {
2411 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2412
2413 switch (type) {
2414 case L2CAP_CONF_MTU:
2415 if (val < L2CAP_DEFAULT_MIN_MTU) {
2416 *result = L2CAP_CONF_UNACCEPT;
2417 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2418 } else
2419 chan->imtu = val;
2420 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2421 break;
2422
2423 case L2CAP_CONF_FLUSH_TO:
2424 chan->flush_to = val;
2425 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2426 2, chan->flush_to);
2427 break;
2428
2429 case L2CAP_CONF_RFC:
2430 if (olen == sizeof(rfc))
2431 memcpy(&rfc, (void *)val, olen);
2432
2433 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2434 rfc.mode != chan->mode)
2435 return -ECONNREFUSED;
2436
2437 chan->fcs = 0;
2438
2439 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2440 sizeof(rfc), (unsigned long) &rfc);
2441 break;
2442
2443 case L2CAP_CONF_EWS:
2444 chan->tx_win = min_t(u16, val,
2445 L2CAP_DEFAULT_EXT_WINDOW);
2446 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2447 chan->tx_win);
2448 break;
2449
2450 case L2CAP_CONF_EFS:
2451 if (olen == sizeof(efs))
2452 memcpy(&efs, (void *)val, olen);
2453
2454 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2455 efs.stype != L2CAP_SERV_NOTRAFIC &&
2456 efs.stype != chan->local_stype)
2457 return -ECONNREFUSED;
2458
2459 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2460 sizeof(efs), (unsigned long) &efs);
2461 break;
2462 }
2463 }
2464
2465 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2466 return -ECONNREFUSED;
2467
2468 chan->mode = rfc.mode;
2469
2470 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2471 switch (rfc.mode) {
2472 case L2CAP_MODE_ERTM:
2473 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2474 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2475 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2476
2477 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2478 chan->local_msdu = le16_to_cpu(efs.msdu);
2479 chan->local_sdu_itime =
2480 le32_to_cpu(efs.sdu_itime);
2481 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2482 chan->local_flush_to =
2483 le32_to_cpu(efs.flush_to);
2484 }
2485 break;
2486
2487 case L2CAP_MODE_STREAMING:
2488 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2489 }
2490 }
2491
2492 req->dcid = cpu_to_le16(chan->dcid);
2493 req->flags = cpu_to_le16(0x0000);
2494
2495 return ptr - data;
2496 }
2497
2498 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2499 {
2500 struct l2cap_conf_rsp *rsp = data;
2501 void *ptr = rsp->data;
2502
2503 BT_DBG("chan %p", chan);
2504
2505 rsp->scid = cpu_to_le16(chan->dcid);
2506 rsp->result = cpu_to_le16(result);
2507 rsp->flags = cpu_to_le16(flags);
2508
2509 return ptr - data;
2510 }
2511
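/* Send the Connect Response that was deferred while security or
 * authorization was pending, then kick off configuration if we
 * have not already sent a Configure Request for this channel.
 */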
2512 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2513 {
2514 struct l2cap_conn_rsp rsp;
2515 struct l2cap_conn *conn = chan->conn;
2516 u8 buf[128];
2517
2518 rsp.scid = cpu_to_le16(chan->dcid);
2519 rsp.dcid = cpu_to_le16(chan->scid);
2520 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2521 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2522 l2cap_send_cmd(conn, chan->ident,
2523 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2524
2525 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2526 return;
2527
2528 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2529 l2cap_build_conf_req(chan, buf), buf);
2530 chan->num_conf_req++;
2531 }
2532
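/* Extract the RFC option from a successful Configure Response and
 * apply the negotiated ERTM/streaming parameters (retransmission
 * and monitor timeouts, MPS) to the channel, falling back to sane
 * defaults if the remote omitted the option.
 */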
2533 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2534 {
2535 int type, olen;
2536 unsigned long val;
2537 struct l2cap_conf_rfc rfc;
2538
2539 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2540
2541 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2542 return;
2543
2544 while (len >= L2CAP_CONF_OPT_SIZE) {
2545 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2546
2547 switch (type) {
2548 case L2CAP_CONF_RFC:
2549 if (olen == sizeof(rfc))
2550 memcpy(&rfc, (void *)val, olen);
2551 goto done;
2552 }
2553 }
2554
2555 /* Use sane default values in case a misbehaving remote device
2556 * did not send an RFC option.
2557 */
2558 rfc.mode = chan->mode;
2559 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2560 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2561 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2562
2563 BT_ERR("Expected RFC option was not found, using defaults");
2564
2565 done:
2566 switch (rfc.mode) {
2567 case L2CAP_MODE_ERTM:
2568 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2569 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2570 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2571 break;
2572 case L2CAP_MODE_STREAMING:
2573 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2574 }
2575 }
2576
2577 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2578 {
2579 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2580
2581 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2582 return 0;
2583
2584 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2585 cmd->ident == conn->info_ident) {
2586 __cancel_delayed_work(&conn->info_timer);
2587
2588 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2589 conn->info_ident = 0;
2590
2591 l2cap_conn_start(conn);
2592 }
2593
2594 return 0;
2595 }
2596
2597 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2598 {
2599 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2600 struct l2cap_conn_rsp rsp;
2601 struct l2cap_chan *chan = NULL, *pchan;
2602 struct sock *parent, *sk = NULL;
2603 int result, status = L2CAP_CS_NO_INFO;
2604
2605 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2606 __le16 psm = req->psm;
2607
2608 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2609
2610 /* Check if we have a socket listening on this PSM */
2611 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2612 if (!pchan) {
2613 result = L2CAP_CR_BAD_PSM;
2614 goto sendresp;
2615 }
2616
2617 parent = pchan->sk;
2618
2619 lock_sock(parent);
2620
2621 /* Check if the ACL is secure enough (if not SDP) */
2622 if (psm != cpu_to_le16(0x0001) &&
2623 !hci_conn_check_link_mode(conn->hcon)) {
2624 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2625 result = L2CAP_CR_SEC_BLOCK;
2626 goto response;
2627 }
2628
2629 result = L2CAP_CR_NO_MEM;
2630
2631 /* Check for backlog size */
2632 if (sk_acceptq_is_full(parent)) {
2633 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2634 goto response;
2635 }
2636
2637 chan = pchan->ops->new_connection(pchan->data);
2638 if (!chan)
2639 goto response;
2640
2641 sk = chan->sk;
2642
2643 /* Check if we already have a channel with that dcid */
2644 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2645 sock_set_flag(sk, SOCK_ZAPPED);
2646 chan->ops->close(chan->data);
2647 goto response;
2648 }
2649
2650 hci_conn_hold(conn->hcon);
2651
2652 bacpy(&bt_sk(sk)->src, conn->src);
2653 bacpy(&bt_sk(sk)->dst, conn->dst);
2654 chan->psm = psm;
2655 chan->dcid = scid;
2656
2657 bt_accept_enqueue(parent, sk);
2658
2659 l2cap_chan_add(conn, chan);
2660
2661 dcid = chan->scid;
2662
2663 __set_chan_timer(chan, sk->sk_sndtimeo);
2664
2665 chan->ident = cmd->ident;
2666
2667 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2668 if (l2cap_chan_check_security(chan)) {
2669 if (bt_sk(sk)->defer_setup) {
2670 l2cap_state_change(chan, BT_CONNECT2);
2671 result = L2CAP_CR_PEND;
2672 status = L2CAP_CS_AUTHOR_PEND;
2673 parent->sk_data_ready(parent, 0);
2674 } else {
2675 l2cap_state_change(chan, BT_CONFIG);
2676 result = L2CAP_CR_SUCCESS;
2677 status = L2CAP_CS_NO_INFO;
2678 }
2679 } else {
2680 l2cap_state_change(chan, BT_CONNECT2);
2681 result = L2CAP_CR_PEND;
2682 status = L2CAP_CS_AUTHEN_PEND;
2683 }
2684 } else {
2685 l2cap_state_change(chan, BT_CONNECT2);
2686 result = L2CAP_CR_PEND;
2687 status = L2CAP_CS_NO_INFO;
2688 }
2689
2690 response:
2691 release_sock(parent);
2692
2693 sendresp:
2694 rsp.scid = cpu_to_le16(scid);
2695 rsp.dcid = cpu_to_le16(dcid);
2696 rsp.result = cpu_to_le16(result);
2697 rsp.status = cpu_to_le16(status);
2698 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2699
2700 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2701 struct l2cap_info_req info;
2702 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2703
2704 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2705 conn->info_ident = l2cap_get_ident(conn);
2706
2707 schedule_delayed_work(&conn->info_timer,
2708 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2709
2710 l2cap_send_cmd(conn, conn->info_ident,
2711 L2CAP_INFO_REQ, sizeof(info), &info);
2712 }
2713
2714 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2715 result == L2CAP_CR_SUCCESS) {
2716 u8 buf[128];
2717 set_bit(CONF_REQ_SENT, &chan->conf_state);
2718 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2719 l2cap_build_conf_req(chan, buf), buf);
2720 chan->num_conf_req++;
2721 }
2722
2723 return 0;
2724 }
2725
2726 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2727 {
2728 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2729 u16 scid, dcid, result, status;
2730 struct l2cap_chan *chan;
2731 struct sock *sk;
2732 u8 req[128];
2733
2734 scid = __le16_to_cpu(rsp->scid);
2735 dcid = __le16_to_cpu(rsp->dcid);
2736 result = __le16_to_cpu(rsp->result);
2737 status = __le16_to_cpu(rsp->status);
2738
2739 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2740
2741 if (scid) {
2742 chan = l2cap_get_chan_by_scid(conn, scid);
2743 if (!chan)
2744 return -EFAULT;
2745 } else {
2746 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2747 if (!chan)
2748 return -EFAULT;
2749 }
2750
2751 sk = chan->sk;
2752
2753 switch (result) {
2754 case L2CAP_CR_SUCCESS:
2755 l2cap_state_change(chan, BT_CONFIG);
2756 chan->ident = 0;
2757 chan->dcid = dcid;
2758 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2759
2760 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2761 break;
2762
2763 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2764 l2cap_build_conf_req(chan, req), req);
2765 chan->num_conf_req++;
2766 break;
2767
2768 case L2CAP_CR_PEND:
2769 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2770 break;
2771
2772 default:
2773 l2cap_chan_del(chan, ECONNREFUSED);
2774 break;
2775 }
2776
2777 release_sock(sk);
2778 return 0;
2779 }
2780
2781 static inline void set_default_fcs(struct l2cap_chan *chan)
2782 {
2783 /* FCS is enabled only in ERTM or streaming mode, if one or both
2784 * sides request it.
2785 */
2786 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2787 chan->fcs = L2CAP_FCS_NONE;
2788 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2789 chan->fcs = L2CAP_FCS_CRC16;
2790 }
2791
2792 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2793 {
2794 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2795 u16 dcid, flags;
2796 u8 rsp[64];
2797 struct l2cap_chan *chan;
2798 struct sock *sk;
2799 int len;
2800
2801 dcid = __le16_to_cpu(req->dcid);
2802 flags = __le16_to_cpu(req->flags);
2803
2804 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2805
2806 chan = l2cap_get_chan_by_scid(conn, dcid);
2807 if (!chan)
2808 return -ENOENT;
2809
2810 sk = chan->sk;
2811
2812 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2813 struct l2cap_cmd_rej_cid rej;
2814
2815 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2816 rej.scid = cpu_to_le16(chan->scid);
2817 rej.dcid = cpu_to_le16(chan->dcid);
2818
2819 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2820 sizeof(rej), &rej);
2821 goto unlock;
2822 }
2823
2824 /* Reject if config buffer is too small. */
2825 len = cmd_len - sizeof(*req);
2826 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2827 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2828 l2cap_build_conf_rsp(chan, rsp,
2829 L2CAP_CONF_REJECT, flags), rsp);
2830 goto unlock;
2831 }
2832
2833 /* Store config. */
2834 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2835 chan->conf_len += len;
2836
2837 if (flags & 0x0001) {
2838 /* Incomplete config. Send empty response. */
2839 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2840 l2cap_build_conf_rsp(chan, rsp,
2841 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2842 goto unlock;
2843 }
2844
2845 /* Complete config. */
2846 len = l2cap_parse_conf_req(chan, rsp);
2847 if (len < 0) {
2848 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2849 goto unlock;
2850 }
2851
2852 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2853 chan->num_conf_rsp++;
2854
2855 /* Reset config buffer. */
2856 chan->conf_len = 0;
2857
2858 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2859 goto unlock;
2860
2861 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2862 set_default_fcs(chan);
2863
2864 l2cap_state_change(chan, BT_CONNECTED);
2865
2866 chan->next_tx_seq = 0;
2867 chan->expected_tx_seq = 0;
2868 skb_queue_head_init(&chan->tx_q);
2869 if (chan->mode == L2CAP_MODE_ERTM)
2870 l2cap_ertm_init(chan);
2871
2872 l2cap_chan_ready(sk);
2873 goto unlock;
2874 }
2875
2876 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2877 u8 buf[64];
2878 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2879 l2cap_build_conf_req(chan, buf), buf);
2880 chan->num_conf_req++;
2881 }
2882
2883 /* Got Conf Rsp PENDING from remote side and assume we sent
2884 Conf Rsp PENDING in the code above */
2885 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2886 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2887
2888 /* check compatibility */
2889
2890 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2891 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2892
2893 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2894 l2cap_build_conf_rsp(chan, rsp,
2895 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2896 }
2897
2898 unlock:
2899 release_sock(sk);
2900 return 0;
2901 }
2902
2903 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2904 {
2905 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2906 u16 scid, flags, result;
2907 struct l2cap_chan *chan;
2908 struct sock *sk;
2909 int len = __le16_to_cpu(cmd->len) - sizeof(*rsp);
2910
2911 scid = __le16_to_cpu(rsp->scid);
2912 flags = __le16_to_cpu(rsp->flags);
2913 result = __le16_to_cpu(rsp->result);
2914
2915 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2916 scid, flags, result);
2917
2918 chan = l2cap_get_chan_by_scid(conn, scid);
2919 if (!chan)
2920 return 0;
2921
2922 sk = chan->sk;
2923
2924 switch (result) {
2925 case L2CAP_CONF_SUCCESS:
2926 l2cap_conf_rfc_get(chan, rsp->data, len);
2927 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2928 break;
2929
2930 case L2CAP_CONF_PENDING:
2931 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2932
2933 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2934 char buf[64];
2935
2936 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2937 buf, &result);
2938 if (len < 0) {
2939 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2940 goto done;
2941 }
2942
2943 /* check compatibility */
2944
2945 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2946 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2947
2948 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2949 l2cap_build_conf_rsp(chan, buf,
2950 L2CAP_CONF_SUCCESS, 0x0000), buf);
2951 }
2952 goto done;
2953
2954 case L2CAP_CONF_UNACCEPT:
2955 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2956 char req[64];
2957
2958 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2959 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2960 goto done;
2961 }
2962
2963 /* throw out any old stored conf requests */
2964 result = L2CAP_CONF_SUCCESS;
2965 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2966 req, &result);
2967 if (len < 0) {
2968 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2969 goto done;
2970 }
2971
2972 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2973 L2CAP_CONF_REQ, len, req);
2974 chan->num_conf_req++;
2975 if (result != L2CAP_CONF_SUCCESS)
2976 goto done;
2977 break;
2978 }
2979
2980 default:
2981 sk->sk_err = ECONNRESET;
2982 __set_chan_timer(chan,
2983 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT));
2984 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2985 goto done;
2986 }
2987
2988 if (flags & 0x01)
2989 goto done;
2990
2991 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2992
2993 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2994 set_default_fcs(chan);
2995
2996 l2cap_state_change(chan, BT_CONNECTED);
2997 chan->next_tx_seq = 0;
2998 chan->expected_tx_seq = 0;
2999 skb_queue_head_init(&chan->tx_q);
3000 if (chan->mode == L2CAP_MODE_ERTM)
3001 l2cap_ertm_init(chan);
3002
3003 l2cap_chan_ready(sk);
3004 }
3005
3006 done:
3007 release_sock(sk);
3008 return 0;
3009 }
3010
3011 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3012 {
3013 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3014 struct l2cap_disconn_rsp rsp;
3015 u16 dcid, scid;
3016 struct l2cap_chan *chan;
3017 struct sock *sk;
3018
3019 scid = __le16_to_cpu(req->scid);
3020 dcid = __le16_to_cpu(req->dcid);
3021
3022 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3023
3024 chan = l2cap_get_chan_by_scid(conn, dcid);
3025 if (!chan)
3026 return 0;
3027
3028 sk = chan->sk;
3029
3030 rsp.dcid = cpu_to_le16(chan->scid);
3031 rsp.scid = cpu_to_le16(chan->dcid);
3032 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3033
3034 sk->sk_shutdown = SHUTDOWN_MASK;
3035
3036 l2cap_chan_del(chan, ECONNRESET);
3037 release_sock(sk);
3038
3039 chan->ops->close(chan->data);
3040 return 0;
3041 }
3042
3043 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3044 {
3045 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3046 u16 dcid, scid;
3047 struct l2cap_chan *chan;
3048 struct sock *sk;
3049
3050 scid = __le16_to_cpu(rsp->scid);
3051 dcid = __le16_to_cpu(rsp->dcid);
3052
3053 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3054
3055 chan = l2cap_get_chan_by_scid(conn, scid);
3056 if (!chan)
3057 return 0;
3058
3059 sk = chan->sk;
3060
3061 l2cap_chan_del(chan, 0);
3062 release_sock(sk);
3063
3064 chan->ops->close(chan->data);
3065 return 0;
3066 }
3067
3068 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3069 {
3070 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3071 u16 type;
3072
3073 type = __le16_to_cpu(req->type);
3074
3075 BT_DBG("type 0x%4.4x", type);
3076
3077 if (type == L2CAP_IT_FEAT_MASK) {
3078 u8 buf[8];
3079 u32 feat_mask = l2cap_feat_mask;
3080 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3081 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3082 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3083 if (!disable_ertm)
3084 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3085 | L2CAP_FEAT_FCS;
3086 if (enable_hs)
3087 feat_mask |= L2CAP_FEAT_EXT_FLOW
3088 | L2CAP_FEAT_EXT_WINDOW;
3089
3090 put_unaligned_le32(feat_mask, rsp->data);
3091 l2cap_send_cmd(conn, cmd->ident,
3092 L2CAP_INFO_RSP, sizeof(buf), buf);
3093 } else if (type == L2CAP_IT_FIXED_CHAN) {
3094 u8 buf[12];
3095 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3096
3097 if (enable_hs)
3098 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3099 else
3100 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3101
3102 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3103 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3104 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3105 l2cap_send_cmd(conn, cmd->ident,
3106 L2CAP_INFO_RSP, sizeof(buf), buf);
3107 } else {
3108 struct l2cap_info_rsp rsp;
3109 rsp.type = cpu_to_le16(type);
3110 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3111 l2cap_send_cmd(conn, cmd->ident,
3112 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3113 }
3114
3115 return 0;
3116 }
3117
3118 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3119 {
3120 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3121 u16 type, result;
3122
3123 type = __le16_to_cpu(rsp->type);
3124 result = __le16_to_cpu(rsp->result);
3125
3126 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3127
3128 /* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
3129 if (cmd->ident != conn->info_ident ||
3130 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3131 return 0;
3132
3133 __cancel_delayed_work(&conn->info_timer);
3134
3135 if (result != L2CAP_IR_SUCCESS) {
3136 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3137 conn->info_ident = 0;
3138
3139 l2cap_conn_start(conn);
3140
3141 return 0;
3142 }
3143
3144 if (type == L2CAP_IT_FEAT_MASK) {
3145 conn->feat_mask = get_unaligned_le32(rsp->data);
3146
3147 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3148 struct l2cap_info_req req;
3149 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3150
3151 conn->info_ident = l2cap_get_ident(conn);
3152
3153 l2cap_send_cmd(conn, conn->info_ident,
3154 L2CAP_INFO_REQ, sizeof(req), &req);
3155 } else {
3156 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3157 conn->info_ident = 0;
3158
3159 l2cap_conn_start(conn);
3160 }
3161 } else if (type == L2CAP_IT_FIXED_CHAN) {
3162 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3163 conn->info_ident = 0;
3164
3165 l2cap_conn_start(conn);
3166 }
3167
3168 return 0;
3169 }
3170
3171 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3172 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3173 void *data)
3174 {
3175 struct l2cap_create_chan_req *req = data;
3176 struct l2cap_create_chan_rsp rsp;
3177 u16 psm, scid;
3178
3179 if (cmd_len != sizeof(*req))
3180 return -EPROTO;
3181
3182 if (!enable_hs)
3183 return -EINVAL;
3184
3185 psm = le16_to_cpu(req->psm);
3186 scid = le16_to_cpu(req->scid);
3187
3188 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3189
3190 /* Placeholder: Always reject */
3191 rsp.dcid = 0;
3192 rsp.scid = cpu_to_le16(scid);
3193 rsp.result = L2CAP_CR_NO_MEM;
3194 rsp.status = L2CAP_CS_NO_INFO;
3195
3196 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3197 sizeof(rsp), &rsp);
3198
3199 return 0;
3200 }
3201
3202 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3203 struct l2cap_cmd_hdr *cmd, void *data)
3204 {
3205 BT_DBG("conn %p", conn);
3206
3207 return l2cap_connect_rsp(conn, cmd, data);
3208 }
3209
3210 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3211 u16 icid, u16 result)
3212 {
3213 struct l2cap_move_chan_rsp rsp;
3214
3215 BT_DBG("icid %d, result %d", icid, result);
3216
3217 rsp.icid = cpu_to_le16(icid);
3218 rsp.result = cpu_to_le16(result);
3219
3220 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3221 }
3222
3223 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3224 struct l2cap_chan *chan, u16 icid, u16 result)
3225 {
3226 struct l2cap_move_chan_cfm cfm;
3227 u8 ident;
3228
3229 BT_DBG("icid %d, result %d", icid, result);
3230
3231 ident = l2cap_get_ident(conn);
3232 if (chan)
3233 chan->ident = ident;
3234
3235 cfm.icid = cpu_to_le16(icid);
3236 cfm.result = cpu_to_le16(result);
3237
3238 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3239 }
3240
3241 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3242 u16 icid)
3243 {
3244 struct l2cap_move_chan_cfm_rsp rsp;
3245
3246 BT_DBG("icid %d", icid);
3247
3248 rsp.icid = cpu_to_le16(icid);
3249 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3250 }
3251
3252 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3253 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3254 {
3255 struct l2cap_move_chan_req *req = data;
3256 u16 icid = 0;
3257 u16 result = L2CAP_MR_NOT_ALLOWED;
3258
3259 if (cmd_len != sizeof(*req))
3260 return -EPROTO;
3261
3262 icid = le16_to_cpu(req->icid);
3263
3264 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3265
3266 if (!enable_hs)
3267 return -EINVAL;
3268
3269 /* Placeholder: Always refuse */
3270 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3271
3272 return 0;
3273 }
3274
3275 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3276 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3277 {
3278 struct l2cap_move_chan_rsp *rsp = data;
3279 u16 icid, result;
3280
3281 if (cmd_len != sizeof(*rsp))
3282 return -EPROTO;
3283
3284 icid = le16_to_cpu(rsp->icid);
3285 result = le16_to_cpu(rsp->result);
3286
3287 BT_DBG("icid %d, result %d", icid, result);
3288
3289 /* Placeholder: Always unconfirmed */
3290 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3291
3292 return 0;
3293 }
3294
3295 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3296 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3297 {
3298 struct l2cap_move_chan_cfm *cfm = data;
3299 u16 icid, result;
3300
3301 if (cmd_len != sizeof(*cfm))
3302 return -EPROTO;
3303
3304 icid = le16_to_cpu(cfm->icid);
3305 result = le16_to_cpu(cfm->result);
3306
3307 BT_DBG("icid %d, result %d", icid, result);
3308
3309 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3310
3311 return 0;
3312 }
3313
3314 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3315 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3316 {
3317 struct l2cap_move_chan_cfm_rsp *rsp = data;
3318 u16 icid;
3319
3320 if (cmd_len != sizeof(*rsp))
3321 return -EPROTO;
3322
3323 icid = le16_to_cpu(rsp->icid);
3324
3325 BT_DBG("icid %d", icid);
3326
3327 return 0;
3328 }
3329
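/* Sanity check LE connection parameters. Intervals are in units
 * of 1.25 ms (6..3200 = 7.5 ms..4 s), the supervision timeout in
 * units of 10 ms (10..3200 = 100 ms..32 s), and the slave latency
 * is limited so that the supervision timeout can still be met.
 * Illustrative example: min = 24 (30 ms), max = 40 (50 ms),
 * latency = 0, to_multiplier = 42 (420 ms) passes these checks.
 */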
3330 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3331 u16 to_multiplier)
3332 {
3333 u16 max_latency;
3334
3335 if (min > max || min < 6 || max > 3200)
3336 return -EINVAL;
3337
3338 if (to_multiplier < 10 || to_multiplier > 3200)
3339 return -EINVAL;
3340
3341 if (max >= to_multiplier * 8)
3342 return -EINVAL;
3343
3344 max_latency = (to_multiplier * 8 / max) - 1;
3345 if (latency > 499 || latency > max_latency)
3346 return -EINVAL;
3347
3348 return 0;
3349 }
3350
3351 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3352 struct l2cap_cmd_hdr *cmd, u8 *data)
3353 {
3354 struct hci_conn *hcon = conn->hcon;
3355 struct l2cap_conn_param_update_req *req;
3356 struct l2cap_conn_param_update_rsp rsp;
3357 u16 min, max, latency, to_multiplier, cmd_len;
3358 int err;
3359
3360 if (!(hcon->link_mode & HCI_LM_MASTER))
3361 return -EINVAL;
3362
3363 cmd_len = __le16_to_cpu(cmd->len);
3364 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3365 return -EPROTO;
3366
3367 req = (struct l2cap_conn_param_update_req *) data;
3368 min = __le16_to_cpu(req->min);
3369 max = __le16_to_cpu(req->max);
3370 latency = __le16_to_cpu(req->latency);
3371 to_multiplier = __le16_to_cpu(req->to_multiplier);
3372
3373 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3374 min, max, latency, to_multiplier);
3375
3376 memset(&rsp, 0, sizeof(rsp));
3377
3378 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3379 if (err)
3380 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3381 else
3382 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3383
3384 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3385 sizeof(rsp), &rsp);
3386
3387 if (!err)
3388 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3389
3390 return 0;
3391 }
3392
3393 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3394 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3395 {
3396 int err = 0;
3397
3398 switch (cmd->code) {
3399 case L2CAP_COMMAND_REJ:
3400 l2cap_command_rej(conn, cmd, data);
3401 break;
3402
3403 case L2CAP_CONN_REQ:
3404 err = l2cap_connect_req(conn, cmd, data);
3405 break;
3406
3407 case L2CAP_CONN_RSP:
3408 err = l2cap_connect_rsp(conn, cmd, data);
3409 break;
3410
3411 case L2CAP_CONF_REQ:
3412 err = l2cap_config_req(conn, cmd, cmd_len, data);
3413 break;
3414
3415 case L2CAP_CONF_RSP:
3416 err = l2cap_config_rsp(conn, cmd, data);
3417 break;
3418
3419 case L2CAP_DISCONN_REQ:
3420 err = l2cap_disconnect_req(conn, cmd, data);
3421 break;
3422
3423 case L2CAP_DISCONN_RSP:
3424 err = l2cap_disconnect_rsp(conn, cmd, data);
3425 break;
3426
3427 case L2CAP_ECHO_REQ:
3428 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3429 break;
3430
3431 case L2CAP_ECHO_RSP:
3432 break;
3433
3434 case L2CAP_INFO_REQ:
3435 err = l2cap_information_req(conn, cmd, data);
3436 break;
3437
3438 case L2CAP_INFO_RSP:
3439 err = l2cap_information_rsp(conn, cmd, data);
3440 break;
3441
3442 case L2CAP_CREATE_CHAN_REQ:
3443 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3444 break;
3445
3446 case L2CAP_CREATE_CHAN_RSP:
3447 err = l2cap_create_channel_rsp(conn, cmd, data);
3448 break;
3449
3450 case L2CAP_MOVE_CHAN_REQ:
3451 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3452 break;
3453
3454 case L2CAP_MOVE_CHAN_RSP:
3455 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3456 break;
3457
3458 case L2CAP_MOVE_CHAN_CFM:
3459 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3460 break;
3461
3462 case L2CAP_MOVE_CHAN_CFM_RSP:
3463 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3464 break;
3465
3466 default:
3467 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3468 err = -EINVAL;
3469 break;
3470 }
3471
3472 return err;
3473 }
3474
3475 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3476 struct l2cap_cmd_hdr *cmd, u8 *data)
3477 {
3478 switch (cmd->code) {
3479 case L2CAP_COMMAND_REJ:
3480 return 0;
3481
3482 case L2CAP_CONN_PARAM_UPDATE_REQ:
3483 return l2cap_conn_param_update_req(conn, cmd, data);
3484
3485 case L2CAP_CONN_PARAM_UPDATE_RSP:
3486 return 0;
3487
3488 default:
3489 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3490 return -EINVAL;
3491 }
3492 }
3493
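/* Walk all signaling commands contained in this C-frame, dispatch
 * each one to the LE or BR/EDR handler and answer with a Command
 * Reject if the handler reports an error.
 */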
3494 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3495 struct sk_buff *skb)
3496 {
3497 u8 *data = skb->data;
3498 int len = skb->len;
3499 struct l2cap_cmd_hdr cmd;
3500 int err;
3501
3502 l2cap_raw_recv(conn, skb);
3503
3504 while (len >= L2CAP_CMD_HDR_SIZE) {
3505 u16 cmd_len;
3506 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3507 data += L2CAP_CMD_HDR_SIZE;
3508 len -= L2CAP_CMD_HDR_SIZE;
3509
3510 cmd_len = le16_to_cpu(cmd.len);
3511
3512 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3513
3514 if (cmd_len > len || !cmd.ident) {
3515 BT_DBG("corrupted command");
3516 break;
3517 }
3518
3519 if (conn->hcon->type == LE_LINK)
3520 err = l2cap_le_sig_cmd(conn, &cmd, data);
3521 else
3522 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3523
3524 if (err) {
3525 struct l2cap_cmd_rej_unk rej;
3526
3527 BT_ERR("Wrong link type (%d)", err);
3528
3529 /* FIXME: Map err to a valid reason */
3530 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3531 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3532 }
3533
3534 data += cmd_len;
3535 len -= cmd_len;
3536 }
3537
3538 kfree_skb(skb);
3539 }
3540
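/* When CRC16 FCS is in use, strip the trailing two FCS bytes from
 * the skb and compare them against a CRC computed over the frame
 * header and payload. Returns -EBADMSG on mismatch.
 */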
3541 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3542 {
3543 u16 our_fcs, rcv_fcs;
3544 int hdr_size;
3545
3546 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3547 hdr_size = L2CAP_EXT_HDR_SIZE;
3548 else
3549 hdr_size = L2CAP_ENH_HDR_SIZE;
3550
3551 if (chan->fcs == L2CAP_FCS_CRC16) {
3552 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3553 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3554 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3555
3556 if (our_fcs != rcv_fcs)
3557 return -EBADMSG;
3558 }
3559 return 0;
3560 }
3561
3562 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3563 {
3564 u32 control = 0;
3565
3566 chan->frames_sent = 0;
3567
3568 control |= __set_reqseq(chan, chan->buffer_seq);
3569
3570 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3571 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3572 l2cap_send_sframe(chan, control);
3573 set_bit(CONN_RNR_SENT, &chan->conn_state);
3574 }
3575
3576 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3577 l2cap_retransmit_frames(chan);
3578
3579 l2cap_ertm_send(chan);
3580
3581 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3582 chan->frames_sent == 0) {
3583 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3584 l2cap_send_sframe(chan, control);
3585 }
3586 }
3587
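/* Insert an out-of-sequence I-frame into the SREJ reassembly queue,
 * keeping the queue ordered by each frame's offset from buffer_seq.
 * A duplicate tx_seq is rejected with -EINVAL.
 */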
3588 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3589 {
3590 struct sk_buff *next_skb;
3591 int tx_seq_offset, next_tx_seq_offset;
3592
3593 bt_cb(skb)->tx_seq = tx_seq;
3594 bt_cb(skb)->sar = sar;
3595
3596 next_skb = skb_peek(&chan->srej_q);
3597
3598 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3599
3600 while (next_skb) {
3601 if (bt_cb(next_skb)->tx_seq == tx_seq)
3602 return -EINVAL;
3603
3604 next_tx_seq_offset = __seq_offset(chan,
3605 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3606
3607 if (next_tx_seq_offset > tx_seq_offset) {
3608 __skb_queue_before(&chan->srej_q, next_skb, skb);
3609 return 0;
3610 }
3611
3612 if (skb_queue_is_last(&chan->srej_q, next_skb))
3613 next_skb = NULL;
3614 else
3615 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3616 }
3617
3618 __skb_queue_tail(&chan->srej_q, skb);
3619
3620 return 0;
3621 }
3622
3623 static void append_skb_frag(struct sk_buff *skb,
3624 struct sk_buff *new_frag, struct sk_buff **last_frag)
3625 {
3626 /* skb->len reflects data in skb as well as all fragments
3627 * skb->data_len reflects only data in fragments
3628 */
3629 if (!skb_has_frag_list(skb))
3630 skb_shinfo(skb)->frag_list = new_frag;
3631
3632 new_frag->next = NULL;
3633
3634 (*last_frag)->next = new_frag;
3635 *last_frag = new_frag;
3636
3637 skb->len += new_frag->len;
3638 skb->data_len += new_frag->len;
3639 skb->truesize += new_frag->truesize;
3640 }
3641
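/* Reassemble an SDU from its SAR fragments: unsegmented frames are
 * delivered directly, a START frame carries the total SDU length,
 * CONTINUE frames are appended as fragments and the SDU is handed
 * to the channel once the END frame completes it.
 */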
3642 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3643 {
3644 int err = -EINVAL;
3645
3646 switch (__get_ctrl_sar(chan, control)) {
3647 case L2CAP_SAR_UNSEGMENTED:
3648 if (chan->sdu)
3649 break;
3650
3651 err = chan->ops->recv(chan->data, skb);
3652 break;
3653
3654 case L2CAP_SAR_START:
3655 if (chan->sdu)
3656 break;
3657
3658 chan->sdu_len = get_unaligned_le16(skb->data);
3659 skb_pull(skb, L2CAP_SDULEN_SIZE);
3660
3661 if (chan->sdu_len > chan->imtu) {
3662 err = -EMSGSIZE;
3663 break;
3664 }
3665
3666 if (skb->len >= chan->sdu_len)
3667 break;
3668
3669 chan->sdu = skb;
3670 chan->sdu_last_frag = skb;
3671
3672 skb = NULL;
3673 err = 0;
3674 break;
3675
3676 case L2CAP_SAR_CONTINUE:
3677 if (!chan->sdu)
3678 break;
3679
3680 append_skb_frag(chan->sdu, skb,
3681 &chan->sdu_last_frag);
3682 skb = NULL;
3683
3684 if (chan->sdu->len >= chan->sdu_len)
3685 break;
3686
3687 err = 0;
3688 break;
3689
3690 case L2CAP_SAR_END:
3691 if (!chan->sdu)
3692 break;
3693
3694 append_skb_frag(chan->sdu, skb,
3695 &chan->sdu_last_frag);
3696 skb = NULL;
3697
3698 if (chan->sdu->len != chan->sdu_len)
3699 break;
3700
3701 err = chan->ops->recv(chan->data, chan->sdu);
3702
3703 if (!err) {
3704 /* Reassembly complete */
3705 chan->sdu = NULL;
3706 chan->sdu_last_frag = NULL;
3707 chan->sdu_len = 0;
3708 }
3709 break;
3710 }
3711
3712 if (err) {
3713 kfree_skb(skb);
3714 kfree_skb(chan->sdu);
3715 chan->sdu = NULL;
3716 chan->sdu_last_frag = NULL;
3717 chan->sdu_len = 0;
3718 }
3719
3720 return err;
3721 }
3722
3723 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3724 {
3725 BT_DBG("chan %p, Enter local busy", chan);
3726
3727 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3728
3729 __set_ack_timer(chan);
3730 }
3731
3732 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3733 {
3734 u32 control;
3735
3736 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3737 goto done;
3738
3739 control = __set_reqseq(chan, chan->buffer_seq);
3740 control |= __set_ctrl_poll(chan);
3741 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3742 l2cap_send_sframe(chan, control);
3743 chan->retry_count = 1;
3744
3745 __clear_retrans_timer(chan);
3746 __set_monitor_timer(chan);
3747
3748 set_bit(CONN_WAIT_F, &chan->conn_state);
3749
3750 done:
3751 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3752 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3753
3754 BT_DBG("chan %p, Exit local busy", chan);
3755 }
3756
3757 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3758 {
3759 if (chan->mode == L2CAP_MODE_ERTM) {
3760 if (busy)
3761 l2cap_ertm_enter_local_busy(chan);
3762 else
3763 l2cap_ertm_exit_local_busy(chan);
3764 }
3765 }
3766
3767 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3768 {
3769 struct sk_buff *skb;
3770 u32 control;
3771
3772 while ((skb = skb_peek(&chan->srej_q)) &&
3773 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3774 int err;
3775
3776 if (bt_cb(skb)->tx_seq != tx_seq)
3777 break;
3778
3779 skb = skb_dequeue(&chan->srej_q);
3780 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3781 err = l2cap_reassemble_sdu(chan, skb, control);
3782
3783 if (err < 0) {
3784 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3785 break;
3786 }
3787
3788 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3789 tx_seq = __next_seq(chan, tx_seq);
3790 }
3791 }
3792
3793 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3794 {
3795 struct srej_list *l, *tmp;
3796 u32 control;
3797
3798 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3799 if (l->tx_seq == tx_seq) {
3800 list_del(&l->list);
3801 kfree(l);
3802 return;
3803 }
3804 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3805 control |= __set_reqseq(chan, l->tx_seq);
3806 l2cap_send_sframe(chan, control);
3807 list_del(&l->list);
3808 list_add_tail(&l->list, &chan->srej_l);
3809 }
3810 }
3811
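/* Send one SREJ S-frame for every sequence number missing between
 * expected_tx_seq and the received tx_seq, remembering each request
 * in chan->srej_l so the gap can be tracked until it is filled.
 */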
3812 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3813 {
3814 struct srej_list *new;
3815 u32 control;
3816
3817 while (tx_seq != chan->expected_tx_seq) {
3818 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3819 control |= __set_reqseq(chan, chan->expected_tx_seq);
3820 l2cap_send_sframe(chan, control);
3821
3822 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3823 if (!new)
3824 return -ENOMEM;
3825
3826 new->tx_seq = chan->expected_tx_seq;
3827
3828 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3829
3830 list_add_tail(&new->list, &chan->srej_l);
3831 }
3832
3833 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3834
3835 return 0;
3836 }
3837
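/* Receive path for ERTM I-frames: ReqSeq acknowledges our sent
 * frames, a TxSeq outside the window tears the channel down,
 * out-of-sequence frames enter (or are queued under) SREJ recovery,
 * and in-sequence frames are reassembled and acknowledged either
 * immediately (roughly every tx_win/6 frames) or via the ack timer.
 */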
3838 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3839 {
3840 u16 tx_seq = __get_txseq(chan, rx_control);
3841 u16 req_seq = __get_reqseq(chan, rx_control);
3842 u8 sar = __get_ctrl_sar(chan, rx_control);
3843 int tx_seq_offset, expected_tx_seq_offset;
3844 int num_to_ack = (chan->tx_win/6) + 1;
3845 int err = 0;
3846
3847 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3848 tx_seq, rx_control);
3849
3850 if (__is_ctrl_final(chan, rx_control) &&
3851 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3852 __clear_monitor_timer(chan);
3853 if (chan->unacked_frames > 0)
3854 __set_retrans_timer(chan);
3855 clear_bit(CONN_WAIT_F, &chan->conn_state);
3856 }
3857
3858 chan->expected_ack_seq = req_seq;
3859 l2cap_drop_acked_frames(chan);
3860
3861 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3862
3863 /* invalid tx_seq */
3864 if (tx_seq_offset >= chan->tx_win) {
3865 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3866 goto drop;
3867 }
3868
3869 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3870 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3871 l2cap_send_ack(chan);
3872 goto drop;
3873 }
3874
3875 if (tx_seq == chan->expected_tx_seq)
3876 goto expected;
3877
3878 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3879 struct srej_list *first;
3880
3881 first = list_first_entry(&chan->srej_l,
3882 struct srej_list, list);
3883 if (tx_seq == first->tx_seq) {
3884 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3885 l2cap_check_srej_gap(chan, tx_seq);
3886
3887 list_del(&first->list);
3888 kfree(first);
3889
3890 if (list_empty(&chan->srej_l)) {
3891 chan->buffer_seq = chan->buffer_seq_srej;
3892 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3893 l2cap_send_ack(chan);
3894 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3895 }
3896 } else {
3897 struct srej_list *l;
3898
3899 /* duplicated tx_seq */
3900 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3901 goto drop;
3902
3903 list_for_each_entry(l, &chan->srej_l, list) {
3904 if (l->tx_seq == tx_seq) {
3905 l2cap_resend_srejframe(chan, tx_seq);
3906 return 0;
3907 }
3908 }
3909
3910 err = l2cap_send_srejframe(chan, tx_seq);
3911 if (err < 0) {
3912 l2cap_send_disconn_req(chan->conn, chan, -err);
3913 return err;
3914 }
3915 }
3916 } else {
3917 expected_tx_seq_offset = __seq_offset(chan,
3918 chan->expected_tx_seq, chan->buffer_seq);
3919
3920 /* duplicated tx_seq */
3921 if (tx_seq_offset < expected_tx_seq_offset)
3922 goto drop;
3923
3924 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3925
3926 BT_DBG("chan %p, Enter SREJ", chan);
3927
3928 INIT_LIST_HEAD(&chan->srej_l);
3929 chan->buffer_seq_srej = chan->buffer_seq;
3930
3931 __skb_queue_head_init(&chan->srej_q);
3932 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3933
3934 /* Set P-bit only if there are some I-frames to ack. */
3935 if (__clear_ack_timer(chan))
3936 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3937
3938 err = l2cap_send_srejframe(chan, tx_seq);
3939 if (err < 0) {
3940 l2cap_send_disconn_req(chan->conn, chan, -err);
3941 return err;
3942 }
3943 }
3944 return 0;
3945
3946 expected:
3947 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3948
3949 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3950 bt_cb(skb)->tx_seq = tx_seq;
3951 bt_cb(skb)->sar = sar;
3952 __skb_queue_tail(&chan->srej_q, skb);
3953 return 0;
3954 }
3955
3956 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3957 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3958
3959 if (err < 0) {
3960 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3961 return err;
3962 }
3963
3964 if (__is_ctrl_final(chan, rx_control)) {
3965 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3966 l2cap_retransmit_frames(chan);
3967 }
3968
3969
3970 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3971 if (chan->num_acked == num_to_ack - 1)
3972 l2cap_send_ack(chan);
3973 else
3974 __set_ack_timer(chan);
3975
3976 return 0;
3977
3978 drop:
3979 kfree_skb(skb);
3980 return 0;
3981 }
3982
3983 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3984 {
3985 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3986 __get_reqseq(chan, rx_control), rx_control);
3987
3988 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3989 l2cap_drop_acked_frames(chan);
3990
3991 if (__is_ctrl_poll(chan, rx_control)) {
3992 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3993 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3994 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3995 (chan->unacked_frames > 0))
3996 __set_retrans_timer(chan);
3997
3998 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3999 l2cap_send_srejtail(chan);
4000 } else {
4001 l2cap_send_i_or_rr_or_rnr(chan);
4002 }
4003
4004 } else if (__is_ctrl_final(chan, rx_control)) {
4005 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4006
4007 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4008 l2cap_retransmit_frames(chan);
4009
4010 } else {
4011 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4012 (chan->unacked_frames > 0))
4013 __set_retrans_timer(chan);
4014
4015 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4016 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4017 l2cap_send_ack(chan);
4018 else
4019 l2cap_ertm_send(chan);
4020 }
4021 }
4022
4023 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4024 {
4025 u16 tx_seq = __get_reqseq(chan, rx_control);
4026
4027 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4028
4029 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4030
4031 chan->expected_ack_seq = tx_seq;
4032 l2cap_drop_acked_frames(chan);
4033
4034 if (__is_ctrl_final(chan, rx_control)) {
4035 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4036 l2cap_retransmit_frames(chan);
4037 } else {
4038 l2cap_retransmit_frames(chan);
4039
4040 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4041 set_bit(CONN_REJ_ACT, &chan->conn_state);
4042 }
4043 }
4044 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4045 {
4046 u16 tx_seq = __get_reqseq(chan, rx_control);
4047
4048 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4049
4050 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4051
4052 if (__is_ctrl_poll(chan, rx_control)) {
4053 chan->expected_ack_seq = tx_seq;
4054 l2cap_drop_acked_frames(chan);
4055
4056 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4057 l2cap_retransmit_one_frame(chan, tx_seq);
4058
4059 l2cap_ertm_send(chan);
4060
4061 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4062 chan->srej_save_reqseq = tx_seq;
4063 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4064 }
4065 } else if (__is_ctrl_final(chan, rx_control)) {
4066 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4067 chan->srej_save_reqseq == tx_seq)
4068 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4069 else
4070 l2cap_retransmit_one_frame(chan, tx_seq);
4071 } else {
4072 l2cap_retransmit_one_frame(chan, tx_seq);
4073 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4074 chan->srej_save_reqseq = tx_seq;
4075 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4076 }
4077 }
4078 }
4079
4080 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4081 {
4082 u16 tx_seq = __get_reqseq(chan, rx_control);
4083
4084 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4085
4086 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4087 chan->expected_ack_seq = tx_seq;
4088 l2cap_drop_acked_frames(chan);
4089
4090 if (__is_ctrl_poll(chan, rx_control))
4091 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4092
4093 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4094 __clear_retrans_timer(chan);
4095 if (__is_ctrl_poll(chan, rx_control))
4096 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4097 return;
4098 }
4099
4100 if (__is_ctrl_poll(chan, rx_control)) {
4101 l2cap_send_srejtail(chan);
4102 } else {
4103 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4104 l2cap_send_sframe(chan, rx_control);
4105 }
4106 }
4107
4108 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4109 {
4110 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4111
4112 if (__is_ctrl_final(chan, rx_control) &&
4113 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4114 __clear_monitor_timer(chan);
4115 if (chan->unacked_frames > 0)
4116 __set_retrans_timer(chan);
4117 clear_bit(CONN_WAIT_F, &chan->conn_state);
4118 }
4119
4120 switch (__get_ctrl_super(chan, rx_control)) {
4121 case L2CAP_SUPER_RR:
4122 l2cap_data_channel_rrframe(chan, rx_control);
4123 break;
4124
4125 case L2CAP_SUPER_REJ:
4126 l2cap_data_channel_rejframe(chan, rx_control);
4127 break;
4128
4129 case L2CAP_SUPER_SREJ:
4130 l2cap_data_channel_srejframe(chan, rx_control);
4131 break;
4132
4133 case L2CAP_SUPER_RNR:
4134 l2cap_data_channel_rnrframe(chan, rx_control);
4135 break;
4136 }
4137
4138 kfree_skb(skb);
4139 return 0;
4140 }
4141
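/* Entry point for incoming ERTM frames: verify the FCS, check the
 * payload length against the negotiated MPS and validate ReqSeq
 * against the range of unacknowledged frames before dispatching to
 * the I-frame or S-frame handlers.
 */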
4142 int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4143 {
4144 u32 control;
4145 u16 req_seq;
4146 int len, next_tx_seq_offset, req_seq_offset;
4147
4148 control = __get_control(chan, skb->data);
4149 skb_pull(skb, __ctrl_size(chan));
4150 len = skb->len;
4151
4152 /*
4153 * We can just drop a corrupted I-frame here.
4154 * It will be treated as a missing frame and the normal
4155 * ERTM recovery procedure will request its retransmission.
4156 */
4157 if (l2cap_check_fcs(chan, skb))
4158 goto drop;
4159
4160 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4161 len -= L2CAP_SDULEN_SIZE;
4162
4163 if (chan->fcs == L2CAP_FCS_CRC16)
4164 len -= L2CAP_FCS_SIZE;
4165
4166 if (len > chan->mps) {
4167 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4168 goto drop;
4169 }
4170
4171 req_seq = __get_reqseq(chan, control);
4172
4173 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4174
4175 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4176 chan->expected_ack_seq);
4177
4178 /* check for invalid req-seq */
4179 if (req_seq_offset > next_tx_seq_offset) {
4180 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4181 goto drop;
4182 }
4183
4184 if (!__is_sframe(chan, control)) {
4185 if (len < 0) {
4186 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4187 goto drop;
4188 }
4189
4190 l2cap_data_channel_iframe(chan, control, skb);
4191 } else {
4192 if (len != 0) {
4193 BT_ERR("%d", len);
4194 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4195 goto drop;
4196 }
4197
4198 l2cap_data_channel_sframe(chan, control, skb);
4199 }
4200
4201 return 0;
4202
4203 drop:
4204 kfree_skb(skb);
4205 return 0;
4206 }
4207
4208 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4209 {
4210 struct l2cap_chan *chan;
4211 struct sock *sk = NULL;
4212 u32 control;
4213 u16 tx_seq;
4214 int len;
4215
4216 chan = l2cap_get_chan_by_scid(conn, cid);
4217 if (!chan) {
4218 BT_DBG("unknown cid 0x%4.4x", cid);
4219 goto drop;
4220 }
4221
4222 sk = chan->sk;
4223
4224 BT_DBG("chan %p, len %d", chan, skb->len);
4225
4226 if (chan->state != BT_CONNECTED)
4227 goto drop;
4228
4229 switch (chan->mode) {
4230 case L2CAP_MODE_BASIC:
4231 /* If the socket receive buffer overflows we drop data here,
4232 * which is *bad* because L2CAP has to be reliable.
4233 * But we don't have any other choice: basic mode doesn't
4234 * provide a flow control mechanism. */
4235
4236 if (chan->imtu < skb->len)
4237 goto drop;
4238
4239 if (!chan->ops->recv(chan->data, skb))
4240 goto done;
4241 break;
4242
4243 case L2CAP_MODE_ERTM:
4244 l2cap_ertm_data_rcv(chan, skb);
4245
4246 goto done;
4247
4248 case L2CAP_MODE_STREAMING:
4249 control = __get_control(chan, skb->data);
4250 skb_pull(skb, __ctrl_size(chan));
4251 len = skb->len;
4252
4253 if (l2cap_check_fcs(chan, skb))
4254 goto drop;
4255
4256 if (__is_sar_start(chan, control))
4257 len -= L2CAP_SDULEN_SIZE;
4258
4259 if (chan->fcs == L2CAP_FCS_CRC16)
4260 len -= L2CAP_FCS_SIZE;
4261
4262 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4263 goto drop;
4264
4265 tx_seq = __get_txseq(chan, control);
4266
4267 if (chan->expected_tx_seq != tx_seq) {
4268 /* Frame(s) missing - must discard partial SDU */
4269 kfree_skb(chan->sdu);
4270 chan->sdu = NULL;
4271 chan->sdu_last_frag = NULL;
4272 chan->sdu_len = 0;
4273
4274 /* TODO: Notify userland of missing data */
4275 }
4276
4277 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4278
4279 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4280 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4281
4282 goto done;
4283
4284 default:
4285 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4286 break;
4287 }
4288
4289 drop:
4290 kfree_skb(skb);
4291
4292 done:
4293 if (sk)
4294 release_sock(sk);
4295
4296 return 0;
4297 }
4298
4299 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4300 {
4301 struct sock *sk = NULL;
4302 struct l2cap_chan *chan;
4303
4304 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4305 if (!chan)
4306 goto drop;
4307
4308 sk = chan->sk;
4309
4310 lock_sock(sk);
4311
4312 BT_DBG("sk %p, len %d", sk, skb->len);
4313
4314 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4315 goto drop;
4316
4317 if (chan->imtu < skb->len)
4318 goto drop;
4319
4320 if (!chan->ops->recv(chan->data, skb))
4321 goto done;
4322
4323 drop:
4324 kfree_skb(skb);
4325
4326 done:
4327 if (sk)
4328 release_sock(sk);
4329 return 0;
4330 }
4331
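/* Handle data received on the LE attribute (ATT) fixed channel: look
 * up the global channel registered for this CID on the local address
 * and deliver the skb through its recv callback, subject to the same
 * state and MTU checks as the connectionless case.
 */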
4332 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4333 {
4334 struct sock *sk = NULL;
4335 struct l2cap_chan *chan;
4336
4337 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4338 if (!chan)
4339 goto drop;
4340
4341 sk = chan->sk;
4342
4343 lock_sock(sk);
4344
4345 BT_DBG("sk %p, len %d", sk, skb->len);
4346
4347 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4348 goto drop;
4349
4350 if (chan->imtu < skb->len)
4351 goto drop;
4352
4353 if (!chan->ops->recv(chan->data, skb))
4354 goto done;
4355
4356 drop:
4357 kfree_skb(skb);
4358
4359 done:
4360 if (sk)
4361 release_sock(sk);
4362 return 0;
4363 }
4364
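/* Demultiplex a complete L2CAP frame by destination CID: signalling
 * (BR/EDR and LE), connectionless data, LE attribute data, SMP, and
 * finally connection-oriented data channels for all remaining CIDs.
 * Frames whose header length does not match the skb length are
 * discarded.
 */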
4365 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4366 {
4367 struct l2cap_hdr *lh = (void *) skb->data;
4368 u16 cid, len;
4369 __le16 psm;
4370
4371 skb_pull(skb, L2CAP_HDR_SIZE);
4372 cid = __le16_to_cpu(lh->cid);
4373 len = __le16_to_cpu(lh->len);
4374
4375 if (len != skb->len) {
4376 kfree_skb(skb);
4377 return;
4378 }
4379
4380 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4381
4382 switch (cid) {
4383 case L2CAP_CID_LE_SIGNALING:
4384 case L2CAP_CID_SIGNALING:
4385 l2cap_sig_channel(conn, skb);
4386 break;
4387
4388 case L2CAP_CID_CONN_LESS:
4389 psm = get_unaligned_le16(skb->data);
4390 skb_pull(skb, 2);
4391 l2cap_conless_channel(conn, psm, skb);
4392 break;
4393
4394 case L2CAP_CID_LE_DATA:
4395 l2cap_att_channel(conn, cid, skb);
4396 break;
4397
4398 case L2CAP_CID_SMP:
4399 if (smp_sig_channel(conn, skb))
4400 l2cap_conn_del(conn->hcon, EACCES);
4401 break;
4402
4403 default:
4404 l2cap_data_channel(conn, cid, skb);
4405 break;
4406 }
4407 }
4408
4409 /* ---- L2CAP interface with lower layer (HCI) ---- */
4410
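/* Called by the HCI core on an incoming ACL connection request.
 * Scan the listening channels and build the link-mode mask that tells
 * HCI whether to accept the connection and whether to insist on the
 * master role. A listener bound to the exact local address takes
 * precedence over wildcard (BDADDR_ANY) listeners.
 */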
4411 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4412 {
4413 int exact = 0, lm1 = 0, lm2 = 0;
4414 struct l2cap_chan *c;
4415
4416 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4417
4418 /* Find listening sockets and check their link_mode */
4419 read_lock(&chan_list_lock);
4420 list_for_each_entry(c, &chan_list, global_l) {
4421 struct sock *sk = c->sk;
4422
4423 if (c->state != BT_LISTEN)
4424 continue;
4425
4426 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4427 lm1 |= HCI_LM_ACCEPT;
4428 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4429 lm1 |= HCI_LM_MASTER;
4430 exact++;
4431 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4432 lm2 |= HCI_LM_ACCEPT;
4433 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4434 lm2 |= HCI_LM_MASTER;
4435 }
4436 }
4437 read_unlock(&chan_list_lock);
4438
4439 return exact ? lm1 : lm2;
4440 }
4441
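/* Called by the HCI core when ACL link setup completes. On success
 * create (or reuse) the L2CAP connection and kick off the channels
 * waiting on it; on failure tear the connection down with the
 * translated error code.
 */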
4442 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4443 {
4444 struct l2cap_conn *conn;
4445
4446 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4447
4448 if (!status) {
4449 conn = l2cap_conn_add(hcon, status);
4450 if (conn)
4451 l2cap_conn_ready(conn);
4452 } else
4453 l2cap_conn_del(hcon, bt_to_errno(status));
4454
4455 return 0;
4456 }
4457
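/* Tell the HCI core which disconnect reason to use for this link:
 * the reason recorded on the L2CAP connection, or "remote user
 * terminated" if no L2CAP connection exists.
 */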
4458 int l2cap_disconn_ind(struct hci_conn *hcon)
4459 {
4460 struct l2cap_conn *conn = hcon->l2cap_data;
4461
4462 BT_DBG("hcon %p", hcon);
4463
4464 if (!conn)
4465 return HCI_ERROR_REMOTE_USER_TERM;
4466 return conn->disc_reason;
4467 }
4468
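/* Called by the HCI core once the ACL link is gone: tear down the
 * L2CAP connection and the channels on it.
 */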
4469 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4470 {
4471 BT_DBG("hcon %p reason %d", hcon, reason);
4472
4473 l2cap_conn_del(hcon, bt_to_errno(reason));
4474 return 0;
4475 }
4476
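/* React to a change in the encryption state of a connection-oriented
 * channel. When encryption is switched off, a medium-security channel
 * is given a grace timer while a high-security channel is closed
 * immediately; when encryption comes back on, the pending timer on a
 * medium-security channel is cleared.
 */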
4477 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4478 {
4479 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4480 return;
4481
4482 if (encrypt == 0x00) {
4483 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4484 __clear_chan_timer(chan);
4485 __set_chan_timer(chan,
4486 msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
4487 } else if (chan->sec_level == BT_SECURITY_HIGH)
4488 l2cap_chan_close(chan, ECONNREFUSED);
4489 } else {
4490 if (chan->sec_level == BT_SECURITY_MEDIUM)
4491 __clear_chan_timer(chan);
4492 }
4493 }
4494
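/* Called by the HCI core when an authentication/encryption procedure
 * finishes. Walk every channel on the connection and either mark it
 * ready (LE data), continue an outgoing connect, answer a pending
 * incoming connect, or re-check its encryption requirements,
 * depending on its current state and the procedure's status.
 */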
4495 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4496 {
4497 struct l2cap_conn *conn = hcon->l2cap_data;
4498 struct l2cap_chan *chan;
4499
4500 if (!conn)
4501 return 0;
4502
4503 BT_DBG("conn %p", conn);
4504
4505 if (hcon->type == LE_LINK) {
4506 smp_distribute_keys(conn, 0);
4507 __cancel_delayed_work(&conn->security_timer);
4508 }
4509
4510 rcu_read_lock();
4511
4512 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
4513 struct sock *sk = chan->sk;
4514
4515 bh_lock_sock(sk);
4516
4517 BT_DBG("chan->scid %d", chan->scid);
4518
4519 if (chan->scid == L2CAP_CID_LE_DATA) {
4520 if (!status && encrypt) {
4521 chan->sec_level = hcon->sec_level;
4522 l2cap_chan_ready(sk);
4523 }
4524
4525 bh_unlock_sock(sk);
4526 continue;
4527 }
4528
4529 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4530 bh_unlock_sock(sk);
4531 continue;
4532 }
4533
4534 if (!status && (chan->state == BT_CONNECTED ||
4535 chan->state == BT_CONFIG)) {
4536 l2cap_check_encryption(chan, encrypt);
4537 bh_unlock_sock(sk);
4538 continue;
4539 }
4540
4541 if (chan->state == BT_CONNECT) {
4542 if (!status) {
4543 struct l2cap_conn_req req;
4544 req.scid = cpu_to_le16(chan->scid);
4545 req.psm = chan->psm;
4546
4547 chan->ident = l2cap_get_ident(conn);
4548 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4549
4550 l2cap_send_cmd(conn, chan->ident,
4551 L2CAP_CONN_REQ, sizeof(req), &req);
4552 } else {
4553 __clear_chan_timer(chan);
4554 __set_chan_timer(chan,
4555 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4556 }
4557 } else if (chan->state == BT_CONNECT2) {
4558 struct l2cap_conn_rsp rsp;
4559 __u16 res, stat;
4560
4561 if (!status) {
4562 if (bt_sk(sk)->defer_setup) {
4563 struct sock *parent = bt_sk(sk)->parent;
4564 res = L2CAP_CR_PEND;
4565 stat = L2CAP_CS_AUTHOR_PEND;
4566 if (parent)
4567 parent->sk_data_ready(parent, 0);
4568 } else {
4569 l2cap_state_change(chan, BT_CONFIG);
4570 res = L2CAP_CR_SUCCESS;
4571 stat = L2CAP_CS_NO_INFO;
4572 }
4573 } else {
4574 l2cap_state_change(chan, BT_DISCONN);
4575 __set_chan_timer(chan,
4576 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4577 res = L2CAP_CR_SEC_BLOCK;
4578 stat = L2CAP_CS_NO_INFO;
4579 }
4580
4581 rsp.scid = cpu_to_le16(chan->dcid);
4582 rsp.dcid = cpu_to_le16(chan->scid);
4583 rsp.result = cpu_to_le16(res);
4584 rsp.status = cpu_to_le16(stat);
4585 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4586 sizeof(rsp), &rsp);
4587 }
4588
4589 bh_unlock_sock(sk);
4590 }
4591
4592 rcu_read_unlock();
4593
4594 return 0;
4595 }
4596
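/* Entry point for ACL data from the HCI core. A start fragment
 * carries the Basic L2CAP header, which gives the total frame length;
 * if the frame is not complete in one fragment, a reassembly buffer
 * is allocated and continuation fragments are appended until rx_len
 * drops to zero, at which point the complete frame is handed to
 * l2cap_recv_frame(). Malformed or oversized fragments mark the
 * connection unreliable and are dropped.
 */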
4597 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4598 {
4599 struct l2cap_conn *conn = hcon->l2cap_data;
4600
4601 if (!conn)
4602 conn = l2cap_conn_add(hcon, 0);
4603
4604 if (!conn)
4605 goto drop;
4606
4607 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4608
4609 if (!(flags & ACL_CONT)) {
4610 struct l2cap_hdr *hdr;
4611 struct l2cap_chan *chan;
4612 u16 cid;
4613 int len;
4614
4615 if (conn->rx_len) {
4616 BT_ERR("Unexpected start frame (len %d)", skb->len);
4617 kfree_skb(conn->rx_skb);
4618 conn->rx_skb = NULL;
4619 conn->rx_len = 0;
4620 l2cap_conn_unreliable(conn, ECOMM);
4621 }
4622
4623 		/* A start fragment always begins with the Basic L2CAP header */
4624 if (skb->len < L2CAP_HDR_SIZE) {
4625 BT_ERR("Frame is too short (len %d)", skb->len);
4626 l2cap_conn_unreliable(conn, ECOMM);
4627 goto drop;
4628 }
4629
4630 hdr = (struct l2cap_hdr *) skb->data;
4631 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4632 cid = __le16_to_cpu(hdr->cid);
4633
4634 if (len == skb->len) {
4635 /* Complete frame received */
4636 l2cap_recv_frame(conn, skb);
4637 return 0;
4638 }
4639
4640 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4641
4642 if (skb->len > len) {
4643 BT_ERR("Frame is too long (len %d, expected len %d)",
4644 skb->len, len);
4645 l2cap_conn_unreliable(conn, ECOMM);
4646 goto drop;
4647 }
4648
4649 chan = l2cap_get_chan_by_scid(conn, cid);
4650
4651 if (chan && chan->sk) {
4652 struct sock *sk = chan->sk;
4653
4654 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4655 BT_ERR("Frame exceeding recv MTU (len %d, "
4656 "MTU %d)", len,
4657 chan->imtu);
4658 release_sock(sk);
4659 l2cap_conn_unreliable(conn, ECOMM);
4660 goto drop;
4661 }
4662 release_sock(sk);
4663 }
4664
4665 /* Allocate skb for the complete frame (with header) */
4666 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4667 if (!conn->rx_skb)
4668 goto drop;
4669
4670 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4671 skb->len);
4672 conn->rx_len = len - skb->len;
4673 } else {
4674 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4675
4676 if (!conn->rx_len) {
4677 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4678 l2cap_conn_unreliable(conn, ECOMM);
4679 goto drop;
4680 }
4681
4682 if (skb->len > conn->rx_len) {
4683 BT_ERR("Fragment is too long (len %d, expected %d)",
4684 skb->len, conn->rx_len);
4685 kfree_skb(conn->rx_skb);
4686 conn->rx_skb = NULL;
4687 conn->rx_len = 0;
4688 l2cap_conn_unreliable(conn, ECOMM);
4689 goto drop;
4690 }
4691
4692 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4693 skb->len);
4694 conn->rx_len -= skb->len;
4695
4696 if (!conn->rx_len) {
4697 /* Complete frame received */
4698 l2cap_recv_frame(conn, conn->rx_skb);
4699 conn->rx_skb = NULL;
4700 }
4701 }
4702
4703 drop:
4704 kfree_skb(skb);
4705 return 0;
4706 }
4707
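/* debugfs: print one line per known channel with its addresses,
 * state, PSM, CIDs, MTUs, security level and mode.
 */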
4708 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4709 {
4710 struct l2cap_chan *c;
4711
4712 read_lock(&chan_list_lock);
4713
4714 list_for_each_entry(c, &chan_list, global_l) {
4715 struct sock *sk = c->sk;
4716
4717 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4718 batostr(&bt_sk(sk)->src),
4719 batostr(&bt_sk(sk)->dst),
4720 c->state, __le16_to_cpu(c->psm),
4721 c->scid, c->dcid, c->imtu, c->omtu,
4722 c->sec_level, c->mode);
4723 }
4724
4725 read_unlock(&chan_list_lock);
4726
4727 return 0;
4728 }
4729
4730 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4731 {
4732 return single_open(file, l2cap_debugfs_show, inode->i_private);
4733 }
4734
4735 static const struct file_operations l2cap_debugfs_fops = {
4736 .open = l2cap_debugfs_open,
4737 .read = seq_read,
4738 .llseek = seq_lseek,
4739 .release = single_release,
4740 };
4741
4742 static struct dentry *l2cap_debugfs;
4743
4744 int __init l2cap_init(void)
4745 {
4746 int err;
4747
4748 err = l2cap_init_sockets();
4749 if (err < 0)
4750 return err;
4751
4752 if (bt_debugfs) {
4753 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4754 bt_debugfs, NULL, &l2cap_debugfs_fops);
4755 if (!l2cap_debugfs)
4756 BT_ERR("Failed to create L2CAP debug file");
4757 }
4758
4759 return 0;
4760 }
4761
4762 void l2cap_exit(void)
4763 {
4764 debugfs_remove(l2cap_debugfs);
4765 l2cap_cleanup_sockets();
4766 }
4767
4768 module_param(disable_ertm, bool, 0644);
4769 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");