Bluetooth: Add signal handlers for channel creation
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
58
59 int disable_ertm;
60 int enable_hs;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
64
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
67
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77
78 /* ---- L2CAP channels ---- */
79
80 static inline void chan_hold(struct l2cap_chan *c)
81 {
82 atomic_inc(&c->refcnt);
83 }
84
85 static inline void chan_put(struct l2cap_chan *c)
86 {
87 if (atomic_dec_and_test(&c->refcnt))
88 kfree(c);
89 }
90
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
92 {
93 struct l2cap_chan *c;
94
95 list_for_each_entry(c, &conn->chan_l, list) {
96 if (c->dcid == cid)
97 return c;
98 }
99 return NULL;
100
101 }
102
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 {
105 struct l2cap_chan *c;
106
107 list_for_each_entry(c, &conn->chan_l, list) {
108 if (c->scid == cid)
109 return c;
110 }
111 return NULL;
112 }
113
114 /* Find channel with given SCID.
115  * Returns the channel with its socket locked (bh_lock_sock held) */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
117 {
118 struct l2cap_chan *c;
119
120 read_lock(&conn->chan_lock);
121 c = __l2cap_get_chan_by_scid(conn, cid);
122 if (c)
123 bh_lock_sock(c->sk);
124 read_unlock(&conn->chan_lock);
125 return c;
126 }
127
128 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
129 {
130 struct l2cap_chan *c;
131
132 list_for_each_entry(c, &conn->chan_l, list) {
133 if (c->ident == ident)
134 return c;
135 }
136 return NULL;
137 }
138
139 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
140 {
141 struct l2cap_chan *c;
142
143 read_lock(&conn->chan_lock);
144 c = __l2cap_get_chan_by_ident(conn, ident);
145 if (c)
146 bh_lock_sock(c->sk);
147 read_unlock(&conn->chan_lock);
148 return c;
149 }
150
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
152 {
153 struct l2cap_chan *c;
154
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
157 goto found;
158 }
159
160 c = NULL;
161 found:
162 return c;
163 }
164
165 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
166 {
167 int err;
168
169 write_lock_bh(&chan_list_lock);
170
171 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
172 err = -EADDRINUSE;
173 goto done;
174 }
175
176 if (psm) {
177 chan->psm = psm;
178 chan->sport = psm;
179 err = 0;
180 } else {
181 u16 p;
182
183 err = -EINVAL;
184 for (p = 0x1001; p < 0x1100; p += 2)
185 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
186 chan->psm = cpu_to_le16(p);
187 chan->sport = cpu_to_le16(p);
188 err = 0;
189 break;
190 }
191 }
192
193 done:
194 write_unlock_bh(&chan_list_lock);
195 return err;
196 }
197
198 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
199 {
200 write_lock_bh(&chan_list_lock);
201
202 chan->scid = scid;
203
204 write_unlock_bh(&chan_list_lock);
205
206 return 0;
207 }
208
209 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
210 {
211 u16 cid = L2CAP_CID_DYN_START;
212
213 for (; cid < L2CAP_CID_DYN_END; cid++) {
214 if (!__l2cap_get_chan_by_scid(conn, cid))
215 return cid;
216 }
217
218 return 0;
219 }
220
221 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
222 {
223 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
224
225 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
226 chan_hold(chan);
227 }
228
229 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
230 {
231 BT_DBG("chan %p state %d", chan, chan->state);
232
233 if (timer_pending(timer) && del_timer(timer))
234 chan_put(chan);
235 }
236
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
238 {
239 chan->state = state;
240 chan->ops->state_change(chan->data, state);
241 }
242
243 static void l2cap_chan_timeout(unsigned long arg)
244 {
245 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
246 struct sock *sk = chan->sk;
247 int reason;
248
249 BT_DBG("chan %p state %d", chan, chan->state);
250
251 bh_lock_sock(sk);
252
253 if (sock_owned_by_user(sk)) {
254 /* sk is owned by user. Try again later */
255 __set_chan_timer(chan, HZ / 5);
256 bh_unlock_sock(sk);
257 chan_put(chan);
258 return;
259 }
260
261 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
262 reason = ECONNREFUSED;
263 else if (chan->state == BT_CONNECT &&
264 chan->sec_level != BT_SECURITY_SDP)
265 reason = ECONNREFUSED;
266 else
267 reason = ETIMEDOUT;
268
269 l2cap_chan_close(chan, reason);
270
271 bh_unlock_sock(sk);
272
273 chan->ops->close(chan->data);
274 chan_put(chan);
275 }
276
277 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
278 {
279 struct l2cap_chan *chan;
280
281 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
282 if (!chan)
283 return NULL;
284
285 chan->sk = sk;
286
287 write_lock_bh(&chan_list_lock);
288 list_add(&chan->global_l, &chan_list);
289 write_unlock_bh(&chan_list_lock);
290
291 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
292
293 chan->state = BT_OPEN;
294
295 atomic_set(&chan->refcnt, 1);
296
297 return chan;
298 }
299
300 void l2cap_chan_destroy(struct l2cap_chan *chan)
301 {
302 write_lock_bh(&chan_list_lock);
303 list_del(&chan->global_l);
304 write_unlock_bh(&chan_list_lock);
305
306 chan_put(chan);
307 }
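/*
 * Illustrative sketch of the expected channel life cycle, assuming 'sk' is
 * an already allocated L2CAP socket (the exact caller is not shown here):
 * passing 0 to l2cap_add_psm() requests a dynamic PSM from the odd range
 * 0x1001-0x10ff.
 *
 *	struct l2cap_chan *chan;
 *	int err;
 *
 *	chan = l2cap_chan_create(sk);
 *	if (!chan)
 *		return -ENOMEM;
 *
 *	err = l2cap_add_psm(chan, &bt_sk(sk)->src, 0);
 *	if (err) {
 *		l2cap_chan_destroy(chan);
 *		return err;
 *	}
 *
 * The channel is created with refcnt == 1; l2cap_chan_destroy() drops that
 * initial reference through chan_put() once the channel is unlinked from
 * chan_list.
 */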
308
309 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
310 {
311 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
312 chan->psm, chan->dcid);
313
314 conn->disc_reason = 0x13; /* Remote User Terminated Connection */
315
316 chan->conn = conn;
317
318 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
319 if (conn->hcon->type == LE_LINK) {
320 /* LE connection */
321 chan->omtu = L2CAP_LE_DEFAULT_MTU;
322 chan->scid = L2CAP_CID_LE_DATA;
323 chan->dcid = L2CAP_CID_LE_DATA;
324 } else {
325 /* Alloc CID for connection-oriented socket */
326 chan->scid = l2cap_alloc_cid(conn);
327 chan->omtu = L2CAP_DEFAULT_MTU;
328 }
329 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
330 /* Connectionless socket */
331 chan->scid = L2CAP_CID_CONN_LESS;
332 chan->dcid = L2CAP_CID_CONN_LESS;
333 chan->omtu = L2CAP_DEFAULT_MTU;
334 } else {
335 /* Raw socket can send/recv signalling messages only */
336 chan->scid = L2CAP_CID_SIGNALING;
337 chan->dcid = L2CAP_CID_SIGNALING;
338 chan->omtu = L2CAP_DEFAULT_MTU;
339 }
340
341 chan->local_id = L2CAP_BESTEFFORT_ID;
342 chan->local_stype = L2CAP_SERV_BESTEFFORT;
343 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
344 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
345 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
346 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
347
348 chan_hold(chan);
349
350 list_add(&chan->list, &conn->chan_l);
351 }
352
353 /* Delete channel.
354  * Must be called with the socket locked. */
355 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
356 {
357 struct sock *sk = chan->sk;
358 struct l2cap_conn *conn = chan->conn;
359 struct sock *parent = bt_sk(sk)->parent;
360
361 __clear_chan_timer(chan);
362
363 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
364
365 if (conn) {
366 /* Delete from channel list */
367 write_lock_bh(&conn->chan_lock);
368 list_del(&chan->list);
369 write_unlock_bh(&conn->chan_lock);
370 chan_put(chan);
371
372 chan->conn = NULL;
373 hci_conn_put(conn->hcon);
374 }
375
376 l2cap_state_change(chan, BT_CLOSED);
377 sock_set_flag(sk, SOCK_ZAPPED);
378
379 if (err)
380 sk->sk_err = err;
381
382 if (parent) {
383 bt_accept_unlink(sk);
384 parent->sk_data_ready(parent, 0);
385 } else
386 sk->sk_state_change(sk);
387
388 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
389 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
390 return;
391
392 skb_queue_purge(&chan->tx_q);
393
394 if (chan->mode == L2CAP_MODE_ERTM) {
395 struct srej_list *l, *tmp;
396
397 __clear_retrans_timer(chan);
398 __clear_monitor_timer(chan);
399 __clear_ack_timer(chan);
400
401 skb_queue_purge(&chan->srej_q);
402
403 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
404 list_del(&l->list);
405 kfree(l);
406 }
407 }
408 }
409
410 static void l2cap_chan_cleanup_listen(struct sock *parent)
411 {
412 struct sock *sk;
413
414 BT_DBG("parent %p", parent);
415
416 /* Close channels that have not yet been accepted */
417 while ((sk = bt_accept_dequeue(parent, NULL))) {
418 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
419 __clear_chan_timer(chan);
420 lock_sock(sk);
421 l2cap_chan_close(chan, ECONNRESET);
422 release_sock(sk);
423 chan->ops->close(chan->data);
424 }
425 }
426
427 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
428 {
429 struct l2cap_conn *conn = chan->conn;
430 struct sock *sk = chan->sk;
431
432 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
433
434 switch (chan->state) {
435 case BT_LISTEN:
436 l2cap_chan_cleanup_listen(sk);
437
438 l2cap_state_change(chan, BT_CLOSED);
439 sock_set_flag(sk, SOCK_ZAPPED);
440 break;
441
442 case BT_CONNECTED:
443 case BT_CONFIG:
444 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
445 conn->hcon->type == ACL_LINK) {
446 __clear_chan_timer(chan);
447 __set_chan_timer(chan, sk->sk_sndtimeo);
448 l2cap_send_disconn_req(conn, chan, reason);
449 } else
450 l2cap_chan_del(chan, reason);
451 break;
452
453 case BT_CONNECT2:
454 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
455 conn->hcon->type == ACL_LINK) {
456 struct l2cap_conn_rsp rsp;
457 __u16 result;
458
459 if (bt_sk(sk)->defer_setup)
460 result = L2CAP_CR_SEC_BLOCK;
461 else
462 result = L2CAP_CR_BAD_PSM;
463 l2cap_state_change(chan, BT_DISCONN);
464
465 rsp.scid = cpu_to_le16(chan->dcid);
466 rsp.dcid = cpu_to_le16(chan->scid);
467 rsp.result = cpu_to_le16(result);
468 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
469 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
470 sizeof(rsp), &rsp);
471 }
472
473 l2cap_chan_del(chan, reason);
474 break;
475
476 case BT_CONNECT:
477 case BT_DISCONN:
478 l2cap_chan_del(chan, reason);
479 break;
480
481 default:
482 sock_set_flag(sk, SOCK_ZAPPED);
483 break;
484 }
485 }
486
487 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
488 {
489 if (chan->chan_type == L2CAP_CHAN_RAW) {
490 switch (chan->sec_level) {
491 case BT_SECURITY_HIGH:
492 return HCI_AT_DEDICATED_BONDING_MITM;
493 case BT_SECURITY_MEDIUM:
494 return HCI_AT_DEDICATED_BONDING;
495 default:
496 return HCI_AT_NO_BONDING;
497 }
498 } else if (chan->psm == cpu_to_le16(0x0001)) { /* SDP */
499 if (chan->sec_level == BT_SECURITY_LOW)
500 chan->sec_level = BT_SECURITY_SDP;
501
502 if (chan->sec_level == BT_SECURITY_HIGH)
503 return HCI_AT_NO_BONDING_MITM;
504 else
505 return HCI_AT_NO_BONDING;
506 } else {
507 switch (chan->sec_level) {
508 case BT_SECURITY_HIGH:
509 return HCI_AT_GENERAL_BONDING_MITM;
510 case BT_SECURITY_MEDIUM:
511 return HCI_AT_GENERAL_BONDING;
512 default:
513 return HCI_AT_NO_BONDING;
514 }
515 }
516 }
517
518 /* Service level security */
519 static inline int l2cap_check_security(struct l2cap_chan *chan)
520 {
521 struct l2cap_conn *conn = chan->conn;
522 __u8 auth_type;
523
524 auth_type = l2cap_get_auth_type(chan);
525
526 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
527 }
528
529 static u8 l2cap_get_ident(struct l2cap_conn *conn)
530 {
531 u8 id;
532
533 /* Get the next available identifier.
534 * 1 - 128 are used by the kernel.
535 * 129 - 199 are reserved.
536 * 200 - 254 are used by utilities like l2ping, etc.
537 */
538
539 spin_lock_bh(&conn->lock);
540
541 if (++conn->tx_ident > 128)
542 conn->tx_ident = 1;
543
544 id = conn->tx_ident;
545
546 spin_unlock_bh(&conn->lock);
547
548 return id;
549 }
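/*
 * The allocation above is used by the signalling code roughly as sketched
 * below (a minimal sketch, assuming a pending connection request): the
 * identifier is remembered on the channel so that the peer's response can
 * be matched back via l2cap_get_chan_by_ident().
 *
 *	chan->ident = l2cap_get_ident(conn);
 *	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
 *						sizeof(req), &req);
 *
 *	... later, in the response handler ...
 *
 *	chan = l2cap_get_chan_by_ident(conn, cmd->ident);
 */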
550
551 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
552 {
553 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
554 u8 flags;
555
556 BT_DBG("code 0x%2.2x", code);
557
558 if (!skb)
559 return;
560
561 if (lmp_no_flush_capable(conn->hcon->hdev))
562 flags = ACL_START_NO_FLUSH;
563 else
564 flags = ACL_START;
565
566 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
567 skb->priority = HCI_PRIO_MAX;
568
569 hci_send_acl(conn->hchan, skb, flags);
570 }
571
572 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
573 {
574 struct hci_conn *hcon = chan->conn->hcon;
575 u16 flags;
576
577 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
578 skb->priority);
579
580 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
581 lmp_no_flush_capable(hcon->hdev))
582 flags = ACL_START_NO_FLUSH;
583 else
584 flags = ACL_START;
585
586 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
587 hci_send_acl(chan->conn->hchan, skb, flags);
588 }
589
590 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
591 {
592 struct sk_buff *skb;
593 struct l2cap_hdr *lh;
594 struct l2cap_conn *conn = chan->conn;
595 int count, hlen;
596
597 if (chan->state != BT_CONNECTED)
598 return;
599
600 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
601 hlen = L2CAP_EXT_HDR_SIZE;
602 else
603 hlen = L2CAP_ENH_HDR_SIZE;
604
605 if (chan->fcs == L2CAP_FCS_CRC16)
606 hlen += L2CAP_FCS_SIZE;
607
608 BT_DBG("chan %p, control 0x%8.8x", chan, control);
609
610 count = min_t(unsigned int, conn->mtu, hlen);
611
612 control |= __set_sframe(chan);
613
614 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
615 control |= __set_ctrl_final(chan);
616
617 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
618 control |= __set_ctrl_poll(chan);
619
620 skb = bt_skb_alloc(count, GFP_ATOMIC);
621 if (!skb)
622 return;
623
624 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
625 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
626 lh->cid = cpu_to_le16(chan->dcid);
627
628 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
629
630 if (chan->fcs == L2CAP_FCS_CRC16) {
631 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
632 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
633 }
634
635 skb->priority = HCI_PRIO_MAX;
636 l2cap_do_send(chan, skb);
637 }
638
639 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
640 {
641 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
642 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
643 set_bit(CONN_RNR_SENT, &chan->conn_state);
644 } else
645 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
646
647 control |= __set_reqseq(chan, chan->buffer_seq);
648
649 l2cap_send_sframe(chan, control);
650 }
651
652 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
653 {
654 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
655 }
656
657 static void l2cap_do_start(struct l2cap_chan *chan)
658 {
659 struct l2cap_conn *conn = chan->conn;
660
661 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
662 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
663 return;
664
665 if (l2cap_check_security(chan) &&
666 __l2cap_no_conn_pending(chan)) {
667 struct l2cap_conn_req req;
668 req.scid = cpu_to_le16(chan->scid);
669 req.psm = chan->psm;
670
671 chan->ident = l2cap_get_ident(conn);
672 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
673
674 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
675 sizeof(req), &req);
676 }
677 } else {
678 struct l2cap_info_req req;
679 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
680
681 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
682 conn->info_ident = l2cap_get_ident(conn);
683
684 mod_timer(&conn->info_timer, jiffies +
685 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
686
687 l2cap_send_cmd(conn, conn->info_ident,
688 L2CAP_INFO_REQ, sizeof(req), &req);
689 }
690 }
691
692 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
693 {
694 u32 local_feat_mask = l2cap_feat_mask;
695 if (!disable_ertm)
696 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
697
698 switch (mode) {
699 case L2CAP_MODE_ERTM:
700 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
701 case L2CAP_MODE_STREAMING:
702 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
703 default:
704 return 0x00;
705 }
706 }
707
708 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
709 {
710 struct sock *sk;
711 struct l2cap_disconn_req req;
712
713 if (!conn)
714 return;
715
716 sk = chan->sk;
717
718 if (chan->mode == L2CAP_MODE_ERTM) {
719 __clear_retrans_timer(chan);
720 __clear_monitor_timer(chan);
721 __clear_ack_timer(chan);
722 }
723
724 req.dcid = cpu_to_le16(chan->dcid);
725 req.scid = cpu_to_le16(chan->scid);
726 l2cap_send_cmd(conn, l2cap_get_ident(conn),
727 L2CAP_DISCONN_REQ, sizeof(req), &req);
728
729 l2cap_state_change(chan, BT_DISCONN);
730 sk->sk_err = err;
731 }
732
733 /* ---- L2CAP connections ---- */
734 static void l2cap_conn_start(struct l2cap_conn *conn)
735 {
736 struct l2cap_chan *chan, *tmp;
737
738 BT_DBG("conn %p", conn);
739
740 read_lock(&conn->chan_lock);
741
742 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
743 struct sock *sk = chan->sk;
744
745 bh_lock_sock(sk);
746
747 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
748 bh_unlock_sock(sk);
749 continue;
750 }
751
752 if (chan->state == BT_CONNECT) {
753 struct l2cap_conn_req req;
754
755 if (!l2cap_check_security(chan) ||
756 !__l2cap_no_conn_pending(chan)) {
757 bh_unlock_sock(sk);
758 continue;
759 }
760
761 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
762 && test_bit(CONF_STATE2_DEVICE,
763 &chan->conf_state)) {
764 /* l2cap_chan_close() calls list_del(chan)
765 * so release the lock */
766 read_unlock(&conn->chan_lock);
767 l2cap_chan_close(chan, ECONNRESET);
768 read_lock(&conn->chan_lock);
769 bh_unlock_sock(sk);
770 continue;
771 }
772
773 req.scid = cpu_to_le16(chan->scid);
774 req.psm = chan->psm;
775
776 chan->ident = l2cap_get_ident(conn);
777 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
778
779 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
780 sizeof(req), &req);
781
782 } else if (chan->state == BT_CONNECT2) {
783 struct l2cap_conn_rsp rsp;
784 char buf[128];
785 rsp.scid = cpu_to_le16(chan->dcid);
786 rsp.dcid = cpu_to_le16(chan->scid);
787
788 if (l2cap_check_security(chan)) {
789 if (bt_sk(sk)->defer_setup) {
790 struct sock *parent = bt_sk(sk)->parent;
791 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
792 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
793 if (parent)
794 parent->sk_data_ready(parent, 0);
795
796 } else {
797 l2cap_state_change(chan, BT_CONFIG);
798 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
799 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
800 }
801 } else {
802 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
803 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
804 }
805
806 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
807 sizeof(rsp), &rsp);
808
809 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
810 rsp.result != L2CAP_CR_SUCCESS) {
811 bh_unlock_sock(sk);
812 continue;
813 }
814
815 set_bit(CONF_REQ_SENT, &chan->conf_state);
816 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
817 l2cap_build_conf_req(chan, buf), buf);
818 chan->num_conf_req++;
819 }
820
821 bh_unlock_sock(sk);
822 }
823
824 read_unlock(&conn->chan_lock);
825 }
826
827 /* Find channel with given SCID and source bdaddr.
828 * Returns closest match.
829 */
830 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
831 {
832 struct l2cap_chan *c, *c1 = NULL;
833
834 read_lock(&chan_list_lock);
835
836 list_for_each_entry(c, &chan_list, global_l) {
837 struct sock *sk = c->sk;
838
839 if (state && c->state != state)
840 continue;
841
842 if (c->scid == cid) {
843 /* Exact match. */
844 if (!bacmp(&bt_sk(sk)->src, src)) {
845 read_unlock(&chan_list_lock);
846 return c;
847 }
848
849 /* Closest match */
850 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
851 c1 = c;
852 }
853 }
854
855 read_unlock(&chan_list_lock);
856
857 return c1;
858 }
859
860 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
861 {
862 struct sock *parent, *sk;
863 struct l2cap_chan *chan, *pchan;
864
865 BT_DBG("");
866
867 /* Check if we have a socket listening on this CID */
868 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
869 conn->src);
870 if (!pchan)
871 return;
872
873 parent = pchan->sk;
874
875 bh_lock_sock(parent);
876
877 /* Check for backlog size */
878 if (sk_acceptq_is_full(parent)) {
879 BT_DBG("backlog full %d", parent->sk_ack_backlog);
880 goto clean;
881 }
882
883 chan = pchan->ops->new_connection(pchan->data);
884 if (!chan)
885 goto clean;
886
887 sk = chan->sk;
888
889 write_lock_bh(&conn->chan_lock);
890
891 hci_conn_hold(conn->hcon);
892
893 bacpy(&bt_sk(sk)->src, conn->src);
894 bacpy(&bt_sk(sk)->dst, conn->dst);
895
896 bt_accept_enqueue(parent, sk);
897
898 __l2cap_chan_add(conn, chan);
899
900 __set_chan_timer(chan, sk->sk_sndtimeo);
901
902 l2cap_state_change(chan, BT_CONNECTED);
903 parent->sk_data_ready(parent, 0);
904
905 write_unlock_bh(&conn->chan_lock);
906
907 clean:
908 bh_unlock_sock(parent);
909 }
910
911 static void l2cap_chan_ready(struct sock *sk)
912 {
913 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
914 struct sock *parent = bt_sk(sk)->parent;
915
916 BT_DBG("sk %p, parent %p", sk, parent);
917
918 chan->conf_state = 0;
919 __clear_chan_timer(chan);
920
921 l2cap_state_change(chan, BT_CONNECTED);
922 sk->sk_state_change(sk);
923
924 if (parent)
925 parent->sk_data_ready(parent, 0);
926 }
927
928 static void l2cap_conn_ready(struct l2cap_conn *conn)
929 {
930 struct l2cap_chan *chan;
931
932 BT_DBG("conn %p", conn);
933
934 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
935 l2cap_le_conn_ready(conn);
936
937 if (conn->hcon->out && conn->hcon->type == LE_LINK)
938 smp_conn_security(conn, conn->hcon->pending_sec_level);
939
940 read_lock(&conn->chan_lock);
941
942 list_for_each_entry(chan, &conn->chan_l, list) {
943 struct sock *sk = chan->sk;
944
945 bh_lock_sock(sk);
946
947 if (conn->hcon->type == LE_LINK) {
948 if (smp_conn_security(conn, chan->sec_level))
949 l2cap_chan_ready(sk);
950
951 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
952 __clear_chan_timer(chan);
953 l2cap_state_change(chan, BT_CONNECTED);
954 sk->sk_state_change(sk);
955
956 } else if (chan->state == BT_CONNECT)
957 l2cap_do_start(chan);
958
959 bh_unlock_sock(sk);
960 }
961
962 read_unlock(&conn->chan_lock);
963 }
964
965 /* Notify sockets that we cannot guarantee reliability anymore */
966 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
967 {
968 struct l2cap_chan *chan;
969
970 BT_DBG("conn %p", conn);
971
972 read_lock(&conn->chan_lock);
973
974 list_for_each_entry(chan, &conn->chan_l, list) {
975 struct sock *sk = chan->sk;
976
977 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
978 sk->sk_err = err;
979 }
980
981 read_unlock(&conn->chan_lock);
982 }
983
984 static void l2cap_info_timeout(unsigned long arg)
985 {
986 struct l2cap_conn *conn = (void *) arg;
987
988 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
989 conn->info_ident = 0;
990
991 l2cap_conn_start(conn);
992 }
993
994 static void l2cap_conn_del(struct hci_conn *hcon, int err)
995 {
996 struct l2cap_conn *conn = hcon->l2cap_data;
997 struct l2cap_chan *chan, *l;
998 struct sock *sk;
999
1000 if (!conn)
1001 return;
1002
1003 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1004
1005 kfree_skb(conn->rx_skb);
1006
1007 /* Kill channels */
1008 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1009 sk = chan->sk;
1010 bh_lock_sock(sk);
1011 l2cap_chan_del(chan, err);
1012 bh_unlock_sock(sk);
1013 chan->ops->close(chan->data);
1014 }
1015
1016 hci_chan_del(conn->hchan);
1017
1018 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1019 del_timer_sync(&conn->info_timer);
1020
1021 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1022 del_timer(&conn->security_timer);
1023 smp_chan_destroy(conn);
1024 }
1025
1026 hcon->l2cap_data = NULL;
1027 kfree(conn);
1028 }
1029
1030 static void security_timeout(unsigned long arg)
1031 {
1032 struct l2cap_conn *conn = (void *) arg;
1033
1034 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1035 }
1036
1037 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1038 {
1039 struct l2cap_conn *conn = hcon->l2cap_data;
1040 struct hci_chan *hchan;
1041
1042 if (conn || status)
1043 return conn;
1044
1045 hchan = hci_chan_create(hcon);
1046 if (!hchan)
1047 return NULL;
1048
1049 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1050 if (!conn) {
1051 hci_chan_del(hchan);
1052 return NULL;
1053 }
1054
1055 hcon->l2cap_data = conn;
1056 conn->hcon = hcon;
1057 conn->hchan = hchan;
1058
1059 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1060
1061 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1062 conn->mtu = hcon->hdev->le_mtu;
1063 else
1064 conn->mtu = hcon->hdev->acl_mtu;
1065
1066 conn->src = &hcon->hdev->bdaddr;
1067 conn->dst = &hcon->dst;
1068
1069 conn->feat_mask = 0;
1070
1071 spin_lock_init(&conn->lock);
1072 rwlock_init(&conn->chan_lock);
1073
1074 INIT_LIST_HEAD(&conn->chan_l);
1075
1076 if (hcon->type == LE_LINK)
1077 setup_timer(&conn->security_timer, security_timeout,
1078 (unsigned long) conn);
1079 else
1080 setup_timer(&conn->info_timer, l2cap_info_timeout,
1081 (unsigned long) conn);
1082
1083 conn->disc_reason = 0x13; /* Remote User Terminated Connection */
1084
1085 return conn;
1086 }
1087
1088 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1089 {
1090 write_lock_bh(&conn->chan_lock);
1091 __l2cap_chan_add(conn, chan);
1092 write_unlock_bh(&conn->chan_lock);
1093 }
1094
1095 /* ---- Socket interface ---- */
1096
1097 /* Find channel with given PSM and source bdaddr.
1098 * Returns closest match.
1099 */
1100 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1101 {
1102 struct l2cap_chan *c, *c1 = NULL;
1103
1104 read_lock(&chan_list_lock);
1105
1106 list_for_each_entry(c, &chan_list, global_l) {
1107 struct sock *sk = c->sk;
1108
1109 if (state && c->state != state)
1110 continue;
1111
1112 if (c->psm == psm) {
1113 /* Exact match. */
1114 if (!bacmp(&bt_sk(sk)->src, src)) {
1115 read_unlock(&chan_list_lock);
1116 return c;
1117 }
1118
1119 /* Closest match */
1120 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1121 c1 = c;
1122 }
1123 }
1124
1125 read_unlock(&chan_list_lock);
1126
1127 return c1;
1128 }
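/*
 * A worked example of the "closest match" rule above (addresses are made
 * up): with two BT_LISTEN channels bound to the same PSM, one on
 * 00:11:22:33:44:55 and one on BDADDR_ANY, a lookup with
 *
 *	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
 *
 * returns the exact source-address match when conn->src is
 * 00:11:22:33:44:55 and falls back to the BDADDR_ANY listener for any
 * other local adapter.
 */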
1129
1130 int l2cap_chan_connect(struct l2cap_chan *chan)
1131 {
1132 struct sock *sk = chan->sk;
1133 bdaddr_t *src = &bt_sk(sk)->src;
1134 bdaddr_t *dst = &bt_sk(sk)->dst;
1135 struct l2cap_conn *conn;
1136 struct hci_conn *hcon;
1137 struct hci_dev *hdev;
1138 __u8 auth_type;
1139 int err;
1140
1141 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1142 chan->psm);
1143
1144 hdev = hci_get_route(dst, src);
1145 if (!hdev)
1146 return -EHOSTUNREACH;
1147
1148 hci_dev_lock_bh(hdev);
1149
1150 auth_type = l2cap_get_auth_type(chan);
1151
1152 if (chan->dcid == L2CAP_CID_LE_DATA)
1153 hcon = hci_connect(hdev, LE_LINK, dst,
1154 chan->sec_level, auth_type);
1155 else
1156 hcon = hci_connect(hdev, ACL_LINK, dst,
1157 chan->sec_level, auth_type);
1158
1159 if (IS_ERR(hcon)) {
1160 err = PTR_ERR(hcon);
1161 goto done;
1162 }
1163
1164 conn = l2cap_conn_add(hcon, 0);
1165 if (!conn) {
1166 hci_conn_put(hcon);
1167 err = -ENOMEM;
1168 goto done;
1169 }
1170
1171 /* Update source addr of the socket */
1172 bacpy(src, conn->src);
1173
1174 l2cap_chan_add(conn, chan);
1175
1176 l2cap_state_change(chan, BT_CONNECT);
1177 __set_chan_timer(chan, sk->sk_sndtimeo);
1178
1179 if (hcon->state == BT_CONNECTED) {
1180 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1181 __clear_chan_timer(chan);
1182 if (l2cap_check_security(chan))
1183 l2cap_state_change(chan, BT_CONNECTED);
1184 } else
1185 l2cap_do_start(chan);
1186 }
1187
1188 err = 0;
1189
1190 done:
1191 hci_dev_unlock_bh(hdev);
1192 hci_dev_put(hdev);
1193 return err;
1194 }
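/*
 * A minimal usage sketch for an outgoing connection, assuming 'chan' was
 * created as in the earlier example and bt_sk(sk)->dst already holds the
 * remote address (the socket layer calls this with the socket locked):
 *
 *	chan->psm = cpu_to_le16(psm);
 *	chan->sec_level = BT_SECURITY_LOW;
 *
 *	lock_sock(sk);
 *	err = l2cap_chan_connect(chan);
 *	release_sock(sk);
 *
 * On success the channel sits in BT_CONNECT until the connect response and
 * configuration exchange move it to BT_CONNECTED.
 */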
1195
1196 int __l2cap_wait_ack(struct sock *sk)
1197 {
1198 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1199 DECLARE_WAITQUEUE(wait, current);
1200 int err = 0;
1201 int timeo = HZ/5;
1202
1203 add_wait_queue(sk_sleep(sk), &wait);
1204 set_current_state(TASK_INTERRUPTIBLE);
1205 while (chan->unacked_frames > 0 && chan->conn) {
1206 if (!timeo)
1207 timeo = HZ/5;
1208
1209 if (signal_pending(current)) {
1210 err = sock_intr_errno(timeo);
1211 break;
1212 }
1213
1214 release_sock(sk);
1215 timeo = schedule_timeout(timeo);
1216 lock_sock(sk);
1217 set_current_state(TASK_INTERRUPTIBLE);
1218
1219 err = sock_error(sk);
1220 if (err)
1221 break;
1222 }
1223 set_current_state(TASK_RUNNING);
1224 remove_wait_queue(sk_sleep(sk), &wait);
1225 return err;
1226 }
1227
1228 static void l2cap_monitor_timeout(unsigned long arg)
1229 {
1230 struct l2cap_chan *chan = (void *) arg;
1231 struct sock *sk = chan->sk;
1232
1233 BT_DBG("chan %p", chan);
1234
1235 bh_lock_sock(sk);
1236 if (chan->retry_count >= chan->remote_max_tx) {
1237 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1238 bh_unlock_sock(sk);
1239 return;
1240 }
1241
1242 chan->retry_count++;
1243 __set_monitor_timer(chan);
1244
1245 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1246 bh_unlock_sock(sk);
1247 }
1248
1249 static void l2cap_retrans_timeout(unsigned long arg)
1250 {
1251 struct l2cap_chan *chan = (void *) arg;
1252 struct sock *sk = chan->sk;
1253
1254 BT_DBG("chan %p", chan);
1255
1256 bh_lock_sock(sk);
1257 chan->retry_count = 1;
1258 __set_monitor_timer(chan);
1259
1260 set_bit(CONN_WAIT_F, &chan->conn_state);
1261
1262 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1263 bh_unlock_sock(sk);
1264 }
1265
1266 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1267 {
1268 struct sk_buff *skb;
1269
1270 while ((skb = skb_peek(&chan->tx_q)) &&
1271 chan->unacked_frames) {
1272 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1273 break;
1274
1275 skb = skb_dequeue(&chan->tx_q);
1276 kfree_skb(skb);
1277
1278 chan->unacked_frames--;
1279 }
1280
1281 if (!chan->unacked_frames)
1282 __clear_retrans_timer(chan);
1283 }
1284
1285 static void l2cap_streaming_send(struct l2cap_chan *chan)
1286 {
1287 struct sk_buff *skb;
1288 u32 control;
1289 u16 fcs;
1290
1291 while ((skb = skb_dequeue(&chan->tx_q))) {
1292 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1293 control |= __set_txseq(chan, chan->next_tx_seq);
1294 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1295
1296 if (chan->fcs == L2CAP_FCS_CRC16) {
1297 fcs = crc16(0, (u8 *)skb->data,
1298 skb->len - L2CAP_FCS_SIZE);
1299 put_unaligned_le16(fcs,
1300 skb->data + skb->len - L2CAP_FCS_SIZE);
1301 }
1302
1303 l2cap_do_send(chan, skb);
1304
1305 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1306 }
1307 }
1308
1309 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1310 {
1311 struct sk_buff *skb, *tx_skb;
1312 u16 fcs;
1313 u32 control;
1314
1315 skb = skb_peek(&chan->tx_q);
1316 if (!skb)
1317 return;
1318
1319 do {
1320 if (bt_cb(skb)->tx_seq == tx_seq)
1321 break;
1322
1323 if (skb_queue_is_last(&chan->tx_q, skb))
1324 return;
1325
1326 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1327
1328 if (chan->remote_max_tx &&
1329 bt_cb(skb)->retries == chan->remote_max_tx) {
1330 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1331 return;
1332 }
1333
1334 tx_skb = skb_clone(skb, GFP_ATOMIC);
1335 bt_cb(skb)->retries++;
1336
1337 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1338 control &= __get_sar_mask(chan);
1339
1340 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1341 control |= __set_ctrl_final(chan);
1342
1343 control |= __set_reqseq(chan, chan->buffer_seq);
1344 control |= __set_txseq(chan, tx_seq);
1345
1346 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1347
1348 if (chan->fcs == L2CAP_FCS_CRC16) {
1349 fcs = crc16(0, (u8 *)tx_skb->data,
1350 tx_skb->len - L2CAP_FCS_SIZE);
1351 put_unaligned_le16(fcs,
1352 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1353 }
1354
1355 l2cap_do_send(chan, tx_skb);
1356 }
1357
1358 static int l2cap_ertm_send(struct l2cap_chan *chan)
1359 {
1360 struct sk_buff *skb, *tx_skb;
1361 u16 fcs;
1362 u32 control;
1363 int nsent = 0;
1364
1365 if (chan->state != BT_CONNECTED)
1366 return -ENOTCONN;
1367
1368 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1369
1370 if (chan->remote_max_tx &&
1371 bt_cb(skb)->retries == chan->remote_max_tx) {
1372 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1373 break;
1374 }
1375
1376 tx_skb = skb_clone(skb, GFP_ATOMIC);
1377
1378 bt_cb(skb)->retries++;
1379
1380 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1381 control &= __get_sar_mask(chan);
1382
1383 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1384 control |= __set_ctrl_final(chan);
1385
1386 control |= __set_reqseq(chan, chan->buffer_seq);
1387 control |= __set_txseq(chan, chan->next_tx_seq);
1388
1389 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1390
1391 if (chan->fcs == L2CAP_FCS_CRC16) {
1392 fcs = crc16(0, (u8 *)skb->data,
1393 tx_skb->len - L2CAP_FCS_SIZE);
1394 put_unaligned_le16(fcs, skb->data +
1395 tx_skb->len - L2CAP_FCS_SIZE);
1396 }
1397
1398 l2cap_do_send(chan, tx_skb);
1399
1400 __set_retrans_timer(chan);
1401
1402 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1403
1404 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1405
1406 if (bt_cb(skb)->retries == 1)
1407 chan->unacked_frames++;
1408
1409 chan->frames_sent++;
1410
1411 if (skb_queue_is_last(&chan->tx_q, skb))
1412 chan->tx_send_head = NULL;
1413 else
1414 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1415
1416 nsent++;
1417 }
1418
1419 return nsent;
1420 }
1421
1422 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1423 {
1424 int ret;
1425
1426 if (!skb_queue_empty(&chan->tx_q))
1427 chan->tx_send_head = chan->tx_q.next;
1428
1429 chan->next_tx_seq = chan->expected_ack_seq;
1430 ret = l2cap_ertm_send(chan);
1431 return ret;
1432 }
1433
1434 static void l2cap_send_ack(struct l2cap_chan *chan)
1435 {
1436 u32 control = 0;
1437
1438 control |= __set_reqseq(chan, chan->buffer_seq);
1439
1440 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1441 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1442 set_bit(CONN_RNR_SENT, &chan->conn_state);
1443 l2cap_send_sframe(chan, control);
1444 return;
1445 }
1446
1447 if (l2cap_ertm_send(chan) > 0)
1448 return;
1449
1450 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1451 l2cap_send_sframe(chan, control);
1452 }
1453
1454 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1455 {
1456 struct srej_list *tail;
1457 u32 control;
1458
1459 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1460 control |= __set_ctrl_final(chan);
1461
1462 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1463 control |= __set_reqseq(chan, tail->tx_seq);
1464
1465 l2cap_send_sframe(chan, control);
1466 }
1467
1468 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1469 {
1470 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1471 struct sk_buff **frag;
1472 int err, sent = 0;
1473
1474 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1475 return -EFAULT;
1476
1477 sent += count;
1478 len -= count;
1479
1480 /* Continuation fragments (no L2CAP header) */
1481 frag = &skb_shinfo(skb)->frag_list;
1482 while (len) {
1483 count = min_t(unsigned int, conn->mtu, len);
1484
1485 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1486 if (!*frag)
1487 return err;
1488 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1489 return -EFAULT;
1490
1491 (*frag)->priority = skb->priority;
1492
1493 sent += count;
1494 len -= count;
1495
1496 frag = &(*frag)->next;
1497 }
1498
1499 return sent;
1500 }
1501
1502 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1503 struct msghdr *msg, size_t len,
1504 u32 priority)
1505 {
1506 struct sock *sk = chan->sk;
1507 struct l2cap_conn *conn = chan->conn;
1508 struct sk_buff *skb;
1509 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1510 struct l2cap_hdr *lh;
1511
1512 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
1513
1514 count = min_t(unsigned int, (conn->mtu - hlen), len);
1515 skb = bt_skb_send_alloc(sk, count + hlen,
1516 msg->msg_flags & MSG_DONTWAIT, &err);
1517 if (!skb)
1518 return ERR_PTR(err);
1519
1520 skb->priority = priority;
1521
1522 /* Create L2CAP header */
1523 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1524 lh->cid = cpu_to_le16(chan->dcid);
1525 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1526 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1527
1528 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1529 if (unlikely(err < 0)) {
1530 kfree_skb(skb);
1531 return ERR_PTR(err);
1532 }
1533 return skb;
1534 }
1535
1536 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1537 struct msghdr *msg, size_t len,
1538 u32 priority)
1539 {
1540 struct sock *sk = chan->sk;
1541 struct l2cap_conn *conn = chan->conn;
1542 struct sk_buff *skb;
1543 int err, count, hlen = L2CAP_HDR_SIZE;
1544 struct l2cap_hdr *lh;
1545
1546 BT_DBG("sk %p len %d", sk, (int)len);
1547
1548 count = min_t(unsigned int, (conn->mtu - hlen), len);
1549 skb = bt_skb_send_alloc(sk, count + hlen,
1550 msg->msg_flags & MSG_DONTWAIT, &err);
1551 if (!skb)
1552 return ERR_PTR(err);
1553
1554 skb->priority = priority;
1555
1556 /* Create L2CAP header */
1557 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1558 lh->cid = cpu_to_le16(chan->dcid);
1559 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1560
1561 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1562 if (unlikely(err < 0)) {
1563 kfree_skb(skb);
1564 return ERR_PTR(err);
1565 }
1566 return skb;
1567 }
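/*
 * The resulting basic-mode PDU layout, for reference (field order follows
 * from struct l2cap_hdr; the payload length is an example):
 *
 *	+----------+----------+------------------+
 *	| len le16 | cid le16 | payload (len)    |
 *	+----------+----------+------------------+
 *
 * The connectionless variant above additionally prepends a 2-byte PSM to
 * the payload, hence its header length of L2CAP_HDR_SIZE +
 * L2CAP_PSMLEN_SIZE.
 */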
1568
1569 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1570 struct msghdr *msg, size_t len,
1571 u32 control, u16 sdulen)
1572 {
1573 struct sock *sk = chan->sk;
1574 struct l2cap_conn *conn = chan->conn;
1575 struct sk_buff *skb;
1576 int err, count, hlen;
1577 struct l2cap_hdr *lh;
1578
1579 BT_DBG("sk %p len %d", sk, (int)len);
1580
1581 if (!conn)
1582 return ERR_PTR(-ENOTCONN);
1583
1584 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1585 hlen = L2CAP_EXT_HDR_SIZE;
1586 else
1587 hlen = L2CAP_ENH_HDR_SIZE;
1588
1589 if (sdulen)
1590 hlen += L2CAP_SDULEN_SIZE;
1591
1592 if (chan->fcs == L2CAP_FCS_CRC16)
1593 hlen += L2CAP_FCS_SIZE;
1594
1595 count = min_t(unsigned int, (conn->mtu - hlen), len);
1596 skb = bt_skb_send_alloc(sk, count + hlen,
1597 msg->msg_flags & MSG_DONTWAIT, &err);
1598 if (!skb)
1599 return ERR_PTR(err);
1600
1601 /* Create L2CAP header */
1602 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1603 lh->cid = cpu_to_le16(chan->dcid);
1604 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1605
1606 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1607
1608 if (sdulen)
1609 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1610
1611 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1612 if (unlikely(err < 0)) {
1613 kfree_skb(skb);
1614 return ERR_PTR(err);
1615 }
1616
1617 if (chan->fcs == L2CAP_FCS_CRC16)
1618 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1619
1620 bt_cb(skb)->retries = 0;
1621 return skb;
1622 }
1623
1624 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1625 {
1626 struct sk_buff *skb;
1627 struct sk_buff_head sar_queue;
1628 u32 control;
1629 size_t size = 0;
1630
1631 skb_queue_head_init(&sar_queue);
1632 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1633 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1634 if (IS_ERR(skb))
1635 return PTR_ERR(skb);
1636
1637 __skb_queue_tail(&sar_queue, skb);
1638 len -= chan->remote_mps;
1639 size += chan->remote_mps;
1640
1641 while (len > 0) {
1642 size_t buflen;
1643
1644 if (len > chan->remote_mps) {
1645 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1646 buflen = chan->remote_mps;
1647 } else {
1648 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1649 buflen = len;
1650 }
1651
1652 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1653 if (IS_ERR(skb)) {
1654 skb_queue_purge(&sar_queue);
1655 return PTR_ERR(skb);
1656 }
1657
1658 __skb_queue_tail(&sar_queue, skb);
1659 len -= buflen;
1660 size += buflen;
1661 }
1662 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1663 if (chan->tx_send_head == NULL)
1664 chan->tx_send_head = sar_queue.next;
1665
1666 return size;
1667 }
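/*
 * A worked example for the segmentation above (numbers are illustrative):
 * an SDU of 1000 bytes with chan->remote_mps == 400 is queued as three
 * I-frames:
 *
 *	SAR START    - 400 payload bytes, SDU length field = 1000
 *	SAR CONTINUE - 400 payload bytes
 *	SAR END      - 200 payload bytes
 *
 * The fragments are spliced onto tx_q in one go so that ERTM assigns them
 * a contiguous run of tx_seq numbers.
 */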
1668
1669 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1670 u32 priority)
1671 {
1672 struct sk_buff *skb;
1673 u32 control;
1674 int err;
1675
1676 /* Connectionless channel */
1677 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1678 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1679 if (IS_ERR(skb))
1680 return PTR_ERR(skb);
1681
1682 l2cap_do_send(chan, skb);
1683 return len;
1684 }
1685
1686 switch (chan->mode) {
1687 case L2CAP_MODE_BASIC:
1688 /* Check outgoing MTU */
1689 if (len > chan->omtu)
1690 return -EMSGSIZE;
1691
1692 /* Create a basic PDU */
1693 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1694 if (IS_ERR(skb))
1695 return PTR_ERR(skb);
1696
1697 l2cap_do_send(chan, skb);
1698 err = len;
1699 break;
1700
1701 case L2CAP_MODE_ERTM:
1702 case L2CAP_MODE_STREAMING:
1703 /* Entire SDU fits into one PDU */
1704 if (len <= chan->remote_mps) {
1705 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1706 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1707 0);
1708 if (IS_ERR(skb))
1709 return PTR_ERR(skb);
1710
1711 __skb_queue_tail(&chan->tx_q, skb);
1712
1713 if (chan->tx_send_head == NULL)
1714 chan->tx_send_head = skb;
1715
1716 } else {
1717 /* Segment SDU into multiple PDUs */
1718 err = l2cap_sar_segment_sdu(chan, msg, len);
1719 if (err < 0)
1720 return err;
1721 }
1722
1723 if (chan->mode == L2CAP_MODE_STREAMING) {
1724 l2cap_streaming_send(chan);
1725 err = len;
1726 break;
1727 }
1728
1729 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1730 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1731 err = len;
1732 break;
1733 }
1734
1735 err = l2cap_ertm_send(chan);
1736 if (err >= 0)
1737 err = len;
1738
1739 break;
1740
1741 default:
1742 BT_DBG("bad mode %1.1x", chan->mode);
1743 err = -EBADFD;
1744 }
1745
1746 return err;
1747 }
1748
1749 /* Copy frame to all raw sockets on that connection */
1750 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1751 {
1752 struct sk_buff *nskb;
1753 struct l2cap_chan *chan;
1754
1755 BT_DBG("conn %p", conn);
1756
1757 read_lock(&conn->chan_lock);
1758 list_for_each_entry(chan, &conn->chan_l, list) {
1759 struct sock *sk = chan->sk;
1760 if (chan->chan_type != L2CAP_CHAN_RAW)
1761 continue;
1762
1763 /* Don't send frame to the socket it came from */
1764 if (skb->sk == sk)
1765 continue;
1766 nskb = skb_clone(skb, GFP_ATOMIC);
1767 if (!nskb)
1768 continue;
1769
1770 if (chan->ops->recv(chan->data, nskb))
1771 kfree_skb(nskb);
1772 }
1773 read_unlock(&conn->chan_lock);
1774 }
1775
1776 /* ---- L2CAP signalling commands ---- */
1777 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1778 u8 code, u8 ident, u16 dlen, void *data)
1779 {
1780 struct sk_buff *skb, **frag;
1781 struct l2cap_cmd_hdr *cmd;
1782 struct l2cap_hdr *lh;
1783 int len, count;
1784
1785 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1786 conn, code, ident, dlen);
1787
1788 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1789 count = min_t(unsigned int, conn->mtu, len);
1790
1791 skb = bt_skb_alloc(count, GFP_ATOMIC);
1792 if (!skb)
1793 return NULL;
1794
1795 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1796 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1797
1798 if (conn->hcon->type == LE_LINK)
1799 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1800 else
1801 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1802
1803 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1804 cmd->code = code;
1805 cmd->ident = ident;
1806 cmd->len = cpu_to_le16(dlen);
1807
1808 if (dlen) {
1809 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1810 memcpy(skb_put(skb, count), data, count);
1811 data += count;
1812 }
1813
1814 len -= skb->len;
1815
1816 /* Continuation fragments (no L2CAP header) */
1817 frag = &skb_shinfo(skb)->frag_list;
1818 while (len) {
1819 count = min_t(unsigned int, conn->mtu, len);
1820
1821 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1822 if (!*frag)
1823 goto fail;
1824
1825 memcpy(skb_put(*frag, count), data, count);
1826
1827 len -= count;
1828 data += count;
1829
1830 frag = &(*frag)->next;
1831 }
1832
1833 return skb;
1834
1835 fail:
1836 kfree_skb(skb);
1837 return NULL;
1838 }
1839
1840 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1841 {
1842 struct l2cap_conf_opt *opt = *ptr;
1843 int len;
1844
1845 len = L2CAP_CONF_OPT_SIZE + opt->len;
1846 *ptr += len;
1847
1848 *type = opt->type;
1849 *olen = opt->len;
1850
1851 switch (opt->len) {
1852 case 1:
1853 *val = *((u8 *) opt->val);
1854 break;
1855
1856 case 2:
1857 *val = get_unaligned_le16(opt->val);
1858 break;
1859
1860 case 4:
1861 *val = get_unaligned_le32(opt->val);
1862 break;
1863
1864 default:
1865 *val = (unsigned long) opt->val;
1866 break;
1867 }
1868
1869 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1870 return len;
1871 }
1872
1873 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1874 {
1875 struct l2cap_conf_opt *opt = *ptr;
1876
1877 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1878
1879 opt->type = type;
1880 opt->len = len;
1881
1882 switch (len) {
1883 case 1:
1884 *((u8 *) opt->val) = val;
1885 break;
1886
1887 case 2:
1888 put_unaligned_le16(val, opt->val);
1889 break;
1890
1891 case 4:
1892 put_unaligned_le32(val, opt->val);
1893 break;
1894
1895 default:
1896 memcpy(opt->val, (void *) val, len);
1897 break;
1898 }
1899
1900 *ptr += L2CAP_CONF_OPT_SIZE + len;
1901 }
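/*
 * A short sketch of how the two helpers above cooperate (buffer names are
 * placeholders): options are packed back to back as { type, len, val }
 * triplets and walked the same way on reception.
 *
 *	void *ptr = req->data;
 *
 *	l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
 *	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
 *						(unsigned long) &rfc);
 *
 *	... and on the receiving side ...
 *
 *	while (len >= L2CAP_CONF_OPT_SIZE)
 *		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
 */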
1902
1903 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1904 {
1905 struct l2cap_conf_efs efs;
1906
1907 switch (chan->mode) {
1908 case L2CAP_MODE_ERTM:
1909 efs.id = chan->local_id;
1910 efs.stype = chan->local_stype;
1911 efs.msdu = cpu_to_le16(chan->local_msdu);
1912 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1913 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1914 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1915 break;
1916
1917 case L2CAP_MODE_STREAMING:
1918 efs.id = 1;
1919 efs.stype = L2CAP_SERV_BESTEFFORT;
1920 efs.msdu = cpu_to_le16(chan->local_msdu);
1921 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1922 efs.acc_lat = 0;
1923 efs.flush_to = 0;
1924 break;
1925
1926 default:
1927 return;
1928 }
1929
1930 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1931 (unsigned long) &efs);
1932 }
1933
1934 static void l2cap_ack_timeout(unsigned long arg)
1935 {
1936 struct l2cap_chan *chan = (void *) arg;
1937
1938 bh_lock_sock(chan->sk);
1939 l2cap_send_ack(chan);
1940 bh_unlock_sock(chan->sk);
1941 }
1942
1943 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1944 {
1945 struct sock *sk = chan->sk;
1946
1947 chan->expected_ack_seq = 0;
1948 chan->unacked_frames = 0;
1949 chan->buffer_seq = 0;
1950 chan->num_acked = 0;
1951 chan->frames_sent = 0;
1952
1953 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1954 (unsigned long) chan);
1955 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1956 (unsigned long) chan);
1957 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1958
1959 skb_queue_head_init(&chan->srej_q);
1960
1961 INIT_LIST_HEAD(&chan->srej_l);
1962
1963
1964 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1965 }
1966
1967 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1968 {
1969 switch (mode) {
1970 case L2CAP_MODE_STREAMING:
1971 case L2CAP_MODE_ERTM:
1972 if (l2cap_mode_supported(mode, remote_feat_mask))
1973 return mode;
1974 /* fall through */
1975 default:
1976 return L2CAP_MODE_BASIC;
1977 }
1978 }
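/*
 * Example of the fallback above (the remote feature mask value is
 * hypothetical): requesting ERTM against a remote that advertises neither
 * ERTM nor streaming quietly degrades to basic mode instead of failing.
 *
 *	chan->mode = l2cap_select_mode(L2CAP_MODE_ERTM, 0x0000);
 *	-> chan->mode == L2CAP_MODE_BASIC
 */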
1979
1980 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1981 {
1982 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
1983 }
1984
1985 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
1986 {
1987 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
1988 }
1989
1990 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
1991 {
1992 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
1993 __l2cap_ews_supported(chan)) {
1994 /* use extended control field */
1995 set_bit(FLAG_EXT_CTRL, &chan->flags);
1996 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
1997 } else {
1998 chan->tx_win = min_t(u16, chan->tx_win,
1999 L2CAP_DEFAULT_TX_WINDOW);
2000 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2001 }
2002 }
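/*
 * Example for the window setup above, assuming the usual defaults of 63
 * for the enhanced window and 0x3fff for the extended one: a requested
 * tx_win of 100 keeps its value and switches the channel to the extended
 * control field only when high speed is enabled and the remote advertises
 * L2CAP_FEAT_EXT_WINDOW; otherwise it is clamped.
 *
 *	chan->tx_win = 100;
 *	l2cap_txwin_setup(chan);
 *	-> FLAG_EXT_CTRL set, tx_win == 100, tx_win_max == 0x3fff
 *	-> or tx_win == tx_win_max == L2CAP_DEFAULT_TX_WINDOW == 63
 */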
2003
2004 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2005 {
2006 struct l2cap_conf_req *req = data;
2007 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2008 void *ptr = req->data;
2009 u16 size;
2010
2011 BT_DBG("chan %p", chan);
2012
2013 if (chan->num_conf_req || chan->num_conf_rsp)
2014 goto done;
2015
2016 switch (chan->mode) {
2017 case L2CAP_MODE_STREAMING:
2018 case L2CAP_MODE_ERTM:
2019 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2020 break;
2021
2022 if (__l2cap_efs_supported(chan))
2023 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2024
2025 /* fall through */
2026 default:
2027 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2028 break;
2029 }
2030
2031 done:
2032 if (chan->imtu != L2CAP_DEFAULT_MTU)
2033 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2034
2035 switch (chan->mode) {
2036 case L2CAP_MODE_BASIC:
2037 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2038 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2039 break;
2040
2041 rfc.mode = L2CAP_MODE_BASIC;
2042 rfc.txwin_size = 0;
2043 rfc.max_transmit = 0;
2044 rfc.retrans_timeout = 0;
2045 rfc.monitor_timeout = 0;
2046 rfc.max_pdu_size = 0;
2047
2048 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2049 (unsigned long) &rfc);
2050 break;
2051
2052 case L2CAP_MODE_ERTM:
2053 rfc.mode = L2CAP_MODE_ERTM;
2054 rfc.max_transmit = chan->max_tx;
2055 rfc.retrans_timeout = 0;
2056 rfc.monitor_timeout = 0;
2057
2058 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2059 L2CAP_EXT_HDR_SIZE -
2060 L2CAP_SDULEN_SIZE -
2061 L2CAP_FCS_SIZE);
2062 rfc.max_pdu_size = cpu_to_le16(size);
2063
2064 l2cap_txwin_setup(chan);
2065
2066 rfc.txwin_size = min_t(u16, chan->tx_win,
2067 L2CAP_DEFAULT_TX_WINDOW);
2068
2069 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2070 (unsigned long) &rfc);
2071
2072 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2073 l2cap_add_opt_efs(&ptr, chan);
2074
2075 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2076 break;
2077
2078 if (chan->fcs == L2CAP_FCS_NONE ||
2079 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2080 chan->fcs = L2CAP_FCS_NONE;
2081 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2082 }
2083
2084 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2085 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2086 chan->tx_win);
2087 break;
2088
2089 case L2CAP_MODE_STREAMING:
2090 rfc.mode = L2CAP_MODE_STREAMING;
2091 rfc.txwin_size = 0;
2092 rfc.max_transmit = 0;
2093 rfc.retrans_timeout = 0;
2094 rfc.monitor_timeout = 0;
2095
2096 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2097 L2CAP_EXT_HDR_SIZE -
2098 L2CAP_SDULEN_SIZE -
2099 L2CAP_FCS_SIZE);
2100 rfc.max_pdu_size = cpu_to_le16(size);
2101
2102 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2103 (unsigned long) &rfc);
2104
2105 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2106 l2cap_add_opt_efs(&ptr, chan);
2107
2108 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2109 break;
2110
2111 if (chan->fcs == L2CAP_FCS_NONE ||
2112 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2113 chan->fcs = L2CAP_FCS_NONE;
2114 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2115 }
2116 break;
2117 }
2118
2119 req->dcid = cpu_to_le16(chan->dcid);
2120 req->flags = cpu_to_le16(0);
2121
2122 return ptr - data;
2123 }
2124
2125 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2126 {
2127 struct l2cap_conf_rsp *rsp = data;
2128 void *ptr = rsp->data;
2129 void *req = chan->conf_req;
2130 int len = chan->conf_len;
2131 int type, hint, olen;
2132 unsigned long val;
2133 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2134 struct l2cap_conf_efs efs;
2135 u8 remote_efs = 0;
2136 u16 mtu = L2CAP_DEFAULT_MTU;
2137 u16 result = L2CAP_CONF_SUCCESS;
2138 u16 size;
2139
2140 BT_DBG("chan %p", chan);
2141
2142 while (len >= L2CAP_CONF_OPT_SIZE) {
2143 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2144
2145 hint = type & L2CAP_CONF_HINT;
2146 type &= L2CAP_CONF_MASK;
2147
2148 switch (type) {
2149 case L2CAP_CONF_MTU:
2150 mtu = val;
2151 break;
2152
2153 case L2CAP_CONF_FLUSH_TO:
2154 chan->flush_to = val;
2155 break;
2156
2157 case L2CAP_CONF_QOS:
2158 break;
2159
2160 case L2CAP_CONF_RFC:
2161 if (olen == sizeof(rfc))
2162 memcpy(&rfc, (void *) val, olen);
2163 break;
2164
2165 case L2CAP_CONF_FCS:
2166 if (val == L2CAP_FCS_NONE)
2167 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2168 break;
2169
2170 case L2CAP_CONF_EFS:
2171 remote_efs = 1;
2172 if (olen == sizeof(efs))
2173 memcpy(&efs, (void *) val, olen);
2174 break;
2175
2176 case L2CAP_CONF_EWS:
2177 if (!enable_hs)
2178 return -ECONNREFUSED;
2179
2180 set_bit(FLAG_EXT_CTRL, &chan->flags);
2181 set_bit(CONF_EWS_RECV, &chan->conf_state);
2182 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2183 chan->remote_tx_win = val;
2184 break;
2185
2186 default:
2187 if (hint)
2188 break;
2189
2190 result = L2CAP_CONF_UNKNOWN;
2191 *((u8 *) ptr++) = type;
2192 break;
2193 }
2194 }
2195
2196 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2197 goto done;
2198
2199 switch (chan->mode) {
2200 case L2CAP_MODE_STREAMING:
2201 case L2CAP_MODE_ERTM:
2202 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2203 chan->mode = l2cap_select_mode(rfc.mode,
2204 chan->conn->feat_mask);
2205 break;
2206 }
2207
2208 if (remote_efs) {
2209 if (__l2cap_efs_supported(chan))
2210 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2211 else
2212 return -ECONNREFUSED;
2213 }
2214
2215 if (chan->mode != rfc.mode)
2216 return -ECONNREFUSED;
2217
2218 break;
2219 }
2220
2221 done:
2222 if (chan->mode != rfc.mode) {
2223 result = L2CAP_CONF_UNACCEPT;
2224 rfc.mode = chan->mode;
2225
2226 if (chan->num_conf_rsp == 1)
2227 return -ECONNREFUSED;
2228
2229 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2230 sizeof(rfc), (unsigned long) &rfc);
2231 }
2232
2233 if (result == L2CAP_CONF_SUCCESS) {
2234 /* Configure output options and let the other side know
2235 * which ones we don't like. */
2236
2237 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2238 result = L2CAP_CONF_UNACCEPT;
2239 else {
2240 chan->omtu = mtu;
2241 set_bit(CONF_MTU_DONE, &chan->conf_state);
2242 }
2243 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2244
2245 if (remote_efs) {
2246 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2247 efs.stype != L2CAP_SERV_NOTRAFIC &&
2248 efs.stype != chan->local_stype) {
2249
2250 result = L2CAP_CONF_UNACCEPT;
2251
2252 if (chan->num_conf_req >= 1)
2253 return -ECONNREFUSED;
2254
2255 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2256 sizeof(efs),
2257 (unsigned long) &efs);
2258 } else {
2259 /* Send PENDING Conf Rsp */
2260 result = L2CAP_CONF_PENDING;
2261 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2262 }
2263 }
2264
2265 switch (rfc.mode) {
2266 case L2CAP_MODE_BASIC:
2267 chan->fcs = L2CAP_FCS_NONE;
2268 set_bit(CONF_MODE_DONE, &chan->conf_state);
2269 break;
2270
2271 case L2CAP_MODE_ERTM:
2272 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2273 chan->remote_tx_win = rfc.txwin_size;
2274 else
2275 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2276
2277 chan->remote_max_tx = rfc.max_transmit;
2278
2279 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2280 chan->conn->mtu -
2281 L2CAP_EXT_HDR_SIZE -
2282 L2CAP_SDULEN_SIZE -
2283 L2CAP_FCS_SIZE);
2284 rfc.max_pdu_size = cpu_to_le16(size);
2285 chan->remote_mps = size;
2286
2287 rfc.retrans_timeout =
2288 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2289 rfc.monitor_timeout =
2290 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2291
2292 set_bit(CONF_MODE_DONE, &chan->conf_state);
2293
2294 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2295 sizeof(rfc), (unsigned long) &rfc);
2296
2297 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2298 chan->remote_id = efs.id;
2299 chan->remote_stype = efs.stype;
2300 chan->remote_msdu = le16_to_cpu(efs.msdu);
2301 chan->remote_flush_to =
2302 le32_to_cpu(efs.flush_to);
2303 chan->remote_acc_lat =
2304 le32_to_cpu(efs.acc_lat);
2305 chan->remote_sdu_itime =
2306 le32_to_cpu(efs.sdu_itime);
2307 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2308 sizeof(efs), (unsigned long) &efs);
2309 }
2310 break;
2311
2312 case L2CAP_MODE_STREAMING:
2313 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2314 chan->conn->mtu -
2315 L2CAP_EXT_HDR_SIZE -
2316 L2CAP_SDULEN_SIZE -
2317 L2CAP_FCS_SIZE);
2318 rfc.max_pdu_size = cpu_to_le16(size);
2319 chan->remote_mps = size;
2320
2321 set_bit(CONF_MODE_DONE, &chan->conf_state);
2322
2323 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2324 sizeof(rfc), (unsigned long) &rfc);
2325
2326 break;
2327
2328 default:
2329 result = L2CAP_CONF_UNACCEPT;
2330
2331 memset(&rfc, 0, sizeof(rfc));
2332 rfc.mode = chan->mode;
2333 }
2334
2335 if (result == L2CAP_CONF_SUCCESS)
2336 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2337 }
2338 rsp->scid = cpu_to_le16(chan->dcid);
2339 rsp->result = cpu_to_le16(result);
2340 rsp->flags = cpu_to_le16(0x0000);
2341
2342 return ptr - data;
2343 }
2344
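/* Walk the options the peer returned in a Configuration Response and build a
 * follow-up Configuration Request in data, adjusting our stored parameters
 * (imtu, flush_to, tx_win, ERTM timeouts and MPS) to values the peer will
 * accept.  *result may be downgraded to L2CAP_CONF_UNACCEPT if the proposed
 * MTU is below L2CAP_DEFAULT_MIN_MTU.  Returns the length of the request
 * written into data, or -ECONNREFUSED if the peer tries to change the mode
 * of a state-2 device or of a basic-mode channel.
 */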
2345 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2346 {
2347 struct l2cap_conf_req *req = data;
2348 void *ptr = req->data;
2349 int type, olen;
2350 unsigned long val;
2351 struct l2cap_conf_rfc rfc;
2352
2353 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2354
2355 while (len >= L2CAP_CONF_OPT_SIZE) {
2356 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2357
2358 switch (type) {
2359 case L2CAP_CONF_MTU:
2360 if (val < L2CAP_DEFAULT_MIN_MTU) {
2361 *result = L2CAP_CONF_UNACCEPT;
2362 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2363 } else
2364 chan->imtu = val;
2365 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2366 break;
2367
2368 case L2CAP_CONF_FLUSH_TO:
2369 chan->flush_to = val;
2370 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2371 2, chan->flush_to);
2372 break;
2373
2374 case L2CAP_CONF_RFC:
2375 if (olen == sizeof(rfc))
2376 memcpy(&rfc, (void *)val, olen);
2377
2378 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2379 rfc.mode != chan->mode)
2380 return -ECONNREFUSED;
2381
2382 chan->fcs = 0;
2383
2384 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2385 sizeof(rfc), (unsigned long) &rfc);
2386 break;
2387
2388 case L2CAP_CONF_EWS:
2389 chan->tx_win = min_t(u16, val,
2390 L2CAP_DEFAULT_EXT_WINDOW);
2391 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2392 chan->tx_win);
2393 break;
2394 }
2395 }
2396
2397 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2398 return -ECONNREFUSED;
2399
2400 chan->mode = rfc.mode;
2401
2402 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2403 switch (rfc.mode) {
2404 case L2CAP_MODE_ERTM:
2405 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2406 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2407 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2408 break;
2409 case L2CAP_MODE_STREAMING:
2410 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2411 }
2412 }
2413
2414 req->dcid = cpu_to_le16(chan->dcid);
2415 req->flags = cpu_to_le16(0x0000);
2416
2417 return ptr - data;
2418 }
2419
2420 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2421 {
2422 struct l2cap_conf_rsp *rsp = data;
2423 void *ptr = rsp->data;
2424
2425 BT_DBG("chan %p", chan);
2426
2427 rsp->scid = cpu_to_le16(chan->dcid);
2428 rsp->result = cpu_to_le16(result);
2429 rsp->flags = cpu_to_le16(flags);
2430
2431 return ptr - data;
2432 }
2433
2434 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2435 {
2436 struct l2cap_conn_rsp rsp;
2437 struct l2cap_conn *conn = chan->conn;
2438 u8 buf[128];
2439
2440 rsp.scid = cpu_to_le16(chan->dcid);
2441 rsp.dcid = cpu_to_le16(chan->scid);
2442 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2443 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2444 l2cap_send_cmd(conn, chan->ident,
2445 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2446
2447 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2448 return;
2449
2450 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2451 l2cap_build_conf_req(chan, buf), buf);
2452 chan->num_conf_req++;
2453 }
2454
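/* Extract the RFC option from a successful Configuration Response and cache
 * the negotiated ERTM/streaming parameters (retransmission and monitor
 * timeouts, MPS) on the channel.  Channels in basic mode are left untouched.
 */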
2455 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2456 {
2457 int type, olen;
2458 unsigned long val;
2459 struct l2cap_conf_rfc rfc;
2460
2461 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2462
2463 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2464 return;
2465
2466 while (len >= L2CAP_CONF_OPT_SIZE) {
2467 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2468
2469 switch (type) {
2470 case L2CAP_CONF_RFC:
2471 if (olen == sizeof(rfc))
2472 memcpy(&rfc, (void *)val, olen);
2473 goto done;
2474 }
2475 }
2476
2477 done:
2478 switch (rfc.mode) {
2479 case L2CAP_MODE_ERTM:
2480 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2481 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2482 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2483 break;
2484 case L2CAP_MODE_STREAMING:
2485 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2486 }
2487 }
2488
2489 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2490 {
2491 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2492
2493 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2494 return 0;
2495
2496 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2497 cmd->ident == conn->info_ident) {
2498 del_timer(&conn->info_timer);
2499
2500 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2501 conn->info_ident = 0;
2502
2503 l2cap_conn_start(conn);
2504 }
2505
2506 return 0;
2507 }
2508
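/* Handle an incoming L2CAP Connection Request.  The flow implemented below:
 * look up a listening channel for the requested PSM, verify link security
 * (except for SDP, PSM 0x0001), create a new child channel through the
 * parent's new_connection() callback, add it to the connection and answer
 * with a Connection Response; failures report L2CAP_CR_BAD_PSM,
 * L2CAP_CR_SEC_BLOCK or L2CAP_CR_NO_MEM.  When the remote feature mask is
 * still unknown the response is left pending and an Information Request for
 * the feature mask is sent first.
 */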
2509 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2510 {
2511 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2512 struct l2cap_conn_rsp rsp;
2513 struct l2cap_chan *chan = NULL, *pchan;
2514 struct sock *parent, *sk = NULL;
2515 int result, status = L2CAP_CS_NO_INFO;
2516
2517 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2518 __le16 psm = req->psm;
2519
2520 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2521
2522 /* Check if we have a socket listening on this psm */
2523 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2524 if (!pchan) {
2525 result = L2CAP_CR_BAD_PSM;
2526 goto sendresp;
2527 }
2528
2529 parent = pchan->sk;
2530
2531 bh_lock_sock(parent);
2532
2533 /* Check if the ACL is secure enough (if not SDP) */
2534 if (psm != cpu_to_le16(0x0001) &&
2535 !hci_conn_check_link_mode(conn->hcon)) {
2536 conn->disc_reason = 0x05;
2537 result = L2CAP_CR_SEC_BLOCK;
2538 goto response;
2539 }
2540
2541 result = L2CAP_CR_NO_MEM;
2542
2543 /* Check for backlog size */
2544 if (sk_acceptq_is_full(parent)) {
2545 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2546 goto response;
2547 }
2548
2549 chan = pchan->ops->new_connection(pchan->data);
2550 if (!chan)
2551 goto response;
2552
2553 sk = chan->sk;
2554
2555 write_lock_bh(&conn->chan_lock);
2556
2557 /* Check if we already have a channel with that dcid */
2558 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2559 write_unlock_bh(&conn->chan_lock);
2560 sock_set_flag(sk, SOCK_ZAPPED);
2561 chan->ops->close(chan->data);
2562 goto response;
2563 }
2564
2565 hci_conn_hold(conn->hcon);
2566
2567 bacpy(&bt_sk(sk)->src, conn->src);
2568 bacpy(&bt_sk(sk)->dst, conn->dst);
2569 chan->psm = psm;
2570 chan->dcid = scid;
2571
2572 bt_accept_enqueue(parent, sk);
2573
2574 __l2cap_chan_add(conn, chan);
2575
2576 dcid = chan->scid;
2577
2578 __set_chan_timer(chan, sk->sk_sndtimeo);
2579
2580 chan->ident = cmd->ident;
2581
2582 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2583 if (l2cap_check_security(chan)) {
2584 if (bt_sk(sk)->defer_setup) {
2585 l2cap_state_change(chan, BT_CONNECT2);
2586 result = L2CAP_CR_PEND;
2587 status = L2CAP_CS_AUTHOR_PEND;
2588 parent->sk_data_ready(parent, 0);
2589 } else {
2590 l2cap_state_change(chan, BT_CONFIG);
2591 result = L2CAP_CR_SUCCESS;
2592 status = L2CAP_CS_NO_INFO;
2593 }
2594 } else {
2595 l2cap_state_change(chan, BT_CONNECT2);
2596 result = L2CAP_CR_PEND;
2597 status = L2CAP_CS_AUTHEN_PEND;
2598 }
2599 } else {
2600 l2cap_state_change(chan, BT_CONNECT2);
2601 result = L2CAP_CR_PEND;
2602 status = L2CAP_CS_NO_INFO;
2603 }
2604
2605 write_unlock_bh(&conn->chan_lock);
2606
2607 response:
2608 bh_unlock_sock(parent);
2609
2610 sendresp:
2611 rsp.scid = cpu_to_le16(scid);
2612 rsp.dcid = cpu_to_le16(dcid);
2613 rsp.result = cpu_to_le16(result);
2614 rsp.status = cpu_to_le16(status);
2615 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2616
2617 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2618 struct l2cap_info_req info;
2619 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2620
2621 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2622 conn->info_ident = l2cap_get_ident(conn);
2623
2624 mod_timer(&conn->info_timer, jiffies +
2625 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2626
2627 l2cap_send_cmd(conn, conn->info_ident,
2628 L2CAP_INFO_REQ, sizeof(info), &info);
2629 }
2630
2631 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2632 result == L2CAP_CR_SUCCESS) {
2633 u8 buf[128];
2634 set_bit(CONF_REQ_SENT, &chan->conf_state);
2635 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2636 l2cap_build_conf_req(chan, buf), buf);
2637 chan->num_conf_req++;
2638 }
2639
2640 return 0;
2641 }
2642
2643 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2644 {
2645 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2646 u16 scid, dcid, result, status;
2647 struct l2cap_chan *chan;
2648 struct sock *sk;
2649 u8 req[128];
2650
2651 scid = __le16_to_cpu(rsp->scid);
2652 dcid = __le16_to_cpu(rsp->dcid);
2653 result = __le16_to_cpu(rsp->result);
2654 status = __le16_to_cpu(rsp->status);
2655
2656 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2657
2658 if (scid) {
2659 chan = l2cap_get_chan_by_scid(conn, scid);
2660 if (!chan)
2661 return -EFAULT;
2662 } else {
2663 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2664 if (!chan)
2665 return -EFAULT;
2666 }
2667
2668 sk = chan->sk;
2669
2670 switch (result) {
2671 case L2CAP_CR_SUCCESS:
2672 l2cap_state_change(chan, BT_CONFIG);
2673 chan->ident = 0;
2674 chan->dcid = dcid;
2675 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2676
2677 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2678 break;
2679
2680 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2681 l2cap_build_conf_req(chan, req), req);
2682 chan->num_conf_req++;
2683 break;
2684
2685 case L2CAP_CR_PEND:
2686 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2687 break;
2688
2689 default:
2690 /* don't delete l2cap channel if sk is owned by user */
2691 if (sock_owned_by_user(sk)) {
2692 l2cap_state_change(chan, BT_DISCONN);
2693 __clear_chan_timer(chan);
2694 __set_chan_timer(chan, HZ / 5);
2695 break;
2696 }
2697
2698 l2cap_chan_del(chan, ECONNREFUSED);
2699 break;
2700 }
2701
2702 bh_unlock_sock(sk);
2703 return 0;
2704 }
2705
2706 static inline void set_default_fcs(struct l2cap_chan *chan)
2707 {
2708 /* FCS is enabled only in ERTM or streaming mode, if one or both
2709 * sides request it.
2710 */
2711 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2712 chan->fcs = L2CAP_FCS_NONE;
2713 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2714 chan->fcs = L2CAP_FCS_CRC16;
2715 }
2716
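/* Handle a Configuration Request.  Requests may be split over several
 * signalling PDUs: while the continuation bit (0x0001) is set in the flags
 * field the option data is appended to chan->conf_req and an empty success
 * response is returned.  Only when the final fragment arrives is the
 * accumulated buffer handed to l2cap_parse_conf_req() and a real response
 * sent.  Once both CONF_OUTPUT_DONE and CONF_INPUT_DONE are set the channel
 * moves to BT_CONNECTED and, for ERTM, l2cap_ertm_init() sets up the
 * retransmission machinery.
 */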
2717 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2718 {
2719 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2720 u16 dcid, flags;
2721 u8 rsp[64];
2722 struct l2cap_chan *chan;
2723 struct sock *sk;
2724 int len;
2725
2726 dcid = __le16_to_cpu(req->dcid);
2727 flags = __le16_to_cpu(req->flags);
2728
2729 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2730
2731 chan = l2cap_get_chan_by_scid(conn, dcid);
2732 if (!chan)
2733 return -ENOENT;
2734
2735 sk = chan->sk;
2736
2737 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2738 struct l2cap_cmd_rej_cid rej;
2739
2740 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2741 rej.scid = cpu_to_le16(chan->scid);
2742 rej.dcid = cpu_to_le16(chan->dcid);
2743
2744 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2745 sizeof(rej), &rej);
2746 goto unlock;
2747 }
2748
2749 /* Reject if config buffer is too small. */
2750 len = cmd_len - sizeof(*req);
2751 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2752 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2753 l2cap_build_conf_rsp(chan, rsp,
2754 L2CAP_CONF_REJECT, flags), rsp);
2755 goto unlock;
2756 }
2757
2758 /* Store config. */
2759 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2760 chan->conf_len += len;
2761
2762 if (flags & 0x0001) {
2763 /* Incomplete config. Send empty response. */
2764 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2765 l2cap_build_conf_rsp(chan, rsp,
2766 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2767 goto unlock;
2768 }
2769
2770 /* Complete config. */
2771 len = l2cap_parse_conf_req(chan, rsp);
2772 if (len < 0) {
2773 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2774 goto unlock;
2775 }
2776
2777 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2778 chan->num_conf_rsp++;
2779
2780 /* Reset config buffer. */
2781 chan->conf_len = 0;
2782
2783 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2784 goto unlock;
2785
2786 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2787 set_default_fcs(chan);
2788
2789 l2cap_state_change(chan, BT_CONNECTED);
2790
2791 chan->next_tx_seq = 0;
2792 chan->expected_tx_seq = 0;
2793 skb_queue_head_init(&chan->tx_q);
2794 if (chan->mode == L2CAP_MODE_ERTM)
2795 l2cap_ertm_init(chan);
2796
2797 l2cap_chan_ready(sk);
2798 goto unlock;
2799 }
2800
2801 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2802 u8 buf[64];
2803 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2804 l2cap_build_conf_req(chan, buf), buf);
2805 chan->num_conf_req++;
2806 }
2807
2808 /* Got Conf Rsp PENDING from remote side and assume we sent
2809 Conf Rsp PENDING in the code above */
2810 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2811 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2812
2813 /* check compatibility */
2814
2815 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2816 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2817
2818 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2819 l2cap_build_conf_rsp(chan, rsp,
2820 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2821 }
2822
2823 unlock:
2824 bh_unlock_sock(sk);
2825 return 0;
2826 }
2827
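/* Handle a Configuration Response.  On L2CAP_CONF_SUCCESS the negotiated RFC
 * values are cached via l2cap_conf_rfc_get(); L2CAP_CONF_PENDING is noted
 * and, if we sent a PENDING response ourselves, answered with a final success
 * response; L2CAP_CONF_UNACCEPT triggers a renegotiated Configuration Request
 * as long as no more than L2CAP_CONF_MAX_CONF_RSP responses have been seen.
 * Anything else tears the channel down with a Disconnection Request.
 */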
2828 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2829 {
2830 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2831 u16 scid, flags, result;
2832 struct l2cap_chan *chan;
2833 struct sock *sk;
2834 int len = __le16_to_cpu(cmd->len) - sizeof(*rsp);
2835
2836 scid = __le16_to_cpu(rsp->scid);
2837 flags = __le16_to_cpu(rsp->flags);
2838 result = __le16_to_cpu(rsp->result);
2839
2840 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2841 scid, flags, result);
2842
2843 chan = l2cap_get_chan_by_scid(conn, scid);
2844 if (!chan)
2845 return 0;
2846
2847 sk = chan->sk;
2848
2849 switch (result) {
2850 case L2CAP_CONF_SUCCESS:
2851 l2cap_conf_rfc_get(chan, rsp->data, len);
2852 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2853 break;
2854
2855 case L2CAP_CONF_PENDING:
2856 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2857
2858 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2859 char buf[64];
2860
2861 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2862 buf, &result);
2863 if (len < 0) {
2864 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2865 goto done;
2866 }
2867
2868 /* check compatibility */
2869
2870 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2871 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2872
2873 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2874 l2cap_build_conf_rsp(chan, buf,
2875 L2CAP_CONF_SUCCESS, 0x0000), buf);
2876 }
2877 goto done;
2878
2879 case L2CAP_CONF_UNACCEPT:
2880 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2881 char req[64];
2882
2883 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2884 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2885 goto done;
2886 }
2887
2888 /* throw out any old stored conf requests */
2889 result = L2CAP_CONF_SUCCESS;
2890 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2891 req, &result);
2892 if (len < 0) {
2893 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2894 goto done;
2895 }
2896
2897 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2898 L2CAP_CONF_REQ, len, req);
2899 chan->num_conf_req++;
2900 if (result != L2CAP_CONF_SUCCESS)
2901 goto done;
2902 break;
2903 }
2904
2905 default:
2906 sk->sk_err = ECONNRESET;
2907 __set_chan_timer(chan, HZ * 5);
2908 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2909 goto done;
2910 }
2911
2912 if (flags & 0x01)
2913 goto done;
2914
2915 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2916
2917 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2918 set_default_fcs(chan);
2919
2920 l2cap_state_change(chan, BT_CONNECTED);
2921 chan->next_tx_seq = 0;
2922 chan->expected_tx_seq = 0;
2923 skb_queue_head_init(&chan->tx_q);
2924 if (chan->mode == L2CAP_MODE_ERTM)
2925 l2cap_ertm_init(chan);
2926
2927 l2cap_chan_ready(sk);
2928 }
2929
2930 done:
2931 bh_unlock_sock(sk);
2932 return 0;
2933 }
2934
2935 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2936 {
2937 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2938 struct l2cap_disconn_rsp rsp;
2939 u16 dcid, scid;
2940 struct l2cap_chan *chan;
2941 struct sock *sk;
2942
2943 scid = __le16_to_cpu(req->scid);
2944 dcid = __le16_to_cpu(req->dcid);
2945
2946 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2947
2948 chan = l2cap_get_chan_by_scid(conn, dcid);
2949 if (!chan)
2950 return 0;
2951
2952 sk = chan->sk;
2953
2954 rsp.dcid = cpu_to_le16(chan->scid);
2955 rsp.scid = cpu_to_le16(chan->dcid);
2956 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2957
2958 sk->sk_shutdown = SHUTDOWN_MASK;
2959
2960 /* don't delete l2cap channel if sk is owned by user */
2961 if (sock_owned_by_user(sk)) {
2962 l2cap_state_change(chan, BT_DISCONN);
2963 __clear_chan_timer(chan);
2964 __set_chan_timer(chan, HZ / 5);
2965 bh_unlock_sock(sk);
2966 return 0;
2967 }
2968
2969 l2cap_chan_del(chan, ECONNRESET);
2970 bh_unlock_sock(sk);
2971
2972 chan->ops->close(chan->data);
2973 return 0;
2974 }
2975
2976 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2977 {
2978 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2979 u16 dcid, scid;
2980 struct l2cap_chan *chan;
2981 struct sock *sk;
2982
2983 scid = __le16_to_cpu(rsp->scid);
2984 dcid = __le16_to_cpu(rsp->dcid);
2985
2986 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2987
2988 chan = l2cap_get_chan_by_scid(conn, scid);
2989 if (!chan)
2990 return 0;
2991
2992 sk = chan->sk;
2993
2994 /* don't delete l2cap channel if sk is owned by user */
2995 if (sock_owned_by_user(sk)) {
2996 l2cap_state_change(chan, BT_DISCONN);
2997 __clear_chan_timer(chan);
2998 __set_chan_timer(chan, HZ / 5);
2999 bh_unlock_sock(sk);
3000 return 0;
3001 }
3002
3003 l2cap_chan_del(chan, 0);
3004 bh_unlock_sock(sk);
3005
3006 chan->ops->close(chan->data);
3007 return 0;
3008 }
3009
3010 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3011 {
3012 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3013 u16 type;
3014
3015 type = __le16_to_cpu(req->type);
3016
3017 BT_DBG("type 0x%4.4x", type);
3018
3019 if (type == L2CAP_IT_FEAT_MASK) {
3020 u8 buf[8];
3021 u32 feat_mask = l2cap_feat_mask;
3022 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3023 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3024 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3025 if (!disable_ertm)
3026 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3027 | L2CAP_FEAT_FCS;
3028 if (enable_hs)
3029 feat_mask |= L2CAP_FEAT_EXT_FLOW
3030 | L2CAP_FEAT_EXT_WINDOW;
3031
3032 put_unaligned_le32(feat_mask, rsp->data);
3033 l2cap_send_cmd(conn, cmd->ident,
3034 L2CAP_INFO_RSP, sizeof(buf), buf);
3035 } else if (type == L2CAP_IT_FIXED_CHAN) {
3036 u8 buf[12];
3037 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3038 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3039 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3040 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3041 l2cap_send_cmd(conn, cmd->ident,
3042 L2CAP_INFO_RSP, sizeof(buf), buf);
3043 } else {
3044 struct l2cap_info_rsp rsp;
3045 rsp.type = cpu_to_le16(type);
3046 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3047 l2cap_send_cmd(conn, cmd->ident,
3048 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3049 }
3050
3051 return 0;
3052 }
3053
3054 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3055 {
3056 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3057 u16 type, result;
3058
3059 type = __le16_to_cpu(rsp->type);
3060 result = __le16_to_cpu(rsp->result);
3061
3062 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3063
3064 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3065 if (cmd->ident != conn->info_ident ||
3066 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3067 return 0;
3068
3069 del_timer(&conn->info_timer);
3070
3071 if (result != L2CAP_IR_SUCCESS) {
3072 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3073 conn->info_ident = 0;
3074
3075 l2cap_conn_start(conn);
3076
3077 return 0;
3078 }
3079
3080 if (type == L2CAP_IT_FEAT_MASK) {
3081 conn->feat_mask = get_unaligned_le32(rsp->data);
3082
3083 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3084 struct l2cap_info_req req;
3085 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3086
3087 conn->info_ident = l2cap_get_ident(conn);
3088
3089 l2cap_send_cmd(conn, conn->info_ident,
3090 L2CAP_INFO_REQ, sizeof(req), &req);
3091 } else {
3092 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3093 conn->info_ident = 0;
3094
3095 l2cap_conn_start(conn);
3096 }
3097 } else if (type == L2CAP_IT_FIXED_CHAN) {
3098 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3099 conn->info_ident = 0;
3100
3101 l2cap_conn_start(conn);
3102 }
3103
3104 return 0;
3105 }
3106
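/* AMP Create Channel Request handler.  For now this is only a stub for the
 * high-speed signalling guarded by enable_hs: the request is validated and
 * then always answered with L2CAP_CR_NO_MEM, i.e. every creation attempt on
 * an AMP controller is rejected, presumably until full AMP channel support
 * is in place.
 */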
3107 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3108 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3109 void *data)
3110 {
3111 struct l2cap_create_chan_req *req = data;
3112 struct l2cap_create_chan_rsp rsp;
3113 u16 psm, scid;
3114
3115 if (cmd_len != sizeof(*req))
3116 return -EPROTO;
3117
3118 if (!enable_hs)
3119 return -EINVAL;
3120
3121 psm = le16_to_cpu(req->psm);
3122 scid = le16_to_cpu(req->scid);
3123
3124 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3125
3126 /* Placeholder: Always reject */
3127 rsp.dcid = 0;
3128 rsp.scid = cpu_to_le16(scid);
3129 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
3130 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3131
3132 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3133 sizeof(rsp), &rsp);
3134
3135 return 0;
3136 }
3137
3138 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3139 struct l2cap_cmd_hdr *cmd, void *data)
3140 {
3141 BT_DBG("conn %p", conn);
3142
3143 return l2cap_connect_rsp(conn, cmd, data);
3144 }
3145
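/* Sanity-check LE connection parameter update values.  Per the LE connection
 * parameter conventions, min/max are the connection interval bounds in
 * 1.25 ms units, to_multiplier is the supervision timeout in 10 ms units and
 * latency is the number of connection events the slave may skip; the factor
 * of 8 below converts the timeout into interval units (10 ms / 1.25 ms).
 * A worked example, purely illustrative: with to_multiplier = 100 (1 s) and
 * max = 80 (100 ms) the timeout spans 100 * 8 / 80 = 10 intervals, so
 * max_latency is 9 and a requested latency of 10 or more is rejected.
 */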
3146 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3147 u16 to_multiplier)
3148 {
3149 u16 max_latency;
3150
3151 if (min > max || min < 6 || max > 3200)
3152 return -EINVAL;
3153
3154 if (to_multiplier < 10 || to_multiplier > 3200)
3155 return -EINVAL;
3156
3157 if (max >= to_multiplier * 8)
3158 return -EINVAL;
3159
3160 max_latency = (to_multiplier * 8 / max) - 1;
3161 if (latency > 499 || latency > max_latency)
3162 return -EINVAL;
3163
3164 return 0;
3165 }
3166
3167 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3168 struct l2cap_cmd_hdr *cmd, u8 *data)
3169 {
3170 struct hci_conn *hcon = conn->hcon;
3171 struct l2cap_conn_param_update_req *req;
3172 struct l2cap_conn_param_update_rsp rsp;
3173 u16 min, max, latency, to_multiplier, cmd_len;
3174 int err;
3175
3176 if (!(hcon->link_mode & HCI_LM_MASTER))
3177 return -EINVAL;
3178
3179 cmd_len = __le16_to_cpu(cmd->len);
3180 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3181 return -EPROTO;
3182
3183 req = (struct l2cap_conn_param_update_req *) data;
3184 min = __le16_to_cpu(req->min);
3185 max = __le16_to_cpu(req->max);
3186 latency = __le16_to_cpu(req->latency);
3187 to_multiplier = __le16_to_cpu(req->to_multiplier);
3188
3189 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3190 min, max, latency, to_multiplier);
3191
3192 memset(&rsp, 0, sizeof(rsp));
3193
3194 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3195 if (err)
3196 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3197 else
3198 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3199
3200 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3201 sizeof(rsp), &rsp);
3202
3203 if (!err)
3204 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3205
3206 return 0;
3207 }
3208
3209 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3210 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3211 {
3212 int err = 0;
3213
3214 switch (cmd->code) {
3215 case L2CAP_COMMAND_REJ:
3216 l2cap_command_rej(conn, cmd, data);
3217 break;
3218
3219 case L2CAP_CONN_REQ:
3220 err = l2cap_connect_req(conn, cmd, data);
3221 break;
3222
3223 case L2CAP_CONN_RSP:
3224 err = l2cap_connect_rsp(conn, cmd, data);
3225 break;
3226
3227 case L2CAP_CONF_REQ:
3228 err = l2cap_config_req(conn, cmd, cmd_len, data);
3229 break;
3230
3231 case L2CAP_CONF_RSP:
3232 err = l2cap_config_rsp(conn, cmd, data);
3233 break;
3234
3235 case L2CAP_DISCONN_REQ:
3236 err = l2cap_disconnect_req(conn, cmd, data);
3237 break;
3238
3239 case L2CAP_DISCONN_RSP:
3240 err = l2cap_disconnect_rsp(conn, cmd, data);
3241 break;
3242
3243 case L2CAP_ECHO_REQ:
3244 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3245 break;
3246
3247 case L2CAP_ECHO_RSP:
3248 break;
3249
3250 case L2CAP_INFO_REQ:
3251 err = l2cap_information_req(conn, cmd, data);
3252 break;
3253
3254 case L2CAP_INFO_RSP:
3255 err = l2cap_information_rsp(conn, cmd, data);
3256 break;
3257
3258 case L2CAP_CREATE_CHAN_REQ:
3259 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3260 break;
3261
3262 case L2CAP_CREATE_CHAN_RSP:
3263 err = l2cap_create_channel_rsp(conn, cmd, data);
3264 break;
3265
3266 default:
3267 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3268 err = -EINVAL;
3269 break;
3270 }
3271
3272 return err;
3273 }
3274
3275 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3276 struct l2cap_cmd_hdr *cmd, u8 *data)
3277 {
3278 switch (cmd->code) {
3279 case L2CAP_COMMAND_REJ:
3280 return 0;
3281
3282 case L2CAP_CONN_PARAM_UPDATE_REQ:
3283 return l2cap_conn_param_update_req(conn, cmd, data);
3284
3285 case L2CAP_CONN_PARAM_UPDATE_RSP:
3286 return 0;
3287
3288 default:
3289 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3290 return -EINVAL;
3291 }
3292 }
3293
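/* Dispatch the signalling channel payload.  A single frame may carry several
 * commands back to back, each starting with an L2CAP_CMD_HDR_SIZE header of
 * the form
 *
 *     | code (1) | ident (1) | len (2, little endian) | data (len octets) |
 *
 * so the loop below peels one header at a time and hands the command to the
 * BR/EDR or LE handler depending on the link type.  Commands with a bogus
 * length or a zero ident abort the loop; handler errors are answered with a
 * Command Reject.
 */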
3294 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3295 struct sk_buff *skb)
3296 {
3297 u8 *data = skb->data;
3298 int len = skb->len;
3299 struct l2cap_cmd_hdr cmd;
3300 int err;
3301
3302 l2cap_raw_recv(conn, skb);
3303
3304 while (len >= L2CAP_CMD_HDR_SIZE) {
3305 u16 cmd_len;
3306 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3307 data += L2CAP_CMD_HDR_SIZE;
3308 len -= L2CAP_CMD_HDR_SIZE;
3309
3310 cmd_len = le16_to_cpu(cmd.len);
3311
3312 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3313
3314 if (cmd_len > len || !cmd.ident) {
3315 BT_DBG("corrupted command");
3316 break;
3317 }
3318
3319 if (conn->hcon->type == LE_LINK)
3320 err = l2cap_le_sig_cmd(conn, &cmd, data);
3321 else
3322 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3323
3324 if (err) {
3325 struct l2cap_cmd_rej_unk rej;
3326
3327 BT_ERR("Wrong link type (%d)", err);
3328
3329 /* FIXME: Map err to a valid reason */
3330 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3331 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3332 }
3333
3334 data += cmd_len;
3335 len -= cmd_len;
3336 }
3337
3338 kfree_skb(skb);
3339 }
3340
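/* Verify and strip the FCS of an incoming ERTM/streaming frame.  The CRC-16
 * covers the basic L2CAP header plus the control field (hence hdr_size being
 * the extended or enhanced control layout) and all payload bytes; the two FCS
 * octets themselves are trimmed from the skb before the comparison.  Returns
 * -EBADMSG on a mismatch so the caller can drop the frame.
 */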
3341 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3342 {
3343 u16 our_fcs, rcv_fcs;
3344 int hdr_size;
3345
3346 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3347 hdr_size = L2CAP_EXT_HDR_SIZE;
3348 else
3349 hdr_size = L2CAP_ENH_HDR_SIZE;
3350
3351 if (chan->fcs == L2CAP_FCS_CRC16) {
3352 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3353 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3354 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3355
3356 if (our_fcs != rcv_fcs)
3357 return -EBADMSG;
3358 }
3359 return 0;
3360 }
3361
3362 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3363 {
3364 u32 control = 0;
3365
3366 chan->frames_sent = 0;
3367
3368 control |= __set_reqseq(chan, chan->buffer_seq);
3369
3370 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3371 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3372 l2cap_send_sframe(chan, control);
3373 set_bit(CONN_RNR_SENT, &chan->conn_state);
3374 }
3375
3376 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3377 l2cap_retransmit_frames(chan);
3378
3379 l2cap_ertm_send(chan);
3380
3381 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3382 chan->frames_sent == 0) {
3383 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3384 l2cap_send_sframe(chan, control);
3385 }
3386 }
3387
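/* Insert an out-of-order I-frame into the SREJ queue, kept sorted by the
 * frame's sequence offset from buffer_seq so that l2cap_check_srej_gap() can
 * drain it in order once the missing frames arrive.  A duplicate tx_seq is
 * refused with -EINVAL.
 */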
3388 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3389 {
3390 struct sk_buff *next_skb;
3391 int tx_seq_offset, next_tx_seq_offset;
3392
3393 bt_cb(skb)->tx_seq = tx_seq;
3394 bt_cb(skb)->sar = sar;
3395
3396 next_skb = skb_peek(&chan->srej_q);
3397 if (!next_skb) {
3398 __skb_queue_tail(&chan->srej_q, skb);
3399 return 0;
3400 }
3401
3402 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3403
3404 do {
3405 if (bt_cb(next_skb)->tx_seq == tx_seq)
3406 return -EINVAL;
3407
3408 next_tx_seq_offset = __seq_offset(chan,
3409 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3410
3411 if (next_tx_seq_offset > tx_seq_offset) {
3412 __skb_queue_before(&chan->srej_q, next_skb, skb);
3413 return 0;
3414 }
3415
3416 if (skb_queue_is_last(&chan->srej_q, next_skb))
3417 break;
3418
3419 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
3420
3421 __skb_queue_tail(&chan->srej_q, skb);
3422
3423 return 0;
3424 }
3425
3426 static void append_skb_frag(struct sk_buff *skb,
3427 struct sk_buff *new_frag, struct sk_buff **last_frag)
3428 {
3429 /* skb->len reflects data in skb as well as all fragments
3430 * skb->data_len reflects only data in fragments
3431 */
3432 if (!skb_has_frag_list(skb))
3433 skb_shinfo(skb)->frag_list = new_frag;
3434
3435 new_frag->next = NULL;
3436
3437 (*last_frag)->next = new_frag;
3438 *last_frag = new_frag;
3439
3440 skb->len += new_frag->len;
3441 skb->data_len += new_frag->len;
3442 skb->truesize += new_frag->truesize;
3443 }
3444
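/* Reassemble a segmented SDU from its I-frames according to the SAR bits in
 * the control field: an unsegmented frame is delivered directly, a start
 * frame carries the total SDU length and opens chan->sdu, continuation and
 * end frames are chained onto it with append_skb_frag(), and the completed
 * SDU is pushed up through chan->ops->recv().  Oversized or out-of-place
 * fragments leave err set so the partial SDU is discarded below.
 */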
3445 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3446 {
3447 int err = -EINVAL;
3448
3449 switch (__get_ctrl_sar(chan, control)) {
3450 case L2CAP_SAR_UNSEGMENTED:
3451 if (chan->sdu)
3452 break;
3453
3454 err = chan->ops->recv(chan->data, skb);
3455 break;
3456
3457 case L2CAP_SAR_START:
3458 if (chan->sdu)
3459 break;
3460
3461 chan->sdu_len = get_unaligned_le16(skb->data);
3462 skb_pull(skb, L2CAP_SDULEN_SIZE);
3463
3464 if (chan->sdu_len > chan->imtu) {
3465 err = -EMSGSIZE;
3466 break;
3467 }
3468
3469 if (skb->len >= chan->sdu_len)
3470 break;
3471
3472 chan->sdu = skb;
3473 chan->sdu_last_frag = skb;
3474
3475 skb = NULL;
3476 err = 0;
3477 break;
3478
3479 case L2CAP_SAR_CONTINUE:
3480 if (!chan->sdu)
3481 break;
3482
3483 append_skb_frag(chan->sdu, skb,
3484 &chan->sdu_last_frag);
3485 skb = NULL;
3486
3487 if (chan->sdu->len >= chan->sdu_len)
3488 break;
3489
3490 err = 0;
3491 break;
3492
3493 case L2CAP_SAR_END:
3494 if (!chan->sdu)
3495 break;
3496
3497 append_skb_frag(chan->sdu, skb,
3498 &chan->sdu_last_frag);
3499 skb = NULL;
3500
3501 if (chan->sdu->len != chan->sdu_len)
3502 break;
3503
3504 err = chan->ops->recv(chan->data, chan->sdu);
3505
3506 if (!err) {
3507 /* Reassembly complete */
3508 chan->sdu = NULL;
3509 chan->sdu_last_frag = NULL;
3510 chan->sdu_len = 0;
3511 }
3512 break;
3513 }
3514
3515 if (err) {
3516 kfree_skb(skb);
3517 kfree_skb(chan->sdu);
3518 chan->sdu = NULL;
3519 chan->sdu_last_frag = NULL;
3520 chan->sdu_len = 0;
3521 }
3522
3523 return err;
3524 }
3525
3526 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3527 {
3528 u32 control;
3529
3530 BT_DBG("chan %p, Enter local busy", chan);
3531
3532 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3533
3534 control = __set_reqseq(chan, chan->buffer_seq);
3535 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3536 l2cap_send_sframe(chan, control);
3537
3538 set_bit(CONN_RNR_SENT, &chan->conn_state);
3539
3540 __clear_ack_timer(chan);
3541 }
3542
3543 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3544 {
3545 u32 control;
3546
3547 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3548 goto done;
3549
3550 control = __set_reqseq(chan, chan->buffer_seq);
3551 control |= __set_ctrl_poll(chan);
3552 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3553 l2cap_send_sframe(chan, control);
3554 chan->retry_count = 1;
3555
3556 __clear_retrans_timer(chan);
3557 __set_monitor_timer(chan);
3558
3559 set_bit(CONN_WAIT_F, &chan->conn_state);
3560
3561 done:
3562 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3563 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3564
3565 BT_DBG("chan %p, Exit local busy", chan);
3566 }
3567
3568 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3569 {
3570 if (chan->mode == L2CAP_MODE_ERTM) {
3571 if (busy)
3572 l2cap_ertm_enter_local_busy(chan);
3573 else
3574 l2cap_ertm_exit_local_busy(chan);
3575 }
3576 }
3577
3578 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3579 {
3580 struct sk_buff *skb;
3581 u32 control;
3582
3583 while ((skb = skb_peek(&chan->srej_q)) &&
3584 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3585 int err;
3586
3587 if (bt_cb(skb)->tx_seq != tx_seq)
3588 break;
3589
3590 skb = skb_dequeue(&chan->srej_q);
3591 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3592 err = l2cap_reassemble_sdu(chan, skb, control);
3593
3594 if (err < 0) {
3595 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3596 break;
3597 }
3598
3599 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3600 tx_seq = __next_seq(chan, tx_seq);
3601 }
3602 }
3603
3604 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3605 {
3606 struct srej_list *l, *tmp;
3607 u32 control;
3608
3609 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3610 if (l->tx_seq == tx_seq) {
3611 list_del(&l->list);
3612 kfree(l);
3613 return;
3614 }
3615 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3616 control |= __set_reqseq(chan, l->tx_seq);
3617 l2cap_send_sframe(chan, control);
3618 list_del(&l->list);
3619 list_add_tail(&l->list, &chan->srej_l);
3620 }
3621 }
3622
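/* Send one SREJ S-frame for every sequence number between the expected
 * tx_seq and the one actually received, recording each requested number on
 * chan->srej_l so the retransmissions can be matched up later.  Note that the
 * kzalloc() of the srej_list entry below is used without a NULL check, so an
 * atomic allocation failure here would be a problem.
 */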
3623 static void l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3624 {
3625 struct srej_list *new;
3626 u32 control;
3627
3628 while (tx_seq != chan->expected_tx_seq) {
3629 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3630 control |= __set_reqseq(chan, chan->expected_tx_seq);
3631 l2cap_send_sframe(chan, control);
3632
3633 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3634 new->tx_seq = chan->expected_tx_seq;
3635
3636 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3637
3638 list_add_tail(&new->list, &chan->srej_l);
3639 }
3640
3641 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3642 }
3643
3644 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3645 {
3646 u16 tx_seq = __get_txseq(chan, rx_control);
3647 u16 req_seq = __get_reqseq(chan, rx_control);
3648 u8 sar = __get_ctrl_sar(chan, rx_control);
3649 int tx_seq_offset, expected_tx_seq_offset;
3650 int num_to_ack = (chan->tx_win/6) + 1;
3651 int err = 0;
3652
3653 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3654 tx_seq, rx_control);
3655
3656 if (__is_ctrl_final(chan, rx_control) &&
3657 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3658 __clear_monitor_timer(chan);
3659 if (chan->unacked_frames > 0)
3660 __set_retrans_timer(chan);
3661 clear_bit(CONN_WAIT_F, &chan->conn_state);
3662 }
3663
3664 chan->expected_ack_seq = req_seq;
3665 l2cap_drop_acked_frames(chan);
3666
3667 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3668
3669 /* invalid tx_seq */
3670 if (tx_seq_offset >= chan->tx_win) {
3671 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3672 goto drop;
3673 }
3674
3675 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3676 goto drop;
3677
3678 if (tx_seq == chan->expected_tx_seq)
3679 goto expected;
3680
3681 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3682 struct srej_list *first;
3683
3684 first = list_first_entry(&chan->srej_l,
3685 struct srej_list, list);
3686 if (tx_seq == first->tx_seq) {
3687 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3688 l2cap_check_srej_gap(chan, tx_seq);
3689
3690 list_del(&first->list);
3691 kfree(first);
3692
3693 if (list_empty(&chan->srej_l)) {
3694 chan->buffer_seq = chan->buffer_seq_srej;
3695 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3696 l2cap_send_ack(chan);
3697 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3698 }
3699 } else {
3700 struct srej_list *l;
3701
3702 /* duplicated tx_seq */
3703 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3704 goto drop;
3705
3706 list_for_each_entry(l, &chan->srej_l, list) {
3707 if (l->tx_seq == tx_seq) {
3708 l2cap_resend_srejframe(chan, tx_seq);
3709 return 0;
3710 }
3711 }
3712 l2cap_send_srejframe(chan, tx_seq);
3713 }
3714 } else {
3715 expected_tx_seq_offset = __seq_offset(chan,
3716 chan->expected_tx_seq, chan->buffer_seq);
3717
3718 /* duplicated tx_seq */
3719 if (tx_seq_offset < expected_tx_seq_offset)
3720 goto drop;
3721
3722 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3723
3724 BT_DBG("chan %p, Enter SREJ", chan);
3725
3726 INIT_LIST_HEAD(&chan->srej_l);
3727 chan->buffer_seq_srej = chan->buffer_seq;
3728
3729 __skb_queue_head_init(&chan->srej_q);
3730 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3731
3732 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3733
3734 l2cap_send_srejframe(chan, tx_seq);
3735
3736 __clear_ack_timer(chan);
3737 }
3738 return 0;
3739
3740 expected:
3741 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3742
3743 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3744 bt_cb(skb)->tx_seq = tx_seq;
3745 bt_cb(skb)->sar = sar;
3746 __skb_queue_tail(&chan->srej_q, skb);
3747 return 0;
3748 }
3749
3750 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3751 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3752
3753 if (err < 0) {
3754 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3755 return err;
3756 }
3757
3758 if (__is_ctrl_final(chan, rx_control)) {
3759 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3760 l2cap_retransmit_frames(chan);
3761 }
3762
3763 __set_ack_timer(chan);
3764
3765 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3766 if (chan->num_acked == num_to_ack - 1)
3767 l2cap_send_ack(chan);
3768
3769 return 0;
3770
3771 drop:
3772 kfree_skb(skb);
3773 return 0;
3774 }
3775
3776 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3777 {
3778 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3779 __get_reqseq(chan, rx_control), rx_control);
3780
3781 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3782 l2cap_drop_acked_frames(chan);
3783
3784 if (__is_ctrl_poll(chan, rx_control)) {
3785 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3786 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3787 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3788 (chan->unacked_frames > 0))
3789 __set_retrans_timer(chan);
3790
3791 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3792 l2cap_send_srejtail(chan);
3793 } else {
3794 l2cap_send_i_or_rr_or_rnr(chan);
3795 }
3796
3797 } else if (__is_ctrl_final(chan, rx_control)) {
3798 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3799
3800 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3801 l2cap_retransmit_frames(chan);
3802
3803 } else {
3804 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3805 (chan->unacked_frames > 0))
3806 __set_retrans_timer(chan);
3807
3808 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3809 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3810 l2cap_send_ack(chan);
3811 else
3812 l2cap_ertm_send(chan);
3813 }
3814 }
3815
3816 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
3817 {
3818 u16 tx_seq = __get_reqseq(chan, rx_control);
3819
3820 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3821
3822 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3823
3824 chan->expected_ack_seq = tx_seq;
3825 l2cap_drop_acked_frames(chan);
3826
3827 if (__is_ctrl_final(chan, rx_control)) {
3828 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3829 l2cap_retransmit_frames(chan);
3830 } else {
3831 l2cap_retransmit_frames(chan);
3832
3833 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3834 set_bit(CONN_REJ_ACT, &chan->conn_state);
3835 }
3836 }
3837 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
3838 {
3839 u16 tx_seq = __get_reqseq(chan, rx_control);
3840
3841 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3842
3843 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3844
3845 if (__is_ctrl_poll(chan, rx_control)) {
3846 chan->expected_ack_seq = tx_seq;
3847 l2cap_drop_acked_frames(chan);
3848
3849 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3850 l2cap_retransmit_one_frame(chan, tx_seq);
3851
3852 l2cap_ertm_send(chan);
3853
3854 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3855 chan->srej_save_reqseq = tx_seq;
3856 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3857 }
3858 } else if (__is_ctrl_final(chan, rx_control)) {
3859 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3860 chan->srej_save_reqseq == tx_seq)
3861 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3862 else
3863 l2cap_retransmit_one_frame(chan, tx_seq);
3864 } else {
3865 l2cap_retransmit_one_frame(chan, tx_seq);
3866 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3867 chan->srej_save_reqseq = tx_seq;
3868 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3869 }
3870 }
3871 }
3872
3873 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
3874 {
3875 u16 tx_seq = __get_reqseq(chan, rx_control);
3876
3877 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3878
3879 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3880 chan->expected_ack_seq = tx_seq;
3881 l2cap_drop_acked_frames(chan);
3882
3883 if (__is_ctrl_poll(chan, rx_control))
3884 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3885
3886 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3887 __clear_retrans_timer(chan);
3888 if (__is_ctrl_poll(chan, rx_control))
3889 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3890 return;
3891 }
3892
3893 if (__is_ctrl_poll(chan, rx_control)) {
3894 l2cap_send_srejtail(chan);
3895 } else {
3896 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
3897 l2cap_send_sframe(chan, rx_control);
3898 }
3899 }
3900
3901 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3902 {
3903 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
3904
3905 if (__is_ctrl_final(chan, rx_control) &&
3906 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3907 __clear_monitor_timer(chan);
3908 if (chan->unacked_frames > 0)
3909 __set_retrans_timer(chan);
3910 clear_bit(CONN_WAIT_F, &chan->conn_state);
3911 }
3912
3913 switch (__get_ctrl_super(chan, rx_control)) {
3914 case L2CAP_SUPER_RR:
3915 l2cap_data_channel_rrframe(chan, rx_control);
3916 break;
3917
3918 case L2CAP_SUPER_REJ:
3919 l2cap_data_channel_rejframe(chan, rx_control);
3920 break;
3921
3922 case L2CAP_SUPER_SREJ:
3923 l2cap_data_channel_srejframe(chan, rx_control);
3924 break;
3925
3926 case L2CAP_SUPER_RNR:
3927 l2cap_data_channel_rnrframe(chan, rx_control);
3928 break;
3929 }
3930
3931 kfree_skb(skb);
3932 return 0;
3933 }
3934
3935 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3936 {
3937 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3938 u32 control;
3939 u16 req_seq;
3940 int len, next_tx_seq_offset, req_seq_offset;
3941
3942 control = __get_control(chan, skb->data);
3943 skb_pull(skb, __ctrl_size(chan));
3944 len = skb->len;
3945
3946 /*
3947 * We can just drop the corrupted I-frame here.
3948 * The receiver will notice it is missing and start the proper
3949 * recovery procedure, requesting retransmission.
3950 */
3951 if (l2cap_check_fcs(chan, skb))
3952 goto drop;
3953
3954 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
3955 len -= L2CAP_SDULEN_SIZE;
3956
3957 if (chan->fcs == L2CAP_FCS_CRC16)
3958 len -= L2CAP_FCS_SIZE;
3959
3960 if (len > chan->mps) {
3961 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3962 goto drop;
3963 }
3964
3965 req_seq = __get_reqseq(chan, control);
3966
3967 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
3968
3969 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
3970 chan->expected_ack_seq);
3971
3972 /* check for invalid req-seq */
3973 if (req_seq_offset > next_tx_seq_offset) {
3974 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3975 goto drop;
3976 }
3977
3978 if (!__is_sframe(chan, control)) {
3979 if (len < 0) {
3980 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3981 goto drop;
3982 }
3983
3984 l2cap_data_channel_iframe(chan, control, skb);
3985 } else {
3986 if (len != 0) {
3987 BT_ERR("%d", len);
3988 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3989 goto drop;
3990 }
3991
3992 l2cap_data_channel_sframe(chan, control, skb);
3993 }
3994
3995 return 0;
3996
3997 drop:
3998 kfree_skb(skb);
3999 return 0;
4000 }
4001
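/* Deliver an incoming data frame to the channel identified by its CID.  The
 * receive path depends on the negotiated mode: basic mode frames are passed
 * straight to the socket (and dropped if they exceed the incoming MTU), ERTM
 * frames go through l2cap_ertm_data_rcv() or onto the socket backlog when the
 * socket is owned by user context, and streaming mode strips the control
 * field, checks the FCS and discards any partially reassembled SDU whenever a
 * sequence gap is detected.
 */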
4002 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4003 {
4004 struct l2cap_chan *chan;
4005 struct sock *sk = NULL;
4006 u32 control;
4007 u16 tx_seq;
4008 int len;
4009
4010 chan = l2cap_get_chan_by_scid(conn, cid);
4011 if (!chan) {
4012 BT_DBG("unknown cid 0x%4.4x", cid);
4013 goto drop;
4014 }
4015
4016 sk = chan->sk;
4017
4018 BT_DBG("chan %p, len %d", chan, skb->len);
4019
4020 if (chan->state != BT_CONNECTED)
4021 goto drop;
4022
4023 switch (chan->mode) {
4024 case L2CAP_MODE_BASIC:
4025 /* If the socket recv buffer overflows we drop data here,
4026 * which is *bad* because L2CAP has to be reliable.
4027 * But we don't have any other choice: L2CAP doesn't
4028 * provide a flow control mechanism. */
4029
4030 if (chan->imtu < skb->len)
4031 goto drop;
4032
4033 if (!chan->ops->recv(chan->data, skb))
4034 goto done;
4035 break;
4036
4037 case L2CAP_MODE_ERTM:
4038 if (!sock_owned_by_user(sk)) {
4039 l2cap_ertm_data_rcv(sk, skb);
4040 } else {
4041 if (sk_add_backlog(sk, skb))
4042 goto drop;
4043 }
4044
4045 goto done;
4046
4047 case L2CAP_MODE_STREAMING:
4048 control = __get_control(chan, skb->data);
4049 skb_pull(skb, __ctrl_size(chan));
4050 len = skb->len;
4051
4052 if (l2cap_check_fcs(chan, skb))
4053 goto drop;
4054
4055 if (__is_sar_start(chan, control))
4056 len -= L2CAP_SDULEN_SIZE;
4057
4058 if (chan->fcs == L2CAP_FCS_CRC16)
4059 len -= L2CAP_FCS_SIZE;
4060
4061 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4062 goto drop;
4063
4064 tx_seq = __get_txseq(chan, control);
4065
4066 if (chan->expected_tx_seq != tx_seq) {
4067 /* Frame(s) missing - must discard partial SDU */
4068 kfree_skb(chan->sdu);
4069 chan->sdu = NULL;
4070 chan->sdu_last_frag = NULL;
4071 chan->sdu_len = 0;
4072
4073 /* TODO: Notify userland of missing data */
4074 }
4075
4076 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4077
4078 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4079 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4080
4081 goto done;
4082
4083 default:
4084 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4085 break;
4086 }
4087
4088 drop:
4089 kfree_skb(skb);
4090
4091 done:
4092 if (sk)
4093 bh_unlock_sock(sk);
4094
4095 return 0;
4096 }
4097
4098 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4099 {
4100 struct sock *sk = NULL;
4101 struct l2cap_chan *chan;
4102
4103 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4104 if (!chan)
4105 goto drop;
4106
4107 sk = chan->sk;
4108
4109 bh_lock_sock(sk);
4110
4111 BT_DBG("sk %p, len %d", sk, skb->len);
4112
4113 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4114 goto drop;
4115
4116 if (chan->imtu < skb->len)
4117 goto drop;
4118
4119 if (!chan->ops->recv(chan->data, skb))
4120 goto done;
4121
4122 drop:
4123 kfree_skb(skb);
4124
4125 done:
4126 if (sk)
4127 bh_unlock_sock(sk);
4128 return 0;
4129 }
4130
4131 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4132 {
4133 struct sock *sk = NULL;
4134 struct l2cap_chan *chan;
4135
4136 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4137 if (!chan)
4138 goto drop;
4139
4140 sk = chan->sk;
4141
4142 bh_lock_sock(sk);
4143
4144 BT_DBG("sk %p, len %d", sk, skb->len);
4145
4146 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4147 goto drop;
4148
4149 if (chan->imtu < skb->len)
4150 goto drop;
4151
4152 if (!chan->ops->recv(chan->data, skb))
4153 goto done;
4154
4155 drop:
4156 kfree_skb(skb);
4157
4158 done:
4159 if (sk)
4160 bh_unlock_sock(sk);
4161 return 0;
4162 }
4163
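/* Entry point for a complete L2CAP frame.  The basic header
 *
 *     | len (2, little endian) | cid (2, little endian) | payload |
 *
 * is stripped first and the payload routed by CID: the signalling CIDs go to
 * l2cap_sig_channel(), the connectionless CID to l2cap_conless_channel()
 * (after pulling the 2-byte PSM), the LE data CID to the ATT handler, the SMP
 * CID to smp_sig_channel(), and everything else is treated as a
 * connection-oriented data channel.
 */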
4164 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4165 {
4166 struct l2cap_hdr *lh = (void *) skb->data;
4167 u16 cid, len;
4168 __le16 psm;
4169
4170 skb_pull(skb, L2CAP_HDR_SIZE);
4171 cid = __le16_to_cpu(lh->cid);
4172 len = __le16_to_cpu(lh->len);
4173
4174 if (len != skb->len) {
4175 kfree_skb(skb);
4176 return;
4177 }
4178
4179 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4180
4181 switch (cid) {
4182 case L2CAP_CID_LE_SIGNALING:
4183 case L2CAP_CID_SIGNALING:
4184 l2cap_sig_channel(conn, skb);
4185 break;
4186
4187 case L2CAP_CID_CONN_LESS:
4188 psm = get_unaligned_le16(skb->data);
4189 skb_pull(skb, 2);
4190 l2cap_conless_channel(conn, psm, skb);
4191 break;
4192
4193 case L2CAP_CID_LE_DATA:
4194 l2cap_att_channel(conn, cid, skb);
4195 break;
4196
4197 case L2CAP_CID_SMP:
4198 if (smp_sig_channel(conn, skb))
4199 l2cap_conn_del(conn->hcon, EACCES);
4200 break;
4201
4202 default:
4203 l2cap_data_channel(conn, cid, skb);
4204 break;
4205 }
4206 }
4207
4208 /* ---- L2CAP interface with lower layer (HCI) ---- */
4209
4210 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4211 {
4212 int exact = 0, lm1 = 0, lm2 = 0;
4213 struct l2cap_chan *c;
4214
4215 if (type != ACL_LINK)
4216 return -EINVAL;
4217
4218 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4219
4220 /* Find listening sockets and check their link_mode */
4221 read_lock(&chan_list_lock);
4222 list_for_each_entry(c, &chan_list, global_l) {
4223 struct sock *sk = c->sk;
4224
4225 if (c->state != BT_LISTEN)
4226 continue;
4227
4228 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4229 lm1 |= HCI_LM_ACCEPT;
4230 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4231 lm1 |= HCI_LM_MASTER;
4232 exact++;
4233 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4234 lm2 |= HCI_LM_ACCEPT;
4235 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4236 lm2 |= HCI_LM_MASTER;
4237 }
4238 }
4239 read_unlock(&chan_list_lock);
4240
4241 return exact ? lm1 : lm2;
4242 }
4243
4244 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4245 {
4246 struct l2cap_conn *conn;
4247
4248 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4249
4250 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4251 return -EINVAL;
4252
4253 if (!status) {
4254 conn = l2cap_conn_add(hcon, status);
4255 if (conn)
4256 l2cap_conn_ready(conn);
4257 } else
4258 l2cap_conn_del(hcon, bt_to_errno(status));
4259
4260 return 0;
4261 }
4262
4263 static int l2cap_disconn_ind(struct hci_conn *hcon)
4264 {
4265 struct l2cap_conn *conn = hcon->l2cap_data;
4266
4267 BT_DBG("hcon %p", hcon);
4268
4269 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4270 return 0x13;
4271
4272 return conn->disc_reason;
4273 }
4274
4275 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4276 {
4277 BT_DBG("hcon %p reason %d", hcon, reason);
4278
4279 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4280 return -EINVAL;
4281
4282 l2cap_conn_del(hcon, bt_to_errno(reason));
4283
4284 return 0;
4285 }
4286
4287 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4288 {
4289 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4290 return;
4291
4292 if (encrypt == 0x00) {
4293 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4294 __clear_chan_timer(chan);
4295 __set_chan_timer(chan, HZ * 5);
4296 } else if (chan->sec_level == BT_SECURITY_HIGH)
4297 l2cap_chan_close(chan, ECONNREFUSED);
4298 } else {
4299 if (chan->sec_level == BT_SECURITY_MEDIUM)
4300 __clear_chan_timer(chan);
4301 }
4302 }
4303
4304 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4305 {
4306 struct l2cap_conn *conn = hcon->l2cap_data;
4307 struct l2cap_chan *chan;
4308
4309 if (!conn)
4310 return 0;
4311
4312 BT_DBG("conn %p", conn);
4313
4314 if (hcon->type == LE_LINK) {
4315 smp_distribute_keys(conn, 0);
4316 del_timer(&conn->security_timer);
4317 }
4318
4319 read_lock(&conn->chan_lock);
4320
4321 list_for_each_entry(chan, &conn->chan_l, list) {
4322 struct sock *sk = chan->sk;
4323
4324 bh_lock_sock(sk);
4325
4326 BT_DBG("chan->scid %d", chan->scid);
4327
4328 if (chan->scid == L2CAP_CID_LE_DATA) {
4329 if (!status && encrypt) {
4330 chan->sec_level = hcon->sec_level;
4331 l2cap_chan_ready(sk);
4332 }
4333
4334 bh_unlock_sock(sk);
4335 continue;
4336 }
4337
4338 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4339 bh_unlock_sock(sk);
4340 continue;
4341 }
4342
4343 if (!status && (chan->state == BT_CONNECTED ||
4344 chan->state == BT_CONFIG)) {
4345 l2cap_check_encryption(chan, encrypt);
4346 bh_unlock_sock(sk);
4347 continue;
4348 }
4349
4350 if (chan->state == BT_CONNECT) {
4351 if (!status) {
4352 struct l2cap_conn_req req;
4353 req.scid = cpu_to_le16(chan->scid);
4354 req.psm = chan->psm;
4355
4356 chan->ident = l2cap_get_ident(conn);
4357 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4358
4359 l2cap_send_cmd(conn, chan->ident,
4360 L2CAP_CONN_REQ, sizeof(req), &req);
4361 } else {
4362 __clear_chan_timer(chan);
4363 __set_chan_timer(chan, HZ / 10);
4364 }
4365 } else if (chan->state == BT_CONNECT2) {
4366 struct l2cap_conn_rsp rsp;
4367 __u16 res, stat;
4368
4369 if (!status) {
4370 if (bt_sk(sk)->defer_setup) {
4371 struct sock *parent = bt_sk(sk)->parent;
4372 res = L2CAP_CR_PEND;
4373 stat = L2CAP_CS_AUTHOR_PEND;
4374 if (parent)
4375 parent->sk_data_ready(parent, 0);
4376 } else {
4377 l2cap_state_change(chan, BT_CONFIG);
4378 res = L2CAP_CR_SUCCESS;
4379 stat = L2CAP_CS_NO_INFO;
4380 }
4381 } else {
4382 l2cap_state_change(chan, BT_DISCONN);
4383 __set_chan_timer(chan, HZ / 10);
4384 res = L2CAP_CR_SEC_BLOCK;
4385 stat = L2CAP_CS_NO_INFO;
4386 }
4387
4388 rsp.scid = cpu_to_le16(chan->dcid);
4389 rsp.dcid = cpu_to_le16(chan->scid);
4390 rsp.result = cpu_to_le16(res);
4391 rsp.status = cpu_to_le16(stat);
4392 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4393 sizeof(rsp), &rsp);
4394 }
4395
4396 bh_unlock_sock(sk);
4397 }
4398
4399 read_unlock(&conn->chan_lock);
4400
4401 return 0;
4402 }
4403
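/* Reassemble L2CAP frames from (possibly fragmented) ACL data.  A start
 * fragment carries the Basic L2CAP header, which gives the total frame
 * length; continuation fragments are appended to conn->rx_skb until the
 * whole frame has arrived and can be passed to l2cap_recv_frame(). */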
4404 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4405 {
4406 struct l2cap_conn *conn = hcon->l2cap_data;
4407
4408 if (!conn)
4409 conn = l2cap_conn_add(hcon, 0);
4410
4411 if (!conn)
4412 goto drop;
4413
4414 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4415
4416 if (!(flags & ACL_CONT)) {
4417 struct l2cap_hdr *hdr;
4418 struct l2cap_chan *chan;
4419 u16 cid;
4420 int len;
4421
4422 if (conn->rx_len) {
4423 BT_ERR("Unexpected start frame (len %d)", skb->len);
4424 kfree_skb(conn->rx_skb);
4425 conn->rx_skb = NULL;
4426 conn->rx_len = 0;
4427 l2cap_conn_unreliable(conn, ECOMM);
4428 }
4429
4430 		/* A start fragment always begins with the Basic L2CAP header */
4431 if (skb->len < L2CAP_HDR_SIZE) {
4432 BT_ERR("Frame is too short (len %d)", skb->len);
4433 l2cap_conn_unreliable(conn, ECOMM);
4434 goto drop;
4435 }
4436
4437 hdr = (struct l2cap_hdr *) skb->data;
4438 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4439 cid = __le16_to_cpu(hdr->cid);
4440
4441 if (len == skb->len) {
4442 /* Complete frame received */
4443 l2cap_recv_frame(conn, skb);
4444 return 0;
4445 }
4446
4447 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4448
4449 if (skb->len > len) {
4450 BT_ERR("Frame is too long (len %d, expected len %d)",
4451 skb->len, len);
4452 l2cap_conn_unreliable(conn, ECOMM);
4453 goto drop;
4454 }
4455
4456 chan = l2cap_get_chan_by_scid(conn, cid);
4457
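		/* The channel's socket was bh-locked by l2cap_get_chan_by_scid();
		 * release it on both paths below. */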
4458 if (chan && chan->sk) {
4459 struct sock *sk = chan->sk;
4460
4461 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4462 BT_ERR("Frame exceeding recv MTU (len %d, "
4463 "MTU %d)", len,
4464 chan->imtu);
4465 bh_unlock_sock(sk);
4466 l2cap_conn_unreliable(conn, ECOMM);
4467 goto drop;
4468 }
4469 bh_unlock_sock(sk);
4470 }
4471
4472 /* Allocate skb for the complete frame (with header) */
4473 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4474 if (!conn->rx_skb)
4475 goto drop;
4476
4477 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4478 skb->len);
4479 conn->rx_len = len - skb->len;
4480 } else {
4481 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4482
4483 if (!conn->rx_len) {
4484 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4485 l2cap_conn_unreliable(conn, ECOMM);
4486 goto drop;
4487 }
4488
4489 if (skb->len > conn->rx_len) {
4490 BT_ERR("Fragment is too long (len %d, expected %d)",
4491 skb->len, conn->rx_len);
4492 kfree_skb(conn->rx_skb);
4493 conn->rx_skb = NULL;
4494 conn->rx_len = 0;
4495 l2cap_conn_unreliable(conn, ECOMM);
4496 goto drop;
4497 }
4498
4499 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4500 skb->len);
4501 conn->rx_len -= skb->len;
4502
4503 if (!conn->rx_len) {
4504 /* Complete frame received */
4505 l2cap_recv_frame(conn, conn->rx_skb);
4506 conn->rx_skb = NULL;
4507 }
4508 }
4509
4510 drop:
4511 kfree_skb(skb);
4512 return 0;
4513 }
4514
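/* debugfs: dump one line per known channel with source/destination
 * addresses, state, PSM, CIDs, MTUs, security level and mode. */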
4515 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4516 {
4517 struct l2cap_chan *c;
4518
4519 read_lock_bh(&chan_list_lock);
4520
4521 list_for_each_entry(c, &chan_list, global_l) {
4522 struct sock *sk = c->sk;
4523
4524 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4525 batostr(&bt_sk(sk)->src),
4526 batostr(&bt_sk(sk)->dst),
4527 c->state, __le16_to_cpu(c->psm),
4528 c->scid, c->dcid, c->imtu, c->omtu,
4529 c->sec_level, c->mode);
4530 }
4531
4532 read_unlock_bh(&chan_list_lock);
4533
4534 return 0;
4535 }
4536
4537 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4538 {
4539 return single_open(file, l2cap_debugfs_show, inode->i_private);
4540 }
4541
4542 static const struct file_operations l2cap_debugfs_fops = {
4543 .open = l2cap_debugfs_open,
4544 .read = seq_read,
4545 .llseek = seq_lseek,
4546 .release = single_release,
4547 };
4548
4549 static struct dentry *l2cap_debugfs;
4550
4551 static struct hci_proto l2cap_hci_proto = {
4552 .name = "L2CAP",
4553 .id = HCI_PROTO_L2CAP,
4554 .connect_ind = l2cap_connect_ind,
4555 .connect_cfm = l2cap_connect_cfm,
4556 .disconn_ind = l2cap_disconn_ind,
4557 .disconn_cfm = l2cap_disconn_cfm,
4558 .security_cfm = l2cap_security_cfm,
4559 .recv_acldata = l2cap_recv_acldata
4560 };
4561
4562 int __init l2cap_init(void)
4563 {
4564 int err;
4565
4566 err = l2cap_init_sockets();
4567 if (err < 0)
4568 return err;
4569
4570 err = hci_register_proto(&l2cap_hci_proto);
4571 if (err < 0) {
4572 BT_ERR("L2CAP protocol registration failed");
4573 bt_sock_unregister(BTPROTO_L2CAP);
4574 goto error;
4575 }
4576
4577 if (bt_debugfs) {
4578 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4579 bt_debugfs, NULL, &l2cap_debugfs_fops);
4580 if (!l2cap_debugfs)
4581 BT_ERR("Failed to create L2CAP debug file");
4582 }
4583
4584 return 0;
4585
4586 error:
4587 l2cap_cleanup_sockets();
4588 return err;
4589 }
4590
4591 void l2cap_exit(void)
4592 {
4593 debugfs_remove(l2cap_debugfs);
4594
4595 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4596 BT_ERR("L2CAP protocol unregistration failed");
4597
4598 l2cap_cleanup_sockets();
4599 }
4600
4601 module_param(disable_ertm, bool, 0644);
4602 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4603
4604 module_param(enable_hs, bool, 0644);
4605 MODULE_PARM_DESC(enable_hs, "Enable High Speed");