Bluetooth: Simplify the ERTM ack timeout
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
51 #include <net/sock.h>
52
53 #include <asm/unaligned.h>
54
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
59
/* Module parameter: when true, ERTM and streaming modes are neither
 * advertised nor used.  ERTM support must be on by default, so this
 * defaults to false (it was wrongly initialized to 1, which silently
 * disabled ERTM for every channel).
 */
bool disable_ertm;
61
/* Locally supported feature mask; the ERTM/streaming bits are ORed in
 * dynamically (see l2cap_mode_supported()) when ERTM is enabled.
 */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap advertised to peers. */
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };

/* Global list of all L2CAP channels, protected by chan_list_lock. */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for signalling helpers defined later in the file. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				   struct l2cap_chan *chan, int err);

static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff_head *skbs, u8 event);
78
79 /* ---- L2CAP channels ---- */
80
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 {
83 struct l2cap_chan *c;
84
85 list_for_each_entry(c, &conn->chan_l, list) {
86 if (c->dcid == cid)
87 return c;
88 }
89 return NULL;
90 }
91
92 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
93 {
94 struct l2cap_chan *c;
95
96 list_for_each_entry(c, &conn->chan_l, list) {
97 if (c->scid == cid)
98 return c;
99 }
100 return NULL;
101 }
102
/* Find the channel with the given source CID under conn->chan_lock.
 * The returned channel (if any) is locked; the caller must unlock it
 * with l2cap_chan_unlock().
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
117
118 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
119 {
120 struct l2cap_chan *c;
121
122 list_for_each_entry(c, &conn->chan_l, list) {
123 if (c->ident == ident)
124 return c;
125 }
126 return NULL;
127 }
128
129 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
130 {
131 struct l2cap_chan *c;
132
133 list_for_each_entry(c, &chan_list, global_l) {
134 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
135 return c;
136 }
137 return NULL;
138 }
139
/* Bind @chan to @psm on source address @src.  When @psm is zero, pick
 * the first free dynamic PSM in 0x1001-0x10ff.  Returns 0 on success,
 * -EADDRINUSE if the requested PSM is already bound on @src, or
 * -EINVAL if no dynamic PSM is free.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		/* Step by 2 keeps the candidate PSMs odd, as dynamic
		 * PSMs must be.
		 */
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
172
/* Assign a fixed source CID to @chan.  Taking chan_list_lock keeps the
 * update serialized with global channel lookups.  Always returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
183
184 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
185 {
186 u16 cid = L2CAP_CID_DYN_START;
187
188 for (; cid < L2CAP_CID_DYN_END; cid++) {
189 if (!__l2cap_get_chan_by_scid(conn, cid))
190 return cid;
191 }
192
193 return 0;
194 }
195
/* Set the channel state and notify the channel's owner via the
 * state_change callback.  Lockless variant; the l2cap_state_change()
 * wrapper below takes the socket lock first.
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan->data, state);
}
204
/* Locked wrapper: change channel state with the socket lock held. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
213
/* Record @err on the channel's socket.  Caller holds the socket lock. */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
220
/* Locked wrapper: record @err on the socket with the socket lock held. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
229
/* Arm the ERTM retransmission timer, but only when the monitor timer
 * is not already running (the monitor timer supersedes it) and a
 * retransmission timeout has actually been negotiated.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
238
/* Arm the ERTM monitor timer; the retransmission timer is stopped
 * first since only one of the two may run at a time.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
247
248 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
249 u16 seq)
250 {
251 struct sk_buff *skb;
252
253 skb_queue_walk(head, skb) {
254 if (bt_cb(skb)->control.txseq == seq)
255 return skb;
256 }
257
258 return NULL;
259 }
260
261 /* ---- L2CAP sequence number lists ---- */
262
263 /* For ERTM, ordered lists of sequence numbers must be tracked for
264 * SREJ requests that are received and for frames that are to be
265 * retransmitted. These seq_list functions implement a singly-linked
266 * list in an array, where membership in the list can also be checked
267 * in constant time. Items can also be added to the tail of the list
268 * and removed from the head in constant time, without further memory
269 * allocs or frees.
270 */
271
/* Allocate and reset a sequence list able to hold @size entries.
 * Returns 0, or -ENOMEM if the backing array cannot be allocated.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Power-of-two size lets "seq & mask" index the array. */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
294
/* Release the backing array of a sequence list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
299
/* True if @seq is currently linked into the list (its slot is not
 * marked CLEAR).
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
306
/* Unlink @seq from the list and return it, or L2CAP_SEQ_LIST_CLEAR
 * when the list is empty or @seq is not found.  Removing the head is
 * O(1); removing an interior element walks the singly-linked chain.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* If the head now points at the TAIL sentinel, the
		 * list became empty.
		 */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
340
/* Pop and return the head of the list (L2CAP_SEQ_LIST_CLEAR if empty). */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
346
/* Empty the list: clear every slot and reset head/tail.  A no-op when
 * the list is already empty.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
360
/* Append @seq to the tail of the list in constant time.  Duplicate
 * entries are silently ignored (the slot already links somewhere).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
378
/* Delayed-work handler for the channel timer.  Picks an error reason
 * based on the channel state, closes the channel, and drops the timer's
 * reference.  Lock order: conn->chan_lock, then the channel lock.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* close() is called outside the channel lock. */
	chan->ops->close(chan->data);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference held by the timer. */
	l2cap_chan_put(chan);
}
408
/* Allocate a new channel, link it into the global channel list and
 * return it with a single reference held, or NULL on allocation
 * failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	atomic_set(&chan->refcnt, 1);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
436
/* Unlink the channel from the global list and drop the creation
 * reference; the channel is freed once the last reference goes.
 */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	l2cap_chan_put(chan);
}
445
446 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
447 {
448 chan->fcs = L2CAP_FCS_CRC16;
449 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
450 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
451 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
452 chan->sec_level = BT_SECURITY_LOW;
453
454 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
455 }
456
/* Attach @chan to @conn: assign CIDs/MTU according to the channel type
 * and link type, initialize best-effort QoS defaults, take a reference
 * and link the channel into the connection's channel list.
 * NOTE(review): callers appear to hold conn->chan_lock — confirm.
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort QoS parameters. */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;

	/* The connection's channel list holds a reference. */
	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
505
/* Locked wrapper: attach @chan to @conn under conn->chan_lock. */
static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
512
/* Detach @chan from its connection and mark the socket closed/zapped.
 * @err, when non-zero, is reported via sk_err.  Queues, ERTM timers and
 * the SREJ/retransmit sequence lists are purged unless configuration
 * never completed.  NOTE(review): chan appears to be locked by the
 * caller and, for the list removal, conn->chan_lock held — confirm.
 */
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken by __l2cap_chan_add(). */
		l2cap_chan_put(chan);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	lock_sock(sk);

	__l2cap_state_change(chan, BT_CLOSED);
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		__l2cap_chan_set_err(chan, err);

	if (parent) {
		/* Pending accept(): unlink and wake the listener. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	release_sock(sk);

	/* Nothing below was ever set up if configuration never finished. */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	skb_queue_purge(&chan->tx_q);

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
	}
}
565
/* Close every connection that is queued on a listening socket but has
 * not been accepted yet.
 */
static void l2cap_chan_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
		struct l2cap_chan *chan = l2cap_pi(sk)->chan;

		l2cap_chan_lock(chan);
		__clear_chan_timer(chan);
		l2cap_chan_close(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		chan->ops->close(chan->data);
	}
}
584
/* Close @chan with error @reason, following the state machine:
 * listening sockets drop their pending accepts; connected/configuring
 * ACL channels send a disconnect request; a half-open incoming channel
 * (BT_CONNECT2) is refused with a connect response first; all other
 * states tear the channel down directly.
 * NOTE(review): callers appear to hold the channel lock — confirm.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan,
	       state_to_string(chan->state), sk);

	switch (chan->state) {
	case BT_LISTEN:
		lock_sock(sk);
		l2cap_chan_cleanup_listen(sk);

		__l2cap_state_change(chan, BT_CLOSED);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			/* Wait for the disconnect response; the channel
			 * timer bounds the wait.
			 */
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		lock_sock(sk);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;
	}
}
648
649 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
650 {
651 if (chan->chan_type == L2CAP_CHAN_RAW) {
652 switch (chan->sec_level) {
653 case BT_SECURITY_HIGH:
654 return HCI_AT_DEDICATED_BONDING_MITM;
655 case BT_SECURITY_MEDIUM:
656 return HCI_AT_DEDICATED_BONDING;
657 default:
658 return HCI_AT_NO_BONDING;
659 }
660 } else if (chan->psm == cpu_to_le16(0x0001)) {
661 if (chan->sec_level == BT_SECURITY_LOW)
662 chan->sec_level = BT_SECURITY_SDP;
663
664 if (chan->sec_level == BT_SECURITY_HIGH)
665 return HCI_AT_NO_BONDING_MITM;
666 else
667 return HCI_AT_NO_BONDING;
668 } else {
669 switch (chan->sec_level) {
670 case BT_SECURITY_HIGH:
671 return HCI_AT_GENERAL_BONDING_MITM;
672 case BT_SECURITY_MEDIUM:
673 return HCI_AT_GENERAL_BONDING;
674 default:
675 return HCI_AT_NO_BONDING;
676 }
677 }
678 }
679
/* Service level security: ask the HCI layer to enforce the channel's
 * security level with the derived authentication requirement.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
690
/* Return the next signalling command identifier for @conn, cycling
 * through 1-128 under conn->lock (0 is never used).
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
712
/* Build and transmit a signalling command on @conn.  Silently does
 * nothing if the command skb cannot be built.  Signalling traffic is
 * sent non-flushable when the controller supports it, at maximum
 * priority.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
733
/* Transmit a data skb on the channel's ACL link, choosing flushable
 * vs. non-flushable according to the channel flag and controller
 * capability.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
751
752 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
753 {
754 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
755 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
756
757 if (enh & L2CAP_CTRL_FRAME_TYPE) {
758 /* S-Frame */
759 control->sframe = 1;
760 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
761 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
762
763 control->sar = 0;
764 control->txseq = 0;
765 } else {
766 /* I-Frame */
767 control->sframe = 0;
768 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
769 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
770
771 control->poll = 0;
772 control->super = 0;
773 }
774 }
775
776 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
777 {
778 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
779 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
780
781 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
782 /* S-Frame */
783 control->sframe = 1;
784 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
785 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
786
787 control->sar = 0;
788 control->txseq = 0;
789 } else {
790 /* I-Frame */
791 control->sframe = 0;
792 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
793 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
794
795 control->poll = 0;
796 control->super = 0;
797 }
798 }
799
/* Decode the control field at the start of @skb into its cb area and
 * pull it off the skb; field width depends on FLAG_EXT_CTRL.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
813
814 static u32 __pack_extended_control(struct l2cap_ctrl *control)
815 {
816 u32 packed;
817
818 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
819 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
820
821 if (control->sframe) {
822 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
823 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
824 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
825 } else {
826 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
827 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
828 }
829
830 return packed;
831 }
832
833 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
834 {
835 u16 packed;
836
837 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
838 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
839
840 if (control->sframe) {
841 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
842 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
843 packed |= L2CAP_CTRL_FRAME_TYPE;
844 } else {
845 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
846 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
847 }
848
849 return packed;
850 }
851
/* Write the encoded control field into @skb just after the basic
 * L2CAP header; field width depends on FLAG_EXT_CTRL.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
864
/* Build a complete S-frame PDU (header, control field and, when CRC16
 * FCS is negotiated, the FCS trailer) carrying the already-packed
 * @control value.  Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything written so far. */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
902
/* Finalize and transmit the S-frame described by @control: set the
 * F-bit if one is owed, track RNR state, and — for frames that
 * acknowledge (everything but SREJ) — record the acked sequence and
 * cancel the pending ack timer.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit is carried on any non-poll S-frame. */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		/* This frame acknowledges up to reqseq, so no separate
		 * ack is needed any more.
		 */
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
940
/* Send an RR (or RNR when the local side is busy) S-frame
 * acknowledging up to buffer_seq, optionally with the P-bit set.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}
959
/* True when no connect request is currently outstanding for @chan. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
964
/* Send an L2CAP connect request for @chan and mark a connect as
 * pending; the ident used is remembered for matching the response.
 */
static void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
979
/* Move @chan to BT_CONNECTED: clear all configuration flags, stop the
 * channel timer and wake the socket (and its listening parent, if any).
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	struct sock *parent;

	lock_sock(sk);

	parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	__l2cap_state_change(chan, BT_CONNECTED);
	sk->sk_state_change(sk);

	if (parent)
		parent->sk_data_ready(parent, 0);

	release_sock(sk);
}
1003
/* Kick off connection establishment for @chan.  LE links are ready
 * immediately.  On BR/EDR, send the connect request once the remote
 * feature mask is known and security passes; otherwise first issue an
 * information request for the feature mask.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature mask query still in flight: wait for it. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan))
			l2cap_send_conn_req(chan);
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
			       L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
1033
1034 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1035 {
1036 u32 local_feat_mask = l2cap_feat_mask;
1037 if (!disable_ertm)
1038 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1039
1040 switch (mode) {
1041 case L2CAP_MODE_ERTM:
1042 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1043 case L2CAP_MODE_STREAMING:
1044 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1045 default:
1046 return 0x00;
1047 }
1048 }
1049
/* Send a disconnect request for @chan, stopping any ERTM timers first,
 * then move the channel to BT_DISCONN and record @err on its socket.
 * A NULL @conn makes this a no-op.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
		       L2CAP_DISCONN_REQ, sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1074
1075 /* ---- L2CAP connections ---- */
1076 static void l2cap_conn_start(struct l2cap_conn *conn)
1077 {
1078 struct l2cap_chan *chan, *tmp;
1079
1080 BT_DBG("conn %p", conn);
1081
1082 mutex_lock(&conn->chan_lock);
1083
1084 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1085 struct sock *sk = chan->sk;
1086
1087 l2cap_chan_lock(chan);
1088
1089 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1090 l2cap_chan_unlock(chan);
1091 continue;
1092 }
1093
1094 if (chan->state == BT_CONNECT) {
1095 if (!l2cap_chan_check_security(chan) ||
1096 !__l2cap_no_conn_pending(chan)) {
1097 l2cap_chan_unlock(chan);
1098 continue;
1099 }
1100
1101 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1102 && test_bit(CONF_STATE2_DEVICE,
1103 &chan->conf_state)) {
1104 l2cap_chan_close(chan, ECONNRESET);
1105 l2cap_chan_unlock(chan);
1106 continue;
1107 }
1108
1109 l2cap_send_conn_req(chan);
1110
1111 } else if (chan->state == BT_CONNECT2) {
1112 struct l2cap_conn_rsp rsp;
1113 char buf[128];
1114 rsp.scid = cpu_to_le16(chan->dcid);
1115 rsp.dcid = cpu_to_le16(chan->scid);
1116
1117 if (l2cap_chan_check_security(chan)) {
1118 lock_sock(sk);
1119 if (test_bit(BT_SK_DEFER_SETUP,
1120 &bt_sk(sk)->flags)) {
1121 struct sock *parent = bt_sk(sk)->parent;
1122 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1123 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1124 if (parent)
1125 parent->sk_data_ready(parent, 0);
1126
1127 } else {
1128 __l2cap_state_change(chan, BT_CONFIG);
1129 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1130 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1131 }
1132 release_sock(sk);
1133 } else {
1134 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1135 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1136 }
1137
1138 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1139 sizeof(rsp), &rsp);
1140
1141 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1142 rsp.result != L2CAP_CR_SUCCESS) {
1143 l2cap_chan_unlock(chan);
1144 continue;
1145 }
1146
1147 set_bit(CONF_REQ_SENT, &chan->conf_state);
1148 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1149 l2cap_build_conf_req(chan, buf), buf);
1150 chan->num_conf_req++;
1151 }
1152
1153 l2cap_chan_unlock(chan);
1154 }
1155
1156 mutex_unlock(&conn->chan_lock);
1157 }
1158
/* Find a global channel in @state with source CID @cid matching the
 * src/dst address pair.  An exact address match wins immediately;
 * otherwise the closest wildcard (BDADDR_ANY) match is returned.
 * Despite the stale comment, the returned channel is NOT locked.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1201
/* Accept an incoming LE connection on behalf of a listening socket.
 * Looks up a listener on the LE data CID, clones a child channel and
 * immediately marks it connected (LE has no L2CAP configuration stage).
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto clean;

	sk = chan->sk;

	/* Keep the underlying link alive while the channel exists */
	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	l2cap_chan_add(conn, chan);

	__set_chan_timer(chan, sk->sk_sndtimeo);

	__l2cap_state_change(chan, BT_CONNECTED);
	/* Wake the listener so accept() can return the new socket */
	parent->sk_data_ready(parent, 0);

clean:
	release_sock(parent);
}
1248
/* Drive all channels on a connection once the HCI link is established.
 * For LE links this also kicks SMP security (outgoing) or the
 * listening-socket accept path (incoming).
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: hand off to the LE accept helper */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: start pairing at the requested level */
	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (conn->hcon->type == LE_LINK) {
			/* Channel becomes ready only once security holds */
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels skip the config stage */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1287
/* Notify sockets that we cannot guarantee reliability anymore */
1289 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1290 {
1291 struct l2cap_chan *chan;
1292
1293 BT_DBG("conn %p", conn);
1294
1295 mutex_lock(&conn->chan_lock);
1296
1297 list_for_each_entry(chan, &conn->chan_l, list) {
1298 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1299 __l2cap_chan_set_err(chan, err);
1300 }
1301
1302 mutex_unlock(&conn->chan_lock);
1303 }
1304
1305 static void l2cap_info_timeout(struct work_struct *work)
1306 {
1307 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1308 info_timer.work);
1309
1310 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1311 conn->info_ident = 0;
1312
1313 l2cap_conn_start(conn);
1314 }
1315
/* Tear down an L2CAP connection: kill every channel, stop pending
 * timers and free the connection object. Called on HCI disconnect or
 * fatal error; @err is propagated to each channel's owner.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled ACL frame */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a ref so the close callback can still run after
		 * l2cap_chan_del() drops the conn's reference.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan->data);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* Only cancel timers that were actually armed for this link type */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1358
1359 static void security_timeout(struct work_struct *work)
1360 {
1361 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1362 security_timer.work);
1363
1364 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1365 }
1366
/* Allocate and initialise the L2CAP-layer state for an HCI link.
 * Returns the existing conn if one is already attached, NULL on
 * allocation failure, or the newly created conn.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up, or the link failed to come up */
	if (conn || status)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links may advertise their own MTU; otherwise use the ACL MTU */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* The two timers share storage: LE uses the SMP security timer,
	 * BR/EDR the info-request timer.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1415
1416 /* ---- Socket interface ---- */
1417
1418 /* Find socket with psm and source / destination bdaddr.
1419 * Returns closest match.
1420 */
1421 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1422 bdaddr_t *src,
1423 bdaddr_t *dst)
1424 {
1425 struct l2cap_chan *c, *c1 = NULL;
1426
1427 read_lock(&chan_list_lock);
1428
1429 list_for_each_entry(c, &chan_list, global_l) {
1430 struct sock *sk = c->sk;
1431
1432 if (state && c->state != state)
1433 continue;
1434
1435 if (c->psm == psm) {
1436 int src_match, dst_match;
1437 int src_any, dst_any;
1438
1439 /* Exact match. */
1440 src_match = !bacmp(&bt_sk(sk)->src, src);
1441 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1442 if (src_match && dst_match) {
1443 read_unlock(&chan_list_lock);
1444 return c;
1445 }
1446
1447 /* Closest match */
1448 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1449 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1450 if ((src_match && dst_any) || (src_any && dst_match) ||
1451 (src_any && dst_any))
1452 c1 = c;
1453 }
1454 }
1455
1456 read_unlock(&chan_list_lock);
1457
1458 return c1;
1459 }
1460
/* Initiate an outgoing L2CAP connection on @chan towards @dst.
 * Either @psm (connection-oriented channel) or @cid (fixed channel)
 * selects the remote endpoint. Returns 0 on success (or when already
 * connecting) and a negative errno otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(chan->psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need at least one endpoint id */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	lock_sock(sk);

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		release_sock(sk);
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		release_sock(sk);
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		release_sock(sk);
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, dst);

	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; anything else goes over ACL */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* Only a single channel is allowed per LE link */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* l2cap_chan_add() takes conn->chan_lock; drop the channel lock
	 * first to preserve the lock ordering, then re-take it.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1604
/* Wait (interruptibly) until the peer has acked all outstanding ERTM
 * I-frames on this socket's channel, polling in HZ/5 slices.
 * Returns 0 on success or a negative errno (signal or socket error).
 * Called with the socket locked; the lock is dropped while sleeping.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* schedule_timeout() may have consumed the whole slice */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1636
1637 static void l2cap_monitor_timeout(struct work_struct *work)
1638 {
1639 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1640 monitor_timer.work);
1641
1642 BT_DBG("chan %p", chan);
1643
1644 l2cap_chan_lock(chan);
1645
1646 if (!chan->conn) {
1647 l2cap_chan_unlock(chan);
1648 l2cap_chan_put(chan);
1649 return;
1650 }
1651
1652 l2cap_tx(chan, 0, 0, L2CAP_EV_MONITOR_TO);
1653
1654 l2cap_chan_unlock(chan);
1655 l2cap_chan_put(chan);
1656 }
1657
1658 static void l2cap_retrans_timeout(struct work_struct *work)
1659 {
1660 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1661 retrans_timer.work);
1662
1663 BT_DBG("chan %p", chan);
1664
1665 l2cap_chan_lock(chan);
1666
1667 if (!chan->conn) {
1668 l2cap_chan_unlock(chan);
1669 l2cap_chan_put(chan);
1670 return;
1671 }
1672
1673 l2cap_tx(chan, 0, 0, L2CAP_EV_RETRANS_TO);
1674 l2cap_chan_unlock(chan);
1675 l2cap_chan_put(chan);
1676 }
1677
/* Transmit queued SDU fragments in streaming mode: frames are numbered
 * and sent immediately, with no retransmission or acknowledgement.
 * Returns 0 on success or -ENOTCONN if the channel is not connected.
 */
static int l2cap_streaming_send(struct l2cap_chan *chan,
				struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming frames carry no ack information */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}

	return 0;
}
1718
/* Send as many new I-frames as the remote tx window allows, in the
 * XMIT state. Every transmitted frame piggybacks an ack (reqseq) and
 * (re)starts the retransmission timer. Returns the number of frames
 * sent, or a negative errno if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Remote asked us to hold off via RNR */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Piggyback an ack for everything received so far */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %d", (int)control->txseq);
	}

	BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
	       (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1785
1786 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1787 {
1788 struct l2cap_ctrl control;
1789 struct sk_buff *skb;
1790 struct sk_buff *tx_skb;
1791 u16 seq;
1792
1793 BT_DBG("chan %p", chan);
1794
1795 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1796 return;
1797
1798 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1799 seq = l2cap_seq_list_pop(&chan->retrans_list);
1800
1801 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1802 if (!skb) {
1803 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1804 seq);
1805 continue;
1806 }
1807
1808 bt_cb(skb)->control.retries++;
1809 control = bt_cb(skb)->control;
1810
1811 if (chan->max_tx != 0 &&
1812 bt_cb(skb)->control.retries > chan->max_tx) {
1813 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1814 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1815 l2cap_seq_list_clear(&chan->retrans_list);
1816 break;
1817 }
1818
1819 control.reqseq = chan->buffer_seq;
1820 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1821 control.final = 1;
1822 else
1823 control.final = 0;
1824
1825 if (skb_cloned(skb)) {
1826 /* Cloned sk_buffs are read-only, so we need a
1827 * writeable copy
1828 */
1829 tx_skb = skb_copy(skb, GFP_ATOMIC);
1830 } else {
1831 tx_skb = skb_clone(skb, GFP_ATOMIC);
1832 }
1833
1834 if (!tx_skb) {
1835 l2cap_seq_list_clear(&chan->retrans_list);
1836 break;
1837 }
1838
1839 /* Update skb contents */
1840 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1841 put_unaligned_le32(__pack_extended_control(&control),
1842 tx_skb->data + L2CAP_HDR_SIZE);
1843 } else {
1844 put_unaligned_le16(__pack_enhanced_control(&control),
1845 tx_skb->data + L2CAP_HDR_SIZE);
1846 }
1847
1848 if (chan->fcs == L2CAP_FCS_CRC16) {
1849 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1850 put_unaligned_le16(fcs, skb_put(tx_skb,
1851 L2CAP_FCS_SIZE));
1852 }
1853
1854 l2cap_do_send(chan, tx_skb);
1855
1856 BT_DBG("Resent txseq %d", control.txseq);
1857
1858 chan->last_acked_seq = chan->buffer_seq;
1859 }
1860 }
1861
1862 static void l2cap_retransmit(struct l2cap_chan *chan,
1863 struct l2cap_ctrl *control)
1864 {
1865 BT_DBG("chan %p, control %p", chan, control);
1866
1867 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1868 l2cap_ertm_resend(chan);
1869 }
1870
/* Queue every unacked frame from control->reqseq up to the current
 * send point for retransmission (REJ handling), then resend them.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll demands a final bit on the response */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip forward to the first frame the receiver is missing */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything from there up to the send point */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
1904
/* Acknowledge received I-frames. Sends an RNR when locally busy;
 * otherwise prefers piggybacking acks on pending I-frames, sends an
 * explicit RR once roughly 3/4 of the tx window is unacked, and arms
 * the ack timer when frames remain to be acked later.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the tx window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->tx_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Still something to ack: let the ack timer handle it */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
1954
/* Copy @len bytes of user data from @msg into @skb. The first @count
 * bytes go into the head skb; any remainder is placed in continuation
 * fragments chained on the head's frag_list, each at most conn->mtu
 * bytes. Returns the number of bytes copied or a negative errno; on
 * error the caller frees the head skb (which also frees any fragments
 * already linked).
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so error cleanup frees the fragment */
		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the head skb's accounting in sync with its chain */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
1999
2000 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2001 struct msghdr *msg, size_t len,
2002 u32 priority)
2003 {
2004 struct l2cap_conn *conn = chan->conn;
2005 struct sk_buff *skb;
2006 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2007 struct l2cap_hdr *lh;
2008
2009 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
2010
2011 count = min_t(unsigned int, (conn->mtu - hlen), len);
2012
2013 skb = chan->ops->alloc_skb(chan, count + hlen,
2014 msg->msg_flags & MSG_DONTWAIT);
2015 if (IS_ERR(skb))
2016 return skb;
2017
2018 skb->priority = priority;
2019
2020 /* Create L2CAP header */
2021 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2022 lh->cid = cpu_to_le16(chan->dcid);
2023 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2024 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2025
2026 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2027 if (unlikely(err < 0)) {
2028 kfree_skb(skb);
2029 return ERR_PTR(err);
2030 }
2031 return skb;
2032 }
2033
2034 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2035 struct msghdr *msg, size_t len,
2036 u32 priority)
2037 {
2038 struct l2cap_conn *conn = chan->conn;
2039 struct sk_buff *skb;
2040 int err, count;
2041 struct l2cap_hdr *lh;
2042
2043 BT_DBG("chan %p len %d", chan, (int)len);
2044
2045 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2046
2047 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2048 msg->msg_flags & MSG_DONTWAIT);
2049 if (IS_ERR(skb))
2050 return skb;
2051
2052 skb->priority = priority;
2053
2054 /* Create L2CAP header */
2055 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2056 lh->cid = cpu_to_le16(chan->dcid);
2057 lh->len = cpu_to_le16(len);
2058
2059 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2060 if (unlikely(err < 0)) {
2061 kfree_skb(skb);
2062 return ERR_PTR(err);
2063 }
2064 return skb;
2065 }
2066
/* Build a single ERTM/streaming I-frame PDU: L2CAP header, zeroed
 * control field (filled in at transmit time), optional SDU length
 * (for SAR start frames), payload, and room reserved for the FCS.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on the negotiated control-field format */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2123
/* Segment an outgoing SDU into I-frame PDUs queued on @seg_queue,
 * tagging each with the proper SAR value (unsegmented / start /
 * continue / end). Returns 0 or a negative errno; on error any
 * already-built PDUs are purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		/* The start frame also carries the total SDU length */
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the start frame carries the SDU length;
			 * reclaim that space for subsequent frames.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2189
/* Entry point for sending user data on a channel. Dispatches by
 * channel type and mode: connectionless, basic, or ERTM/streaming
 * (which segment the SDU first). Returns the number of bytes sent
 * or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			err = l2cap_tx(chan, 0, &seg_queue,
				       L2CAP_EV_DATA_REQUEST);
		else
			err = l2cap_streaming_send(chan, &seg_queue);

		if (!err)
			err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2271
2272 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2273 {
2274 struct l2cap_ctrl control;
2275 u16 seq;
2276
2277 BT_DBG("chan %p, txseq %d", chan, txseq);
2278
2279 memset(&control, 0, sizeof(control));
2280 control.sframe = 1;
2281 control.super = L2CAP_SUPER_SREJ;
2282
2283 for (seq = chan->expected_tx_seq; seq != txseq;
2284 seq = __next_seq(chan, seq)) {
2285 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2286 control.reqseq = seq;
2287 l2cap_send_sframe(chan, &control);
2288 l2cap_seq_list_append(&chan->srej_list, seq);
2289 }
2290 }
2291
2292 chan->expected_tx_seq = __next_seq(chan, txseq);
2293 }
2294
2295 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2296 {
2297 struct l2cap_ctrl control;
2298
2299 BT_DBG("chan %p", chan);
2300
2301 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2302 return;
2303
2304 memset(&control, 0, sizeof(control));
2305 control.sframe = 1;
2306 control.super = L2CAP_SUPER_SREJ;
2307 control.reqseq = chan->srej_list.tail;
2308 l2cap_send_sframe(chan, &control);
2309 }
2310
2311 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2312 {
2313 struct l2cap_ctrl control;
2314 u16 initial_head;
2315 u16 seq;
2316
2317 BT_DBG("chan %p, txseq %d", chan, txseq);
2318
2319 memset(&control, 0, sizeof(control));
2320 control.sframe = 1;
2321 control.super = L2CAP_SUPER_SREJ;
2322
2323 /* Capture initial list head to allow only one pass through the list. */
2324 initial_head = chan->srej_list.head;
2325
2326 do {
2327 seq = l2cap_seq_list_pop(&chan->srej_list);
2328 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2329 break;
2330
2331 control.reqseq = seq;
2332 l2cap_send_sframe(chan, &control);
2333 l2cap_seq_list_append(&chan->srej_list, seq);
2334 } while (chan->srej_list.head != initial_head);
2335 }
2336
2337 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2338 {
2339 struct sk_buff *acked_skb;
2340 u16 ackseq;
2341
2342 BT_DBG("chan %p, reqseq %d", chan, reqseq);
2343
2344 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2345 return;
2346
2347 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2348 chan->expected_ack_seq, chan->unacked_frames);
2349
2350 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2351 ackseq = __next_seq(chan, ackseq)) {
2352
2353 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2354 if (acked_skb) {
2355 skb_unlink(acked_skb, &chan->tx_q);
2356 kfree_skb(acked_skb);
2357 chan->unacked_frames--;
2358 }
2359 }
2360
2361 chan->expected_ack_seq = reqseq;
2362
2363 if (chan->unacked_frames == 0)
2364 __clear_retrans_timer(chan);
2365
2366 BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
2367 }
2368
2369 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2370 {
2371 BT_DBG("chan %p", chan);
2372
2373 chan->expected_tx_seq = chan->buffer_seq;
2374 l2cap_seq_list_clear(&chan->srej_list);
2375 skb_queue_purge(&chan->srej_q);
2376 chan->rx_state = L2CAP_RX_STATE_RECV;
2377 }
2378
/* ERTM transmit state machine, XMIT state: handle data requests,
 * local-busy transitions, explicit polls, received acks and the
 * retransmission timeout. Returns 0 (reserved for future errors).
 */
static int l2cap_tx_state_xmit(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff_head *skbs, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Queue the new PDUs and transmit as much as the window
		 * allows right away.
		 */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Announce the busy condition via RNR */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer we were busy; poll with RR to
			 * resynchronize and wait for the final response.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer fired: poll the peer and switch to
		 * waiting for the final bit.
		 */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}

	return err;
}
2454
2455 static int l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2456 struct l2cap_ctrl *control,
2457 struct sk_buff_head *skbs, u8 event)
2458 {
2459 int err = 0;
2460
2461 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2462 event);
2463
2464 switch (event) {
2465 case L2CAP_EV_DATA_REQUEST:
2466 if (chan->tx_send_head == NULL)
2467 chan->tx_send_head = skb_peek(skbs);
2468 /* Queue data, but don't send. */
2469 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2470 break;
2471 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2472 BT_DBG("Enter LOCAL_BUSY");
2473 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2474
2475 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2476 /* The SREJ_SENT state must be aborted if we are to
2477 * enter the LOCAL_BUSY state.
2478 */
2479 l2cap_abort_rx_srej_sent(chan);
2480 }
2481
2482 l2cap_send_ack(chan);
2483
2484 break;
2485 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2486 BT_DBG("Exit LOCAL_BUSY");
2487 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2488
2489 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2490 struct l2cap_ctrl local_control;
2491 memset(&local_control, 0, sizeof(local_control));
2492 local_control.sframe = 1;
2493 local_control.super = L2CAP_SUPER_RR;
2494 local_control.poll = 1;
2495 local_control.reqseq = chan->buffer_seq;
2496 l2cap_send_sframe(chan, &local_control);
2497
2498 chan->retry_count = 1;
2499 __set_monitor_timer(chan);
2500 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2501 }
2502 break;
2503 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2504 l2cap_process_reqseq(chan, control->reqseq);
2505
2506 /* Fall through */
2507
2508 case L2CAP_EV_RECV_FBIT:
2509 if (control && control->final) {
2510 __clear_monitor_timer(chan);
2511 if (chan->unacked_frames > 0)
2512 __set_retrans_timer(chan);
2513 chan->retry_count = 0;
2514 chan->tx_state = L2CAP_TX_STATE_XMIT;
2515 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2516 }
2517 break;
2518 case L2CAP_EV_EXPLICIT_POLL:
2519 /* Ignore */
2520 break;
2521 case L2CAP_EV_MONITOR_TO:
2522 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2523 l2cap_send_rr_or_rnr(chan, 1);
2524 __set_monitor_timer(chan);
2525 chan->retry_count++;
2526 } else {
2527 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2528 }
2529 break;
2530 default:
2531 break;
2532 }
2533
2534 return err;
2535 }
2536
2537 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2538 struct sk_buff_head *skbs, u8 event)
2539 {
2540 int err = 0;
2541
2542 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2543 chan, control, skbs, event, chan->tx_state);
2544
2545 switch (chan->tx_state) {
2546 case L2CAP_TX_STATE_XMIT:
2547 err = l2cap_tx_state_xmit(chan, control, skbs, event);
2548 break;
2549 case L2CAP_TX_STATE_WAIT_F:
2550 err = l2cap_tx_state_wait_f(chan, control, skbs, event);
2551 break;
2552 default:
2553 /* Ignore event */
2554 break;
2555 }
2556
2557 return err;
2558 }
2559
2560 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2561 struct l2cap_ctrl *control)
2562 {
2563 BT_DBG("chan %p, control %p", chan, control);
2564 l2cap_tx(chan, control, 0, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2565 }
2566
2567 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2568 struct l2cap_ctrl *control)
2569 {
2570 BT_DBG("chan %p, control %p", chan, control);
2571 l2cap_tx(chan, control, 0, L2CAP_EV_RECV_FBIT);
2572 }
2573
2574 /* Copy frame to all raw sockets on that connection */
2575 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2576 {
2577 struct sk_buff *nskb;
2578 struct l2cap_chan *chan;
2579
2580 BT_DBG("conn %p", conn);
2581
2582 mutex_lock(&conn->chan_lock);
2583
2584 list_for_each_entry(chan, &conn->chan_l, list) {
2585 struct sock *sk = chan->sk;
2586 if (chan->chan_type != L2CAP_CHAN_RAW)
2587 continue;
2588
2589 /* Don't send frame to the socket it came from */
2590 if (skb->sk == sk)
2591 continue;
2592 nskb = skb_clone(skb, GFP_ATOMIC);
2593 if (!nskb)
2594 continue;
2595
2596 if (chan->ops->recv(chan->data, nskb))
2597 kfree_skb(nskb);
2598 }
2599
2600 mutex_unlock(&conn->chan_lock);
2601 }
2602
2603 /* ---- L2CAP signalling commands ---- */
/* Allocate and build an L2CAP signalling command PDU.  The first skb
 * carries the L2CAP and command headers plus as much payload as fits
 * in the connection MTU; any remaining payload goes into header-less
 * continuation fragments chained on frag_list.
 *
 * Returns the skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
	       conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* The signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Payload room left in the first fragment */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	/* Bytes of payload that did not fit in the first skb */
	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees any already-chained fragments as well */
	kfree_skb(skb);
	return NULL;
}
2666
2667 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2668 {
2669 struct l2cap_conf_opt *opt = *ptr;
2670 int len;
2671
2672 len = L2CAP_CONF_OPT_SIZE + opt->len;
2673 *ptr += len;
2674
2675 *type = opt->type;
2676 *olen = opt->len;
2677
2678 switch (opt->len) {
2679 case 1:
2680 *val = *((u8 *) opt->val);
2681 break;
2682
2683 case 2:
2684 *val = get_unaligned_le16(opt->val);
2685 break;
2686
2687 case 4:
2688 *val = get_unaligned_le32(opt->val);
2689 break;
2690
2691 default:
2692 *val = (unsigned long) opt->val;
2693 break;
2694 }
2695
2696 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2697 return len;
2698 }
2699
2700 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2701 {
2702 struct l2cap_conf_opt *opt = *ptr;
2703
2704 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2705
2706 opt->type = type;
2707 opt->len = len;
2708
2709 switch (len) {
2710 case 1:
2711 *((u8 *) opt->val) = val;
2712 break;
2713
2714 case 2:
2715 put_unaligned_le16(val, opt->val);
2716 break;
2717
2718 case 4:
2719 put_unaligned_le32(val, opt->val);
2720 break;
2721
2722 default:
2723 memcpy(opt->val, (void *) val, len);
2724 break;
2725 }
2726
2727 *ptr += L2CAP_CONF_OPT_SIZE + len;
2728 }
2729
2730 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2731 {
2732 struct l2cap_conf_efs efs;
2733
2734 switch (chan->mode) {
2735 case L2CAP_MODE_ERTM:
2736 efs.id = chan->local_id;
2737 efs.stype = chan->local_stype;
2738 efs.msdu = cpu_to_le16(chan->local_msdu);
2739 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2740 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2741 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2742 break;
2743
2744 case L2CAP_MODE_STREAMING:
2745 efs.id = 1;
2746 efs.stype = L2CAP_SERV_BESTEFFORT;
2747 efs.msdu = cpu_to_le16(chan->local_msdu);
2748 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2749 efs.acc_lat = 0;
2750 efs.flush_to = 0;
2751 break;
2752
2753 default:
2754 return;
2755 }
2756
2757 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2758 (unsigned long) &efs);
2759 }
2760
2761 static void l2cap_ack_timeout(struct work_struct *work)
2762 {
2763 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2764 ack_timer.work);
2765 u16 frames_to_ack;
2766
2767 BT_DBG("chan %p", chan);
2768
2769 l2cap_chan_lock(chan);
2770
2771 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2772 chan->last_acked_seq);
2773
2774 if (frames_to_ack)
2775 l2cap_send_rr_or_rnr(chan, 0);
2776
2777 l2cap_chan_unlock(chan);
2778 l2cap_chan_put(chan);
2779 }
2780
/* Reset all ERTM/streaming sequence numbers and SDU reassembly state
 * for a freshly configured channel.  For ERTM this also initializes
 * the three protocol timers and allocates the SREJ and retransmit
 * sequence lists.
 *
 * Returns 0 on success or a negative errno if a sequence list cannot
 * be allocated.
 */
static inline int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming mode needs no timers or sequence lists */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak the srej list on partial failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
2820
2821 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2822 {
2823 switch (mode) {
2824 case L2CAP_MODE_STREAMING:
2825 case L2CAP_MODE_ERTM:
2826 if (l2cap_mode_supported(mode, remote_feat_mask))
2827 return mode;
2828 /* fall through */
2829 default:
2830 return L2CAP_MODE_BASIC;
2831 }
2832 }
2833
2834 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2835 {
2836 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2837 }
2838
2839 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2840 {
2841 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2842 }
2843
2844 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2845 {
2846 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2847 __l2cap_ews_supported(chan)) {
2848 /* use extended control field */
2849 set_bit(FLAG_EXT_CTRL, &chan->flags);
2850 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2851 } else {
2852 chan->tx_win = min_t(u16, chan->tx_win,
2853 L2CAP_DEFAULT_TX_WINDOW);
2854 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2855 }
2856 }
2857
/* Build our outgoing Configure Request for this channel into data.
 * Before the first request/response exchange, the desired mode is
 * (re)negotiated against the remote feature mask; afterwards the
 * settled mode is kept.  Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only before any config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* State-2 devices insist on their configured mode */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only send an MTU option when it differs from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Remotes without ERTM/streaming support expect no RFC
		 * option at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;
		/* Timeouts are filled in by the responder, not us */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* Cap the PDU size so a full PDU plus ERTM overhead
		 * still fits the HCI connection MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		/* Extended window sizes travel in their own EWS option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2978
/* Parse the accumulated Configure Request from the remote
 * (chan->conf_req) and build our Configure Response into data.
 * Returns the response length, or -ECONNREFUSED when negotiation
 * cannot succeed (mode mismatch after retries, unsupported EWS/EFS).
 *
 * NOTE(review): response options are appended to the caller's buffer
 * without a size bound — callers size their rsp buffers generously;
 * confirm before reusing this with smaller buffers.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect all options the remote sent */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be ignored; others must be known */
		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window needs high-speed support */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Echo the unknown option types back */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode is only renegotiated on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
					chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* A state-2 device cannot accept a different mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Second disagreement on mode: give up */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* Remote's service type must be compatible with
			 * ours (or one side is no-traffic).
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
							sizeof(efs),
							(unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* EWS (if received) supersedes the RFC window */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's PDU size to what fits our
			 * HCI MTU with ERTM overhead.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
					chan->conn->mtu -
					L2CAP_EXT_HDR_SIZE -
					L2CAP_SDULEN_SIZE -
					L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			/* As responder we dictate the timeouts */
			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
						sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
					chan->conn->mtu -
					L2CAP_EXT_HDR_SIZE -
					L2CAP_SDULEN_SIZE -
					L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
3198
/* Parse the remote's Configure Response and build the follow-up
 * Configure Request into data.  Options the remote adjusted are
 * adopted and echoed back; *result may already be UNACCEPT from the
 * caller.  Returns the new request's length, or -ECONNREFUSED when
 * the response is incompatible with our required mode or service
 * type.
 *
 * NOTE(review): echoed options are appended to the caller's buffer
 * without a size bound — callers pass fixed-size stack buffers;
 * confirm sizing before reuse.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Never accept an MTU below the spec minimum */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A state-2 device cannot accept a mode change */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
							rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->tx_win = min_t(u16, val,
						L2CAP_DEFAULT_EXT_WINDOW);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service type must stay compatible with ours */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			break;
		}
	}

	/* Basic mode cannot be renegotiated away */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			/* Adopt the responder-assigned ERTM parameters */
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
						le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
						le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
3297
/* Build a Configure Response carrying only the result and flags
 * fields (no options).  Returns the number of bytes written to data.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

	/* Header only; ptr was never advanced past rsp->data */
	return ptr - data;
}
3311
/* Complete a deferred connect: send the success Connect Response that
 * was held back (e.g. while userspace authorization was pending),
 * then start configuration unless a Configure Request has already
 * been sent for this channel.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	/* chan->ident was saved from the original Connect Request */
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3332
3333 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3334 {
3335 int type, olen;
3336 unsigned long val;
3337 struct l2cap_conf_rfc rfc;
3338
3339 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3340
3341 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3342 return;
3343
3344 while (len >= L2CAP_CONF_OPT_SIZE) {
3345 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3346
3347 switch (type) {
3348 case L2CAP_CONF_RFC:
3349 if (olen == sizeof(rfc))
3350 memcpy(&rfc, (void *)val, olen);
3351 goto done;
3352 }
3353 }
3354
3355 /* Use sane default values in case a misbehaving remote device
3356 * did not send an RFC option.
3357 */
3358 rfc.mode = chan->mode;
3359 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3360 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3361 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
3362
3363 BT_ERR("Expected RFC option was not found, using defaults");
3364
3365 done:
3366 switch (rfc.mode) {
3367 case L2CAP_MODE_ERTM:
3368 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3369 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3370 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3371 break;
3372 case L2CAP_MODE_STREAMING:
3373 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3374 }
3375 }
3376
3377 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3378 {
3379 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3380
3381 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3382 return 0;
3383
3384 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3385 cmd->ident == conn->info_ident) {
3386 cancel_delayed_work(&conn->info_timer);
3387
3388 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3389 conn->info_ident = 0;
3390
3391 l2cap_conn_start(conn);
3392 }
3393
3394 return 0;
3395 }
3396
/* Handle an incoming Connect Request: find a channel listening on the
 * requested PSM, apply security and backlog checks, create a child
 * channel and answer with a Connect Response (PENDING while
 * authentication/authorization or the feature-mask exchange is still
 * in progress).  Always returns 0; errors are reported to the peer
 * via the response's result field.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	/* Lock order: conn->chan_lock before the parent socket lock */
	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	/* Remember the request ident for a possible deferred response */
	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace must accept/reject; answer
				 * PENDING with authorization required.
				 */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet; answer PENDING */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Kick off the feature-mask information exchange */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* Immediate success: start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3526
/* Handle a Connect Response to our earlier Connect Request.  The
 * channel is located by our source CID, or by the command ident while
 * the response is still pending (scid == 0).  On success move to
 * BT_CONFIG and send the first Configure Request; on any final error
 * tear the channel down.  Returns 0, or -EFAULT when no matching
 * channel exists.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
						dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		/* No scid yet: match on the ident of our request */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only the first success triggers a config request */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3594
3595 static inline void set_default_fcs(struct l2cap_chan *chan)
3596 {
3597 /* FCS is enabled only in ERTM or streaming mode, if one or both
3598 * sides request it.
3599 */
3600 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3601 chan->fcs = L2CAP_FCS_NONE;
3602 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3603 chan->fcs = L2CAP_FCS_CRC16;
3604 }
3605
/* Handle an incoming Configure Request for one of our channels.
 *
 * Option data may be split across several requests (bit 0 of 'flags'
 * is the continuation flag); fragments accumulate in chan->conf_req
 * until the final fragment arrives and the whole blob is parsed.
 *
 * Returns 0 on success, -ENOENT if dcid matches no channel, or a
 * negative error from l2cap_ertm_init().
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns the channel locked; released at the 'unlock' label. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Configuration is only legal in BT_CONFIG/BT_CONNECT2. */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	/* Our response was not a plain success: wait for more requests. */
	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: the channel is ready for data. */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* We accepted their config; send our own request if not done yet. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
			test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(chan, rsp,
						L2CAP_CONF_SUCCESS, 0x0000), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3715
/* Handle a Configure Response from the remote side.
 *
 * SUCCESS stores the agreed RFC options; PENDING may trigger our own
 * deferred response; UNACCEPT renegotiates up to
 * L2CAP_CONF_MAX_CONF_RSP times before giving up and disconnecting.
 * An unknown SCID is silently ignored.  Returns 0 or a negative error
 * from l2cap_ertm_init().
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	/* Option bytes following the fixed response header. */
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
			result, len);

	/* Returns the channel locked; released at the 'done' label. */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		/* If our own response is also pending, we can now send
		 * the final success response.
		 */
		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* check compatibility */

			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
						l2cap_build_conf_rsp(chan, buf,
							L2CAP_CONF_SUCCESS, 0x0000), buf);
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			/* Option data must fit behind a conf_req header. */
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through - too many negotiation rounds, give up */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	/* More response fragments to come. */
	if (flags & 0x01)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: the channel is ready for data. */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);
		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
3822
/* Handle a Disconnect Request: acknowledge it, shut the socket down and
 * tear the channel down.  An unknown DCID is silently ignored.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The remote's DCID is our SCID. */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	/* Respond with the CIDs swapped back to the remote's view. */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Hold an extra reference so the channel survives l2cap_chan_del()
	 * until the close callback has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3868
/* Handle a Disconnect Response: the remote confirmed our disconnect
 * request, so finish tearing the channel down.  An unknown SCID is
 * silently ignored.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold an extra reference so the channel survives l2cap_chan_del()
	 * until the close callback has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3902
/* Handle an Information Request, answering feature-mask and
 * fixed-channel queries; any other type gets a "not supported" reply.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* Response header plus 32-bit feature mask. */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
						| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* Response header plus 8-byte fixed channel bitmap. */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* Advertise the A2MP fixed channel only when HS is on. */
		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3952
/* Handle an Information Response during the initial feature exchange.
 * On a feature-mask answer, chain a fixed-channel query when the remote
 * supports it; once the exchange completes (or fails), start pending
 * channels via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* Remote refused the query: treat the exchange as complete. */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* Follow up with a fixed-channel query when advertised. */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4010
4011 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
4012 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4013 void *data)
4014 {
4015 struct l2cap_create_chan_req *req = data;
4016 struct l2cap_create_chan_rsp rsp;
4017 u16 psm, scid;
4018
4019 if (cmd_len != sizeof(*req))
4020 return -EPROTO;
4021
4022 if (!enable_hs)
4023 return -EINVAL;
4024
4025 psm = le16_to_cpu(req->psm);
4026 scid = le16_to_cpu(req->scid);
4027
4028 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
4029
4030 /* Placeholder: Always reject */
4031 rsp.dcid = 0;
4032 rsp.scid = cpu_to_le16(scid);
4033 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4034 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4035
4036 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4037 sizeof(rsp), &rsp);
4038
4039 return 0;
4040 }
4041
/* Handle an AMP Create Channel response.  Its payload has the same
 * layout as a Connect response, so reuse that handler directly.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4049
4050 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4051 u16 icid, u16 result)
4052 {
4053 struct l2cap_move_chan_rsp rsp;
4054
4055 BT_DBG("icid %d, result %d", icid, result);
4056
4057 rsp.icid = cpu_to_le16(icid);
4058 rsp.result = cpu_to_le16(result);
4059
4060 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4061 }
4062
4063 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4064 struct l2cap_chan *chan, u16 icid, u16 result)
4065 {
4066 struct l2cap_move_chan_cfm cfm;
4067 u8 ident;
4068
4069 BT_DBG("icid %d, result %d", icid, result);
4070
4071 ident = l2cap_get_ident(conn);
4072 if (chan)
4073 chan->ident = ident;
4074
4075 cfm.icid = cpu_to_le16(icid);
4076 cfm.result = cpu_to_le16(result);
4077
4078 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4079 }
4080
4081 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4082 u16 icid)
4083 {
4084 struct l2cap_move_chan_cfm_rsp rsp;
4085
4086 BT_DBG("icid %d", icid);
4087
4088 rsp.icid = cpu_to_le16(icid);
4089 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4090 }
4091
4092 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4093 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4094 {
4095 struct l2cap_move_chan_req *req = data;
4096 u16 icid = 0;
4097 u16 result = L2CAP_MR_NOT_ALLOWED;
4098
4099 if (cmd_len != sizeof(*req))
4100 return -EPROTO;
4101
4102 icid = le16_to_cpu(req->icid);
4103
4104 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
4105
4106 if (!enable_hs)
4107 return -EINVAL;
4108
4109 /* Placeholder: Always refuse */
4110 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4111
4112 return 0;
4113 }
4114
4115 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4116 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4117 {
4118 struct l2cap_move_chan_rsp *rsp = data;
4119 u16 icid, result;
4120
4121 if (cmd_len != sizeof(*rsp))
4122 return -EPROTO;
4123
4124 icid = le16_to_cpu(rsp->icid);
4125 result = le16_to_cpu(rsp->result);
4126
4127 BT_DBG("icid %d, result %d", icid, result);
4128
4129 /* Placeholder: Always unconfirmed */
4130 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4131
4132 return 0;
4133 }
4134
4135 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4136 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4137 {
4138 struct l2cap_move_chan_cfm *cfm = data;
4139 u16 icid, result;
4140
4141 if (cmd_len != sizeof(*cfm))
4142 return -EPROTO;
4143
4144 icid = le16_to_cpu(cfm->icid);
4145 result = le16_to_cpu(cfm->result);
4146
4147 BT_DBG("icid %d, result %d", icid, result);
4148
4149 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4150
4151 return 0;
4152 }
4153
4154 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4155 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4156 {
4157 struct l2cap_move_chan_cfm_rsp *rsp = data;
4158 u16 icid;
4159
4160 if (cmd_len != sizeof(*rsp))
4161 return -EPROTO;
4162
4163 icid = le16_to_cpu(rsp->icid);
4164
4165 BT_DBG("icid %d", icid);
4166
4167 return 0;
4168 }
4169
4170 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4171 u16 to_multiplier)
4172 {
4173 u16 max_latency;
4174
4175 if (min > max || min < 6 || max > 3200)
4176 return -EINVAL;
4177
4178 if (to_multiplier < 10 || to_multiplier > 3200)
4179 return -EINVAL;
4180
4181 if (max >= to_multiplier * 8)
4182 return -EINVAL;
4183
4184 max_latency = (to_multiplier * 8 / max) - 1;
4185 if (latency > 499 || latency > max_latency)
4186 return -EINVAL;
4187
4188 return 0;
4189 }
4190
/* Handle an LE Connection Parameter Update request from the slave.
 * Only valid when we are master; parameters are validated with
 * l2cap_check_conn_param() and, when accepted, pushed to the
 * controller via hci_le_conn_update().
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master may be asked to update parameters. */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Program the controller only with parameters that validated. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
4232
/* Dispatch one BR/EDR signaling command to its handler.  Echo requests
 * are answered inline; an unknown opcode yields -EINVAL so the caller
 * can send a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the request payload straight back. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_create_channel_rsp(conn, cmd, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4314
4315 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4316 struct l2cap_cmd_hdr *cmd, u8 *data)
4317 {
4318 switch (cmd->code) {
4319 case L2CAP_COMMAND_REJ:
4320 return 0;
4321
4322 case L2CAP_CONN_PARAM_UPDATE_REQ:
4323 return l2cap_conn_param_update_req(conn, cmd, data);
4324
4325 case L2CAP_CONN_PARAM_UPDATE_RSP:
4326 return 0;
4327
4328 default:
4329 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4330 return -EINVAL;
4331 }
4332 }
4333
/* Parse and dispatch every signaling command packed into one C-frame.
 * A malformed command (bad length or reserved identifier 0) aborts
 * parsing of the remainder.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a look at the signaling traffic. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A payload length past the frame end means truncation. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): this message also fires for other
			 * handler errors, not just link-type mismatches.
			 */
			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
4380
4381 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4382 {
4383 u16 our_fcs, rcv_fcs;
4384 int hdr_size;
4385
4386 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4387 hdr_size = L2CAP_EXT_HDR_SIZE;
4388 else
4389 hdr_size = L2CAP_ENH_HDR_SIZE;
4390
4391 if (chan->fcs == L2CAP_FCS_CRC16) {
4392 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4393 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4394 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4395
4396 if (our_fcs != rcv_fcs)
4397 return -EBADMSG;
4398 }
4399 return 0;
4400 }
4401
/* Answer a poll (P-bit) from the remote: report RNR when locally busy,
 * otherwise acknowledge with pending I-frames or, failing that, an RR —
 * always carrying the F-bit.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just left its busy condition; restart the retransmission
	 * timer if frames are still outstanding.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
			chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
4435
/* Append new_frag to skb's frag_list and update the running totals.
 * *last_frag tracks the current list tail so appends stay O(1); on the
 * very first append it points at skb itself.
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4454
/* Reassemble an SDU from I-frames according to their SAR bits.
 *
 * Unsegmented SDUs are delivered directly; segmented ones are collected
 * on chan->sdu (fragments chained via append_skb_frag) until the END
 * segment completes the length announced by START.  Ownership of skb
 * passes to this function: it is delivered, stored, or freed.  Any
 * protocol violation (unexpected SAR state, oversized or mismatched
 * length) drops the partial SDU and returns a negative error.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress. */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan->data, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The first segment carries the total SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A START segment may not already hold the whole SDU. */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now stored, not consumed: don't free it below. */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A CONTINUE segment must not complete the SDU. */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Total length must match exactly what START announced. */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan->data, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the current frame (if still held) and any
		 * partially assembled SDU.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4536
4537 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4538 {
4539 u8 event;
4540
4541 if (chan->mode != L2CAP_MODE_ERTM)
4542 return;
4543
4544 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4545 l2cap_tx(chan, 0, 0, event);
4546 }
4547
/* Deliver buffered out-of-order I-frames once the sequence gap closes,
 * returning to the normal receive state when the SREJ queue drains.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
				chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Still a hole at buffer_seq: wait for more frames. */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	/* Every SREJ'd frame arrived: back to normal receive state. */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
4581
/* Handle an SREJ S-frame: retransmit exactly the requested I-frame,
 * tracking poll/final bits via CONN_SREJ_ACT so retransmissions are not
 * duplicated while waiting for an F-bit.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq requests a frame never sent. */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions. */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this SREJ was already
			 * answered while waiting for the F-bit.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
4639
/* Handle a REJ S-frame: retransmit all unacked I-frames starting at
 * reqseq, honoring the retry limit and the CONN_REJ_ACT bookkeeping.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq rejects a frame never sent. */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions. */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Retransmit only if not already done while in WAIT_F. */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
4676
/* Classify a received I-frame's txseq relative to the receive window:
 * expected, duplicate, unexpected (gap), an expected/duplicate/
 * unexpected SREJ retransmission, or invalid.  The L2CAP_TXSEQ_*
 * result drives the receive state machine.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
								chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
		__seq_offset(chan, chan->expected_tx_seq,
			     chan->last_acked_seq)){
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
4763
4764 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4765 struct l2cap_ctrl *control,
4766 struct sk_buff *skb, u8 event)
4767 {
4768 int err = 0;
4769 bool skb_in_use = 0;
4770
4771 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4772 event);
4773
4774 switch (event) {
4775 case L2CAP_EV_RECV_IFRAME:
4776 switch (l2cap_classify_txseq(chan, control->txseq)) {
4777 case L2CAP_TXSEQ_EXPECTED:
4778 l2cap_pass_to_tx(chan, control);
4779
4780 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4781 BT_DBG("Busy, discarding expected seq %d",
4782 control->txseq);
4783 break;
4784 }
4785
4786 chan->expected_tx_seq = __next_seq(chan,
4787 control->txseq);
4788
4789 chan->buffer_seq = chan->expected_tx_seq;
4790 skb_in_use = 1;
4791
4792 err = l2cap_reassemble_sdu(chan, skb, control);
4793 if (err)
4794 break;
4795
4796 if (control->final) {
4797 if (!test_and_clear_bit(CONN_REJ_ACT,
4798 &chan->conn_state)) {
4799 control->final = 0;
4800 l2cap_retransmit_all(chan, control);
4801 l2cap_ertm_send(chan);
4802 }
4803 }
4804
4805 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4806 l2cap_send_ack(chan);
4807 break;
4808 case L2CAP_TXSEQ_UNEXPECTED:
4809 l2cap_pass_to_tx(chan, control);
4810
4811 /* Can't issue SREJ frames in the local busy state.
4812 * Drop this frame, it will be seen as missing
4813 * when local busy is exited.
4814 */
4815 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4816 BT_DBG("Busy, discarding unexpected seq %d",
4817 control->txseq);
4818 break;
4819 }
4820
4821 /* There was a gap in the sequence, so an SREJ
4822 * must be sent for each missing frame. The
4823 * current frame is stored for later use.
4824 */
4825 skb_queue_tail(&chan->srej_q, skb);
4826 skb_in_use = 1;
4827 BT_DBG("Queued %p (queue len %d)", skb,
4828 skb_queue_len(&chan->srej_q));
4829
4830 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4831 l2cap_seq_list_clear(&chan->srej_list);
4832 l2cap_send_srej(chan, control->txseq);
4833
4834 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4835 break;
4836 case L2CAP_TXSEQ_DUPLICATE:
4837 l2cap_pass_to_tx(chan, control);
4838 break;
4839 case L2CAP_TXSEQ_INVALID_IGNORE:
4840 break;
4841 case L2CAP_TXSEQ_INVALID:
4842 default:
4843 l2cap_send_disconn_req(chan->conn, chan,
4844 ECONNRESET);
4845 break;
4846 }
4847 break;
4848 case L2CAP_EV_RECV_RR:
4849 l2cap_pass_to_tx(chan, control);
4850 if (control->final) {
4851 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4852
4853 if (!test_and_clear_bit(CONN_REJ_ACT,
4854 &chan->conn_state)) {
4855 control->final = 0;
4856 l2cap_retransmit_all(chan, control);
4857 }
4858
4859 l2cap_ertm_send(chan);
4860 } else if (control->poll) {
4861 l2cap_send_i_or_rr_or_rnr(chan);
4862 } else {
4863 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4864 &chan->conn_state) &&
4865 chan->unacked_frames)
4866 __set_retrans_timer(chan);
4867
4868 l2cap_ertm_send(chan);
4869 }
4870 break;
4871 case L2CAP_EV_RECV_RNR:
4872 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4873 l2cap_pass_to_tx(chan, control);
4874 if (control && control->poll) {
4875 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4876 l2cap_send_rr_or_rnr(chan, 0);
4877 }
4878 __clear_retrans_timer(chan);
4879 l2cap_seq_list_clear(&chan->retrans_list);
4880 break;
4881 case L2CAP_EV_RECV_REJ:
4882 l2cap_handle_rej(chan, control);
4883 break;
4884 case L2CAP_EV_RECV_SREJ:
4885 l2cap_handle_srej(chan, control);
4886 break;
4887 default:
4888 break;
4889 }
4890
4891 if (skb && !skb_in_use) {
4892 BT_DBG("Freeing %p", skb);
4893 kfree_skb(skb);
4894 }
4895
4896 return err;
4897 }
4898
4899 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4900 struct l2cap_ctrl *control,
4901 struct sk_buff *skb, u8 event)
4902 {
4903 int err = 0;
4904 u16 txseq = control->txseq;
4905 bool skb_in_use = 0;
4906
4907 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4908 event);
4909
4910 switch (event) {
4911 case L2CAP_EV_RECV_IFRAME:
4912 switch (l2cap_classify_txseq(chan, txseq)) {
4913 case L2CAP_TXSEQ_EXPECTED:
4914 /* Keep frame for reassembly later */
4915 l2cap_pass_to_tx(chan, control);
4916 skb_queue_tail(&chan->srej_q, skb);
4917 skb_in_use = 1;
4918 BT_DBG("Queued %p (queue len %d)", skb,
4919 skb_queue_len(&chan->srej_q));
4920
4921 chan->expected_tx_seq = __next_seq(chan, txseq);
4922 break;
4923 case L2CAP_TXSEQ_EXPECTED_SREJ:
4924 l2cap_seq_list_pop(&chan->srej_list);
4925
4926 l2cap_pass_to_tx(chan, control);
4927 skb_queue_tail(&chan->srej_q, skb);
4928 skb_in_use = 1;
4929 BT_DBG("Queued %p (queue len %d)", skb,
4930 skb_queue_len(&chan->srej_q));
4931
4932 err = l2cap_rx_queued_iframes(chan);
4933 if (err)
4934 break;
4935
4936 break;
4937 case L2CAP_TXSEQ_UNEXPECTED:
4938 /* Got a frame that can't be reassembled yet.
4939 * Save it for later, and send SREJs to cover
4940 * the missing frames.
4941 */
4942 skb_queue_tail(&chan->srej_q, skb);
4943 skb_in_use = 1;
4944 BT_DBG("Queued %p (queue len %d)", skb,
4945 skb_queue_len(&chan->srej_q));
4946
4947 l2cap_pass_to_tx(chan, control);
4948 l2cap_send_srej(chan, control->txseq);
4949 break;
4950 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4951 /* This frame was requested with an SREJ, but
4952 * some expected retransmitted frames are
4953 * missing. Request retransmission of missing
4954 * SREJ'd frames.
4955 */
4956 skb_queue_tail(&chan->srej_q, skb);
4957 skb_in_use = 1;
4958 BT_DBG("Queued %p (queue len %d)", skb,
4959 skb_queue_len(&chan->srej_q));
4960
4961 l2cap_pass_to_tx(chan, control);
4962 l2cap_send_srej_list(chan, control->txseq);
4963 break;
4964 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4965 /* We've already queued this frame. Drop this copy. */
4966 l2cap_pass_to_tx(chan, control);
4967 break;
4968 case L2CAP_TXSEQ_DUPLICATE:
4969 /* Expecting a later sequence number, so this frame
4970 * was already received. Ignore it completely.
4971 */
4972 break;
4973 case L2CAP_TXSEQ_INVALID_IGNORE:
4974 break;
4975 case L2CAP_TXSEQ_INVALID:
4976 default:
4977 l2cap_send_disconn_req(chan->conn, chan,
4978 ECONNRESET);
4979 break;
4980 }
4981 break;
4982 case L2CAP_EV_RECV_RR:
4983 l2cap_pass_to_tx(chan, control);
4984 if (control->final) {
4985 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4986
4987 if (!test_and_clear_bit(CONN_REJ_ACT,
4988 &chan->conn_state)) {
4989 control->final = 0;
4990 l2cap_retransmit_all(chan, control);
4991 }
4992
4993 l2cap_ertm_send(chan);
4994 } else if (control->poll) {
4995 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4996 &chan->conn_state) &&
4997 chan->unacked_frames) {
4998 __set_retrans_timer(chan);
4999 }
5000
5001 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5002 l2cap_send_srej_tail(chan);
5003 } else {
5004 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5005 &chan->conn_state) &&
5006 chan->unacked_frames)
5007 __set_retrans_timer(chan);
5008
5009 l2cap_send_ack(chan);
5010 }
5011 break;
5012 case L2CAP_EV_RECV_RNR:
5013 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5014 l2cap_pass_to_tx(chan, control);
5015 if (control->poll) {
5016 l2cap_send_srej_tail(chan);
5017 } else {
5018 struct l2cap_ctrl rr_control;
5019 memset(&rr_control, 0, sizeof(rr_control));
5020 rr_control.sframe = 1;
5021 rr_control.super = L2CAP_SUPER_RR;
5022 rr_control.reqseq = chan->buffer_seq;
5023 l2cap_send_sframe(chan, &rr_control);
5024 }
5025
5026 break;
5027 case L2CAP_EV_RECV_REJ:
5028 l2cap_handle_rej(chan, control);
5029 break;
5030 case L2CAP_EV_RECV_SREJ:
5031 l2cap_handle_srej(chan, control);
5032 break;
5033 }
5034
5035 if (skb && !skb_in_use) {
5036 BT_DBG("Freeing %p", skb);
5037 kfree_skb(skb);
5038 }
5039
5040 return err;
5041 }
5042
5043 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5044 {
5045 /* Make sure reqseq is for a packet that has been sent but not acked */
5046 u16 unacked;
5047
5048 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5049 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5050 }
5051
5052 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5053 struct sk_buff *skb, u8 event)
5054 {
5055 int err = 0;
5056
5057 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5058 control, skb, event, chan->rx_state);
5059
5060 if (__valid_reqseq(chan, control->reqseq)) {
5061 switch (chan->rx_state) {
5062 case L2CAP_RX_STATE_RECV:
5063 err = l2cap_rx_state_recv(chan, control, skb, event);
5064 break;
5065 case L2CAP_RX_STATE_SREJ_SENT:
5066 err = l2cap_rx_state_srej_sent(chan, control, skb,
5067 event);
5068 break;
5069 default:
5070 /* shut it down */
5071 break;
5072 }
5073 } else {
5074 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5075 control->reqseq, chan->next_tx_seq,
5076 chan->expected_ack_seq);
5077 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5078 }
5079
5080 return err;
5081 }
5082
5083 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5084 struct sk_buff *skb)
5085 {
5086 int err = 0;
5087
5088 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5089 chan->rx_state);
5090
5091 if (l2cap_classify_txseq(chan, control->txseq) ==
5092 L2CAP_TXSEQ_EXPECTED) {
5093 l2cap_pass_to_tx(chan, control);
5094
5095 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5096 __next_seq(chan, chan->buffer_seq));
5097
5098 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5099
5100 l2cap_reassemble_sdu(chan, skb, control);
5101 } else {
5102 if (chan->sdu) {
5103 kfree_skb(chan->sdu);
5104 chan->sdu = NULL;
5105 }
5106 chan->sdu_last_frag = NULL;
5107 chan->sdu_len = 0;
5108
5109 if (skb) {
5110 BT_DBG("Freeing %p", skb);
5111 kfree_skb(skb);
5112 }
5113 }
5114
5115 chan->last_acked_seq = control->txseq;
5116 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5117
5118 return err;
5119 }
5120
5121 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5122 {
5123 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5124 u16 len;
5125 u8 event;
5126
5127 __unpack_control(chan, skb);
5128
5129 len = skb->len;
5130
5131 /*
5132 * We can just drop the corrupted I-frame here.
5133 * Receiver will miss it and start proper recovery
5134 * procedures and ask for retransmission.
5135 */
5136 if (l2cap_check_fcs(chan, skb))
5137 goto drop;
5138
5139 if (!control->sframe && control->sar == L2CAP_SAR_START)
5140 len -= L2CAP_SDULEN_SIZE;
5141
5142 if (chan->fcs == L2CAP_FCS_CRC16)
5143 len -= L2CAP_FCS_SIZE;
5144
5145 if (len > chan->mps) {
5146 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5147 goto drop;
5148 }
5149
5150 if (!control->sframe) {
5151 int err;
5152
5153 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5154 control->sar, control->reqseq, control->final,
5155 control->txseq);
5156
5157 /* Validate F-bit - F=0 always valid, F=1 only
5158 * valid in TX WAIT_F
5159 */
5160 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5161 goto drop;
5162
5163 if (chan->mode != L2CAP_MODE_STREAMING) {
5164 event = L2CAP_EV_RECV_IFRAME;
5165 err = l2cap_rx(chan, control, skb, event);
5166 } else {
5167 err = l2cap_stream_rx(chan, control, skb);
5168 }
5169
5170 if (err)
5171 l2cap_send_disconn_req(chan->conn, chan,
5172 ECONNRESET);
5173 } else {
5174 const u8 rx_func_to_event[4] = {
5175 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5176 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5177 };
5178
5179 /* Only I-frames are expected in streaming mode */
5180 if (chan->mode == L2CAP_MODE_STREAMING)
5181 goto drop;
5182
5183 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5184 control->reqseq, control->final, control->poll,
5185 control->super);
5186
5187 if (len != 0) {
5188 BT_ERR("%d", len);
5189 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5190 goto drop;
5191 }
5192
5193 /* Validate F and P bits */
5194 if (control->final && (control->poll ||
5195 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5196 goto drop;
5197
5198 event = rx_func_to_event[control->super];
5199 if (l2cap_rx(chan, control, skb, event))
5200 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5201 }
5202
5203 return 0;
5204
5205 drop:
5206 kfree_skb(skb);
5207 return 0;
5208 }
5209
/* Deliver an incoming skb to the connection-oriented channel that owns
 * the given CID.
 *
 * l2cap_get_chan_by_scid() appears to return the channel locked — the
 * l2cap_chan_unlock() at 'done' pairs with it (TODO confirm against
 * its definition, which is outside this chunk).  Every exit path other
 * than the unknown-CID case goes through 'done' to drop that lock.
 * Always returns 0; the skb is consumed on all paths.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return 0;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		/* Frame must fit within the incoming MTU */
		if (chan->imtu < skb->len)
			goto drop;

		/* recv() returning 0 means the skb was consumed */
		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* ERTM/streaming receive path always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);

	return 0;
}
5259
5260 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
5261 {
5262 struct l2cap_chan *chan;
5263
5264 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5265 if (!chan)
5266 goto drop;
5267
5268 BT_DBG("chan %p, len %d", chan, skb->len);
5269
5270 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5271 goto drop;
5272
5273 if (chan->imtu < skb->len)
5274 goto drop;
5275
5276 if (!chan->ops->recv(chan->data, skb))
5277 return 0;
5278
5279 drop:
5280 kfree_skb(skb);
5281
5282 return 0;
5283 }
5284
5285 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5286 struct sk_buff *skb)
5287 {
5288 struct l2cap_chan *chan;
5289
5290 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5291 if (!chan)
5292 goto drop;
5293
5294 BT_DBG("chan %p, len %d", chan, skb->len);
5295
5296 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5297 goto drop;
5298
5299 if (chan->imtu < skb->len)
5300 goto drop;
5301
5302 if (!chan->ops->recv(chan->data, skb))
5303 return 0;
5304
5305 drop:
5306 kfree_skb(skb);
5307
5308 return 0;
5309 }
5310
5311 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5312 {
5313 struct l2cap_hdr *lh = (void *) skb->data;
5314 u16 cid, len;
5315 __le16 psm;
5316
5317 skb_pull(skb, L2CAP_HDR_SIZE);
5318 cid = __le16_to_cpu(lh->cid);
5319 len = __le16_to_cpu(lh->len);
5320
5321 if (len != skb->len) {
5322 kfree_skb(skb);
5323 return;
5324 }
5325
5326 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5327
5328 switch (cid) {
5329 case L2CAP_CID_LE_SIGNALING:
5330 case L2CAP_CID_SIGNALING:
5331 l2cap_sig_channel(conn, skb);
5332 break;
5333
5334 case L2CAP_CID_CONN_LESS:
5335 psm = get_unaligned((__le16 *) skb->data);
5336 skb_pull(skb, 2);
5337 l2cap_conless_channel(conn, psm, skb);
5338 break;
5339
5340 case L2CAP_CID_LE_DATA:
5341 l2cap_att_channel(conn, cid, skb);
5342 break;
5343
5344 case L2CAP_CID_SMP:
5345 if (smp_sig_channel(conn, skb))
5346 l2cap_conn_del(conn->hcon, EACCES);
5347 break;
5348
5349 default:
5350 l2cap_data_channel(conn, cid, skb);
5351 break;
5352 }
5353 }
5354
5355 /* ---- L2CAP interface with lower layer (HCI) ---- */
5356
5357 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5358 {
5359 int exact = 0, lm1 = 0, lm2 = 0;
5360 struct l2cap_chan *c;
5361
5362 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5363
5364 /* Find listening sockets and check their link_mode */
5365 read_lock(&chan_list_lock);
5366 list_for_each_entry(c, &chan_list, global_l) {
5367 struct sock *sk = c->sk;
5368
5369 if (c->state != BT_LISTEN)
5370 continue;
5371
5372 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5373 lm1 |= HCI_LM_ACCEPT;
5374 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5375 lm1 |= HCI_LM_MASTER;
5376 exact++;
5377 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5378 lm2 |= HCI_LM_ACCEPT;
5379 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5380 lm2 |= HCI_LM_MASTER;
5381 }
5382 }
5383 read_unlock(&chan_list_lock);
5384
5385 return exact ? lm1 : lm2;
5386 }
5387
5388 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5389 {
5390 struct l2cap_conn *conn;
5391
5392 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5393
5394 if (!status) {
5395 conn = l2cap_conn_add(hcon, status);
5396 if (conn)
5397 l2cap_conn_ready(conn);
5398 } else
5399 l2cap_conn_del(hcon, bt_to_errno(status));
5400
5401 return 0;
5402 }
5403
5404 int l2cap_disconn_ind(struct hci_conn *hcon)
5405 {
5406 struct l2cap_conn *conn = hcon->l2cap_data;
5407
5408 BT_DBG("hcon %p", hcon);
5409
5410 if (!conn)
5411 return HCI_ERROR_REMOTE_USER_TERM;
5412 return conn->disc_reason;
5413 }
5414
/* HCI callback: the ACL link has been disconnected; tear down the
 * associated L2CAP connection, translating the HCI reason code to an
 * errno value.  Always returns 0.
 */
int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
	return 0;
}
5422
5423 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5424 {
5425 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5426 return;
5427
5428 if (encrypt == 0x00) {
5429 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5430 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5431 } else if (chan->sec_level == BT_SECURITY_HIGH)
5432 l2cap_chan_close(chan, ECONNREFUSED);
5433 } else {
5434 if (chan->sec_level == BT_SECURITY_MEDIUM)
5435 __clear_chan_timer(chan);
5436 }
5437 }
5438
/* HCI callback: authentication/encryption status changed on the link.
 *
 * Walks every channel on the connection (under conn->chan_lock, taking
 * each channel lock in turn) and advances its state machine:
 *  - LE (ATT) channels become ready once encryption is up;
 *  - established channels get their suspend flag cleared and the
 *    encryption-loss policy applied (l2cap_check_encryption);
 *  - channels in BT_CONNECT (re)send their connection request;
 *  - channels in BT_CONNECT2 answer the pending incoming connection
 *    request, deferring with AUTHOR_PEND when the socket uses
 *    deferred setup.
 * Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	if (hcon->type == LE_LINK) {
		/* SMP key distribution can start once the LE link is
		 * encrypted; the security timer is no longer needed. */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan->scid %d", chan->scid);

		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Connect request not sent yet for this channel;
		 * nothing to update here. */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status) {
				l2cap_send_conn_req(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					struct sock *parent = bt_sk(sk)->parent;

					/* Hold the response until
					 * user space accepts. */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: refuse the connection
				 * and schedule a disconnect. */
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
5539
/* HCI callback: reassemble incoming ACL data fragments into complete
 * L2CAP frames.
 *
 * A start fragment (ACL_CONT not set) carries the basic L2CAP header,
 * from which the total frame length is read.  If the fragment already
 * contains the whole frame it is dispatched immediately; otherwise a
 * frame-sized rx_skb is allocated and continuation fragments are
 * copied into it until rx_len drops to zero.  Any framing violation
 * marks the connection unreliable (ECOMM) and abandons the frame in
 * progress.
 *
 * Note: successful copy paths deliberately fall through to 'drop',
 * which only frees the incoming *fragment* — the accumulated frame
 * lives in conn->rx_skb.  The incoming skb is consumed on every path.
 * Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start fragment while one is pending means the
		 * previous frame was truncated; discard it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no frame in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
5631
5632 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5633 {
5634 struct l2cap_chan *c;
5635
5636 read_lock(&chan_list_lock);
5637
5638 list_for_each_entry(c, &chan_list, global_l) {
5639 struct sock *sk = c->sk;
5640
5641 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5642 batostr(&bt_sk(sk)->src),
5643 batostr(&bt_sk(sk)->dst),
5644 c->state, __le16_to_cpu(c->psm),
5645 c->scid, c->dcid, c->imtu, c->omtu,
5646 c->sec_level, c->mode);
5647 }
5648
5649 read_unlock(&chan_list_lock);
5650
5651 return 0;
5652 }
5653
/* debugfs open hook: bind the single-shot seq_file to
 * l2cap_debugfs_show. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5658
/* File operations for the read-only "l2cap" debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
5665
5666 static struct dentry *l2cap_debugfs;
5667
5668 int __init l2cap_init(void)
5669 {
5670 int err;
5671
5672 err = l2cap_init_sockets();
5673 if (err < 0)
5674 return err;
5675
5676 if (bt_debugfs) {
5677 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5678 bt_debugfs, NULL, &l2cap_debugfs_fops);
5679 if (!l2cap_debugfs)
5680 BT_ERR("Failed to create L2CAP debug file");
5681 }
5682
5683 return 0;
5684 }
5685
/* Module teardown: remove the debugfs entry and unregister the L2CAP
 * socket layer. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5691
5692 module_param(disable_ertm, bool, 0644);
5693 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
This page took 0.181924 seconds and 6 git commands to generate.