Bluetooth: Use the ERTM transmit state machine from timeout handlers
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
51 #include <net/sock.h>
52
53 #include <asm/unaligned.h>
54
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
59
/* Module parameter: when true, ERTM and streaming modes are neither
 * advertised nor used (see l2cap_mode_supported()). Must default to
 * false so Enhanced Retransmission Mode is available — initializing
 * it to 1 would silently disable ERTM for every channel.
 */
bool disable_ertm;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
67
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
77 struct sk_buff_head *skbs, u8 event);
78
79 /* ---- L2CAP channels ---- */
80
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 {
83 struct l2cap_chan *c;
84
85 list_for_each_entry(c, &conn->chan_l, list) {
86 if (c->dcid == cid)
87 return c;
88 }
89 return NULL;
90 }
91
92 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
93 {
94 struct l2cap_chan *c;
95
96 list_for_each_entry(c, &conn->chan_l, list) {
97 if (c->scid == cid)
98 return c;
99 }
100 return NULL;
101 }
102
103 /* Find channel with given SCID.
104 * Returns locked channel. */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
106 {
107 struct l2cap_chan *c;
108
109 mutex_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
111 if (c)
112 l2cap_chan_lock(c);
113 mutex_unlock(&conn->chan_lock);
114
115 return c;
116 }
117
118 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
119 {
120 struct l2cap_chan *c;
121
122 list_for_each_entry(c, &conn->chan_l, list) {
123 if (c->ident == ident)
124 return c;
125 }
126 return NULL;
127 }
128
129 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
130 {
131 struct l2cap_chan *c;
132
133 list_for_each_entry(c, &chan_list, global_l) {
134 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
135 return c;
136 }
137 return NULL;
138 }
139
140 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
141 {
142 int err;
143
144 write_lock(&chan_list_lock);
145
146 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
147 err = -EADDRINUSE;
148 goto done;
149 }
150
151 if (psm) {
152 chan->psm = psm;
153 chan->sport = psm;
154 err = 0;
155 } else {
156 u16 p;
157
158 err = -EINVAL;
159 for (p = 0x1001; p < 0x1100; p += 2)
160 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
161 chan->psm = cpu_to_le16(p);
162 chan->sport = cpu_to_le16(p);
163 err = 0;
164 break;
165 }
166 }
167
168 done:
169 write_unlock(&chan_list_lock);
170 return err;
171 }
172
173 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
174 {
175 write_lock(&chan_list_lock);
176
177 chan->scid = scid;
178
179 write_unlock(&chan_list_lock);
180
181 return 0;
182 }
183
184 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
185 {
186 u16 cid = L2CAP_CID_DYN_START;
187
188 for (; cid < L2CAP_CID_DYN_END; cid++) {
189 if (!__l2cap_get_chan_by_scid(conn, cid))
190 return cid;
191 }
192
193 return 0;
194 }
195
/* Transition the channel to a new state and notify the owner through
 * the state_change callback. Lockless variant: callers hold the
 * socket lock (see the l2cap_state_change() wrapper below).
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
						state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan->data, state);
}
204
205 static void l2cap_state_change(struct l2cap_chan *chan, int state)
206 {
207 struct sock *sk = chan->sk;
208
209 lock_sock(sk);
210 __l2cap_state_change(chan, state);
211 release_sock(sk);
212 }
213
/* Record an error code on the channel's socket.
 * Lockless variant: callers hold the socket lock.
 */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
220
221 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
222 {
223 struct sock *sk = chan->sk;
224
225 lock_sock(sk);
226 __l2cap_chan_set_err(chan, err);
227 release_sock(sk);
228 }
229
/* Arm the ERTM retransmission timer, but only when a retransmission
 * timeout was negotiated and the monitor timer is not already pending
 * (NOTE(review): presumably the monitor timer supersedes it once a
 * poll is outstanding — __set_monitor_timer() clears the retrans
 * timer; confirm against the ERTM state machine).
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
238
/* Arm the ERTM monitor timer (used while waiting for a response to a
 * poll). The retransmission timer is stopped first — the two timers
 * are mutually exclusive in this implementation.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
247
248 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
249 u16 seq)
250 {
251 struct sk_buff *skb;
252
253 skb_queue_walk(head, skb) {
254 if (bt_cb(skb)->control.txseq == seq)
255 return skb;
256 }
257
258 return NULL;
259 }
260
261 /* ---- L2CAP sequence number lists ---- */
262
263 /* For ERTM, ordered lists of sequence numbers must be tracked for
264 * SREJ requests that are received and for frames that are to be
265 * retransmitted. These seq_list functions implement a singly-linked
266 * list in an array, where membership in the list can also be checked
267 * in constant time. Items can also be added to the tail of the list
268 * and removed from the head in constant time, without further memory
269 * allocs or frees.
270 */
271
272 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
273 {
274 size_t alloc_size, i;
275
276 /* Allocated size is a power of 2 to map sequence numbers
277 * (which may be up to 14 bits) in to a smaller array that is
278 * sized for the negotiated ERTM transmit windows.
279 */
280 alloc_size = roundup_pow_of_two(size);
281
282 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
283 if (!seq_list->list)
284 return -ENOMEM;
285
286 seq_list->mask = alloc_size - 1;
287 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
288 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
289 for (i = 0; i < alloc_size; i++)
290 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
291
292 return 0;
293 }
294
/* Release the backing array of a sequence-number list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
299
/* Test whether 'seq' is currently a member of the list.
 * O(1): a slot holds L2CAP_SEQ_LIST_CLEAR only when its sequence
 * number is not in the list.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
306
/* Remove 'seq' from the list. Returns 'seq' when it was removed, or
 * L2CAP_SEQ_LIST_CLEAR when the list is empty or does not contain it.
 * Removing the head is O(1); removing an interior entry walks the
 * singly-linked chain stored inside the array.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* The tail entry links to L2CAP_SEQ_LIST_TAIL, so the
		 * list is now empty.
		 */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
340
/* Pop and return the head of the list, or L2CAP_SEQ_LIST_CLEAR when
 * the list is empty (l2cap_seq_list_remove() handles that case).
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
346
/* Empty the list by clearing every slot and resetting head/tail.
 * A no-op when the list is already empty.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
360
/* Append 'seq' to the tail of the list in O(1). Duplicates are
 * silently ignored (a non-CLEAR slot means 'seq' is already queued).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
378
/* Delayed-work handler for the channel timer. Closes the channel with
 * an errno chosen from its connection state, then drops the channel
 * reference (presumably taken when the timer was armed — see
 * l2cap_set_timer(); TODO confirm).
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock before the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
				chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* The owner's close callback runs without the channel lock held */
	chan->ops->close(chan->data);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
408
/* Allocate and initialize a new channel, link it into the global
 * channel list, and return it with one reference held.
 * Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	/* Initial reference; dropped by l2cap_chan_destroy() */
	atomic_set(&chan->refcnt, 1);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
436
/* Unlink the channel from the global list and drop the initial
 * reference taken by l2cap_chan_create().
 */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	l2cap_chan_put(chan);
}
445
/* Reset the channel's ERTM/security parameters to their protocol
 * defaults and mark it as forcing active link mode.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
456
/* Attach a channel to a connection: assign CIDs and MTU according to
 * the channel type, set best-effort QoS defaults, take a channel
 * reference and link it into the connection's channel list.
 * Lockless variant: callers hold conn->chan_lock (see l2cap_chan_add()).
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			__le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort service defaults for local QoS options */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;

	/* Reference dropped by l2cap_chan_del() */
	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
505
/* Locked variant of __l2cap_chan_add(): takes conn->chan_lock around
 * the attach.
 */
static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
512
/* Detach the channel from its connection, mark it closed, propagate
 * 'err' to the socket, and — unless configuration never completed —
 * purge all ERTM queues, timers and sequence lists.
 */
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	lock_sock(sk);

	__l2cap_state_change(chan, BT_CLOSED);
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		__l2cap_chan_set_err(chan, err);

	if (parent) {
		/* Pending accept(): detach and wake the listener */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	release_sock(sk);

	/* ERTM state was never set up if configuration did not complete */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	skb_queue_purge(&chan->tx_q);

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
	}
}
565
/* Close every not-yet-accepted channel queued on a listening socket. */
static void l2cap_chan_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
		struct l2cap_chan *chan = l2cap_pi(sk)->chan;

		l2cap_chan_lock(chan);
		__clear_chan_timer(chan);
		l2cap_chan_close(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		/* close callback runs without the channel lock held */
		chan->ops->close(chan->data);
	}
}
584
/* Close a channel according to its current state: tear down a
 * listener, negotiate a disconnect for established ACL channels,
 * reject a pending incoming connection, or simply detach.
 * Caller holds the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan,
					state_to_string(chan->state), sk);

	switch (chan->state) {
	case BT_LISTEN:
		lock_sock(sk);
		l2cap_chan_cleanup_listen(sk);

		__l2cap_state_change(chan, BT_CLOSED);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Established ACL channels get an orderly disconnect;
		 * everything else is torn down immediately.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connection still pending: send a reject
		 * before detaching.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		lock_sock(sk);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;
	}
}
648
/* Map the channel's security level to an HCI authentication
 * requirement: dedicated bonding for raw channels, no bonding for
 * SDP (PSM 0x0001), general bonding otherwise.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	if (chan->chan_type == L2CAP_CHAN_RAW) {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	} else if (chan->psm == cpu_to_le16(0x0001)) {
		/* SDP: never demands more than SDP-level security */
		if (chan->sec_level == BT_SECURITY_LOW)
			chan->sec_level = BT_SECURITY_SDP;

		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
	} else {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	}
}
679
680 /* Service level security */
681 int l2cap_chan_check_security(struct l2cap_chan *chan)
682 {
683 struct l2cap_conn *conn = chan->conn;
684 __u8 auth_type;
685
686 auth_type = l2cap_get_auth_type(chan);
687
688 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
689 }
690
/* Allocate the next signalling identifier for this connection,
 * wrapping within the kernel-reserved range. Serialized with
 * conn->lock so concurrent senders get distinct idents.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
712
/* Build and transmit a signalling command over the connection's ACL
 * link. Silently drops the command if the skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Prefer non-flushable packets when the controller supports them */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling traffic always wakes the link and gets top priority */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
733
/* Transmit a data skb on the channel's ACL link, choosing flushable
 * vs non-flushable start flags from the channel flags and controller
 * capabilities.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
							skb->priority);

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
					lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
751
752 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
753 {
754 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
755 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
756
757 if (enh & L2CAP_CTRL_FRAME_TYPE) {
758 /* S-Frame */
759 control->sframe = 1;
760 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
761 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
762
763 control->sar = 0;
764 control->txseq = 0;
765 } else {
766 /* I-Frame */
767 control->sframe = 0;
768 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
769 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
770
771 control->poll = 0;
772 control->super = 0;
773 }
774 }
775
776 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
777 {
778 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
779 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
780
781 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
782 /* S-Frame */
783 control->sframe = 1;
784 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
785 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
786
787 control->sar = 0;
788 control->txseq = 0;
789 } else {
790 /* I-Frame */
791 control->sframe = 0;
792 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
793 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
794
795 control->poll = 0;
796 control->super = 0;
797 }
798 }
799
/* Pull the control field off the front of the skb and decode it into
 * bt_cb(skb)->control, using the extended (32-bit) or enhanced
 * (16-bit) layout depending on the channel's FLAG_EXT_CTRL flag.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
813
814 static u32 __pack_extended_control(struct l2cap_ctrl *control)
815 {
816 u32 packed;
817
818 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
819 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
820
821 if (control->sframe) {
822 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
823 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
824 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
825 } else {
826 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
827 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
828 }
829
830 return packed;
831 }
832
833 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
834 {
835 u16 packed;
836
837 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
838 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
839
840 if (control->sframe) {
841 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
842 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
843 packed |= L2CAP_CTRL_FRAME_TYPE;
844 } else {
845 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
846 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
847 }
848
849 return packed;
850 }
851
852 static inline void __pack_control(struct l2cap_chan *chan,
853 struct l2cap_ctrl *control,
854 struct sk_buff *skb)
855 {
856 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
857 put_unaligned_le32(__pack_extended_control(control),
858 skb->data + L2CAP_HDR_SIZE);
859 } else {
860 put_unaligned_le16(__pack_enhanced_control(control),
861 skb->data + L2CAP_HDR_SIZE);
862 }
863 }
864
/* Build an S-frame PDU containing only the header, the already-packed
 * control field and (if negotiated) an FCS. Returns the skb or an
 * ERR_PTR on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Length field excludes the basic header itself */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything written so far (header + control) */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
902
/* Finalize and transmit an S-frame: set the F-bit if one is owed,
 * track RNR state, and update the acknowledgment bookkeeping before
 * packing and sending the control field.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit is carried on the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Any non-SREJ S-frame acknowledges frames up to reqseq, so the
	 * delayed ack timer can be stopped.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
940
941 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
942 {
943 struct l2cap_ctrl control;
944
945 BT_DBG("chan %p, poll %d", chan, poll);
946
947 memset(&control, 0, sizeof(control));
948 control.sframe = 1;
949 control.poll = poll;
950
951 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
952 control.super = L2CAP_SUPER_RNR;
953 else
954 control.super = L2CAP_SUPER_RR;
955
956 control.reqseq = chan->buffer_seq;
957 l2cap_send_sframe(chan, &control);
958 }
959
/* True when no connect request is outstanding for this channel. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
964
/* Send an L2CAP Connection Request for this channel and mark the
 * connect as pending until the response arrives.
 */
static void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	/* Remember the ident so the response can be matched to us */
	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
979
/* Mark the channel fully connected: clear all configuration state
 * (including CONF_NOT_COMPLETE), stop the channel timer, and wake the
 * socket — or its listening parent for incoming connections.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	struct sock *parent;

	lock_sock(sk);

	parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	__l2cap_state_change(chan, BT_CONNECTED);
	sk->sk_state_change(sk);

	if (parent)
		parent->sk_data_ready(parent, 0);

	release_sock(sk);
}
1003
/* Kick off channel establishment. LE links need no configuration, so
 * the channel becomes ready immediately. On BR/EDR, the remote
 * feature mask must be known first: either send the connect request
 * now (if security allows) or issue an information request and wait.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange in progress; l2cap_conn_start() will
		 * continue once it completes.
		 */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
				__l2cap_no_conn_pending(chan))
			l2cap_send_conn_req(chan);
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
1033
1034 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1035 {
1036 u32 local_feat_mask = l2cap_feat_mask;
1037 if (!disable_ertm)
1038 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1039
1040 switch (mode) {
1041 case L2CAP_MODE_ERTM:
1042 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1043 case L2CAP_MODE_STREAMING:
1044 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1045 default:
1046 return 0x00;
1047 }
1048 }
1049
/* Send an L2CAP Disconnection Request for the channel, stop all ERTM
 * timers, move the channel to BT_DISCONN and record 'err' on the
 * socket.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* No more ERTM traffic once a disconnect is underway */
	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1074
1075 /* ---- L2CAP connections ---- */
/* Resume establishment of every connection-oriented channel on this
 * connection (typically after the feature-mask exchange or a security
 * change): outgoing channels in BT_CONNECT get a connect request,
 * incoming channels in BT_CONNECT2 get their pending connect response.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Wait for security, and send only one request */
			if (!l2cap_chan_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Abort if the required mode is unavailable and the
			 * channel may not fall back to basic mode.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_send_conn_req(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must authorize first */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			/* Config starts only after a successful response,
			 * and only once.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1158
/* Find socket with cid and source/destination bdaddr.
 * Returns closest match, locked.
 * Preference order: exact src+dst match, then a wildcard
 * (BDADDR_ANY) match on either or both addresses.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		/* state == 0 matches any channel state */
		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
					(src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1201
1202 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1203 {
1204 struct sock *parent, *sk;
1205 struct l2cap_chan *chan, *pchan;
1206
1207 BT_DBG("");
1208
1209 /* Check if we have socket listening on cid */
1210 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1211 conn->src, conn->dst);
1212 if (!pchan)
1213 return;
1214
1215 parent = pchan->sk;
1216
1217 lock_sock(parent);
1218
1219 /* Check for backlog size */
1220 if (sk_acceptq_is_full(parent)) {
1221 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1222 goto clean;
1223 }
1224
1225 chan = pchan->ops->new_connection(pchan->data);
1226 if (!chan)
1227 goto clean;
1228
1229 sk = chan->sk;
1230
1231 hci_conn_hold(conn->hcon);
1232
1233 bacpy(&bt_sk(sk)->src, conn->src);
1234 bacpy(&bt_sk(sk)->dst, conn->dst);
1235
1236 bt_accept_enqueue(parent, sk);
1237
1238 l2cap_chan_add(conn, chan);
1239
1240 __set_chan_timer(chan, sk->sk_sndtimeo);
1241
1242 __l2cap_state_change(chan, BT_CONNECTED);
1243 parent->sk_data_ready(parent, 0);
1244
1245 clean:
1246 release_sock(parent);
1247 }
1248
1249 static void l2cap_conn_ready(struct l2cap_conn *conn)
1250 {
1251 struct l2cap_chan *chan;
1252
1253 BT_DBG("conn %p", conn);
1254
1255 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1256 l2cap_le_conn_ready(conn);
1257
1258 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1259 smp_conn_security(conn, conn->hcon->pending_sec_level);
1260
1261 mutex_lock(&conn->chan_lock);
1262
1263 list_for_each_entry(chan, &conn->chan_l, list) {
1264
1265 l2cap_chan_lock(chan);
1266
1267 if (conn->hcon->type == LE_LINK) {
1268 if (smp_conn_security(conn, chan->sec_level))
1269 l2cap_chan_ready(chan);
1270
1271 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1272 struct sock *sk = chan->sk;
1273 __clear_chan_timer(chan);
1274 lock_sock(sk);
1275 __l2cap_state_change(chan, BT_CONNECTED);
1276 sk->sk_state_change(sk);
1277 release_sock(sk);
1278
1279 } else if (chan->state == BT_CONNECT)
1280 l2cap_do_start(chan);
1281
1282 l2cap_chan_unlock(chan);
1283 }
1284
1285 mutex_unlock(&conn->chan_lock);
1286 }
1287
1288 /* Notify sockets that we cannot guaranty reliability anymore */
1289 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1290 {
1291 struct l2cap_chan *chan;
1292
1293 BT_DBG("conn %p", conn);
1294
1295 mutex_lock(&conn->chan_lock);
1296
1297 list_for_each_entry(chan, &conn->chan_l, list) {
1298 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1299 __l2cap_chan_set_err(chan, err);
1300 }
1301
1302 mutex_unlock(&conn->chan_lock);
1303 }
1304
1305 static void l2cap_info_timeout(struct work_struct *work)
1306 {
1307 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1308 info_timer.work);
1309
1310 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1311 conn->info_ident = 0;
1312
1313 l2cap_conn_start(conn);
1314 }
1315
1316 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1317 {
1318 struct l2cap_conn *conn = hcon->l2cap_data;
1319 struct l2cap_chan *chan, *l;
1320
1321 if (!conn)
1322 return;
1323
1324 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1325
1326 kfree_skb(conn->rx_skb);
1327
1328 mutex_lock(&conn->chan_lock);
1329
1330 /* Kill channels */
1331 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1332 l2cap_chan_hold(chan);
1333 l2cap_chan_lock(chan);
1334
1335 l2cap_chan_del(chan, err);
1336
1337 l2cap_chan_unlock(chan);
1338
1339 chan->ops->close(chan->data);
1340 l2cap_chan_put(chan);
1341 }
1342
1343 mutex_unlock(&conn->chan_lock);
1344
1345 hci_chan_del(conn->hchan);
1346
1347 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1348 cancel_delayed_work_sync(&conn->info_timer);
1349
1350 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1351 cancel_delayed_work_sync(&conn->security_timer);
1352 smp_chan_destroy(conn);
1353 }
1354
1355 hcon->l2cap_data = NULL;
1356 kfree(conn);
1357 }
1358
1359 static void security_timeout(struct work_struct *work)
1360 {
1361 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1362 security_timer.work);
1363
1364 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1365 }
1366
1367 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1368 {
1369 struct l2cap_conn *conn = hcon->l2cap_data;
1370 struct hci_chan *hchan;
1371
1372 if (conn || status)
1373 return conn;
1374
1375 hchan = hci_chan_create(hcon);
1376 if (!hchan)
1377 return NULL;
1378
1379 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1380 if (!conn) {
1381 hci_chan_del(hchan);
1382 return NULL;
1383 }
1384
1385 hcon->l2cap_data = conn;
1386 conn->hcon = hcon;
1387 conn->hchan = hchan;
1388
1389 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1390
1391 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1392 conn->mtu = hcon->hdev->le_mtu;
1393 else
1394 conn->mtu = hcon->hdev->acl_mtu;
1395
1396 conn->src = &hcon->hdev->bdaddr;
1397 conn->dst = &hcon->dst;
1398
1399 conn->feat_mask = 0;
1400
1401 spin_lock_init(&conn->lock);
1402 mutex_init(&conn->chan_lock);
1403
1404 INIT_LIST_HEAD(&conn->chan_l);
1405
1406 if (hcon->type == LE_LINK)
1407 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1408 else
1409 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1410
1411 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1412
1413 return conn;
1414 }
1415
1416 /* ---- Socket interface ---- */
1417
1418 /* Find socket with psm and source / destination bdaddr.
1419 * Returns closest match.
1420 */
1421 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1422 bdaddr_t *src,
1423 bdaddr_t *dst)
1424 {
1425 struct l2cap_chan *c, *c1 = NULL;
1426
1427 read_lock(&chan_list_lock);
1428
1429 list_for_each_entry(c, &chan_list, global_l) {
1430 struct sock *sk = c->sk;
1431
1432 if (state && c->state != state)
1433 continue;
1434
1435 if (c->psm == psm) {
1436 int src_match, dst_match;
1437 int src_any, dst_any;
1438
1439 /* Exact match. */
1440 src_match = !bacmp(&bt_sk(sk)->src, src);
1441 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1442 if (src_match && dst_match) {
1443 read_unlock(&chan_list_lock);
1444 return c;
1445 }
1446
1447 /* Closest match */
1448 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1449 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1450 if ((src_match && dst_any) || (src_any && dst_match) ||
1451 (src_any && dst_any))
1452 c1 = c;
1453 }
1454 }
1455
1456 read_unlock(&chan_list_lock);
1457
1458 return c1;
1459 }
1460
1461 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1462 bdaddr_t *dst, u8 dst_type)
1463 {
1464 struct sock *sk = chan->sk;
1465 bdaddr_t *src = &bt_sk(sk)->src;
1466 struct l2cap_conn *conn;
1467 struct hci_conn *hcon;
1468 struct hci_dev *hdev;
1469 __u8 auth_type;
1470 int err;
1471
1472 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1473 dst_type, __le16_to_cpu(chan->psm));
1474
1475 hdev = hci_get_route(dst, src);
1476 if (!hdev)
1477 return -EHOSTUNREACH;
1478
1479 hci_dev_lock(hdev);
1480
1481 l2cap_chan_lock(chan);
1482
1483 /* PSM must be odd and lsb of upper byte must be 0 */
1484 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1485 chan->chan_type != L2CAP_CHAN_RAW) {
1486 err = -EINVAL;
1487 goto done;
1488 }
1489
1490 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1491 err = -EINVAL;
1492 goto done;
1493 }
1494
1495 switch (chan->mode) {
1496 case L2CAP_MODE_BASIC:
1497 break;
1498 case L2CAP_MODE_ERTM:
1499 case L2CAP_MODE_STREAMING:
1500 if (!disable_ertm)
1501 break;
1502 /* fall through */
1503 default:
1504 err = -ENOTSUPP;
1505 goto done;
1506 }
1507
1508 lock_sock(sk);
1509
1510 switch (sk->sk_state) {
1511 case BT_CONNECT:
1512 case BT_CONNECT2:
1513 case BT_CONFIG:
1514 /* Already connecting */
1515 err = 0;
1516 release_sock(sk);
1517 goto done;
1518
1519 case BT_CONNECTED:
1520 /* Already connected */
1521 err = -EISCONN;
1522 release_sock(sk);
1523 goto done;
1524
1525 case BT_OPEN:
1526 case BT_BOUND:
1527 /* Can connect */
1528 break;
1529
1530 default:
1531 err = -EBADFD;
1532 release_sock(sk);
1533 goto done;
1534 }
1535
1536 /* Set destination address and psm */
1537 bacpy(&bt_sk(sk)->dst, dst);
1538
1539 release_sock(sk);
1540
1541 chan->psm = psm;
1542 chan->dcid = cid;
1543
1544 auth_type = l2cap_get_auth_type(chan);
1545
1546 if (chan->dcid == L2CAP_CID_LE_DATA)
1547 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1548 chan->sec_level, auth_type);
1549 else
1550 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1551 chan->sec_level, auth_type);
1552
1553 if (IS_ERR(hcon)) {
1554 err = PTR_ERR(hcon);
1555 goto done;
1556 }
1557
1558 conn = l2cap_conn_add(hcon, 0);
1559 if (!conn) {
1560 hci_conn_put(hcon);
1561 err = -ENOMEM;
1562 goto done;
1563 }
1564
1565 if (hcon->type == LE_LINK) {
1566 err = 0;
1567
1568 if (!list_empty(&conn->chan_l)) {
1569 err = -EBUSY;
1570 hci_conn_put(hcon);
1571 }
1572
1573 if (err)
1574 goto done;
1575 }
1576
1577 /* Update source addr of the socket */
1578 bacpy(src, conn->src);
1579
1580 l2cap_chan_unlock(chan);
1581 l2cap_chan_add(conn, chan);
1582 l2cap_chan_lock(chan);
1583
1584 l2cap_state_change(chan, BT_CONNECT);
1585 __set_chan_timer(chan, sk->sk_sndtimeo);
1586
1587 if (hcon->state == BT_CONNECTED) {
1588 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1589 __clear_chan_timer(chan);
1590 if (l2cap_chan_check_security(chan))
1591 l2cap_state_change(chan, BT_CONNECTED);
1592 } else
1593 l2cap_do_start(chan);
1594 }
1595
1596 err = 0;
1597
1598 done:
1599 l2cap_chan_unlock(chan);
1600 hci_dev_unlock(hdev);
1601 hci_dev_put(hdev);
1602 return err;
1603 }
1604
1605 int __l2cap_wait_ack(struct sock *sk)
1606 {
1607 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1608 DECLARE_WAITQUEUE(wait, current);
1609 int err = 0;
1610 int timeo = HZ/5;
1611
1612 add_wait_queue(sk_sleep(sk), &wait);
1613 set_current_state(TASK_INTERRUPTIBLE);
1614 while (chan->unacked_frames > 0 && chan->conn) {
1615 if (!timeo)
1616 timeo = HZ/5;
1617
1618 if (signal_pending(current)) {
1619 err = sock_intr_errno(timeo);
1620 break;
1621 }
1622
1623 release_sock(sk);
1624 timeo = schedule_timeout(timeo);
1625 lock_sock(sk);
1626 set_current_state(TASK_INTERRUPTIBLE);
1627
1628 err = sock_error(sk);
1629 if (err)
1630 break;
1631 }
1632 set_current_state(TASK_RUNNING);
1633 remove_wait_queue(sk_sleep(sk), &wait);
1634 return err;
1635 }
1636
1637 static void l2cap_monitor_timeout(struct work_struct *work)
1638 {
1639 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1640 monitor_timer.work);
1641
1642 BT_DBG("chan %p", chan);
1643
1644 l2cap_chan_lock(chan);
1645
1646 if (!chan->conn) {
1647 l2cap_chan_unlock(chan);
1648 l2cap_chan_put(chan);
1649 return;
1650 }
1651
1652 l2cap_tx(chan, 0, 0, L2CAP_EV_MONITOR_TO);
1653
1654 l2cap_chan_unlock(chan);
1655 l2cap_chan_put(chan);
1656 }
1657
1658 static void l2cap_retrans_timeout(struct work_struct *work)
1659 {
1660 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1661 retrans_timer.work);
1662
1663 BT_DBG("chan %p", chan);
1664
1665 l2cap_chan_lock(chan);
1666
1667 if (!chan->conn) {
1668 l2cap_chan_unlock(chan);
1669 l2cap_chan_put(chan);
1670 return;
1671 }
1672
1673 l2cap_tx(chan, 0, 0, L2CAP_EV_RETRANS_TO);
1674 l2cap_chan_unlock(chan);
1675 l2cap_chan_put(chan);
1676 }
1677
1678 static int l2cap_streaming_send(struct l2cap_chan *chan,
1679 struct sk_buff_head *skbs)
1680 {
1681 struct sk_buff *skb;
1682 struct l2cap_ctrl *control;
1683
1684 BT_DBG("chan %p, skbs %p", chan, skbs);
1685
1686 if (chan->state != BT_CONNECTED)
1687 return -ENOTCONN;
1688
1689 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1690
1691 while (!skb_queue_empty(&chan->tx_q)) {
1692
1693 skb = skb_dequeue(&chan->tx_q);
1694
1695 bt_cb(skb)->control.retries = 1;
1696 control = &bt_cb(skb)->control;
1697
1698 control->reqseq = 0;
1699 control->txseq = chan->next_tx_seq;
1700
1701 __pack_control(chan, control, skb);
1702
1703 if (chan->fcs == L2CAP_FCS_CRC16) {
1704 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1705 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1706 }
1707
1708 l2cap_do_send(chan, skb);
1709
1710 BT_DBG("Sent txseq %d", (int)control->txseq);
1711
1712 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1713 chan->frames_sent++;
1714 }
1715
1716 return 0;
1717 }
1718
1719 static int l2cap_ertm_send(struct l2cap_chan *chan)
1720 {
1721 struct sk_buff *skb, *tx_skb;
1722 struct l2cap_ctrl *control;
1723 int sent = 0;
1724
1725 BT_DBG("chan %p", chan);
1726
1727 if (chan->state != BT_CONNECTED)
1728 return -ENOTCONN;
1729
1730 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1731 return 0;
1732
1733 while (chan->tx_send_head &&
1734 chan->unacked_frames < chan->remote_tx_win &&
1735 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1736
1737 skb = chan->tx_send_head;
1738
1739 bt_cb(skb)->control.retries = 1;
1740 control = &bt_cb(skb)->control;
1741
1742 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1743 control->final = 1;
1744
1745 control->reqseq = chan->buffer_seq;
1746 chan->last_acked_seq = chan->buffer_seq;
1747 control->txseq = chan->next_tx_seq;
1748
1749 __pack_control(chan, control, skb);
1750
1751 if (chan->fcs == L2CAP_FCS_CRC16) {
1752 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1753 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1754 }
1755
1756 /* Clone after data has been modified. Data is assumed to be
1757 read-only (for locking purposes) on cloned sk_buffs.
1758 */
1759 tx_skb = skb_clone(skb, GFP_KERNEL);
1760
1761 if (!tx_skb)
1762 break;
1763
1764 __set_retrans_timer(chan);
1765
1766 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1767 chan->unacked_frames++;
1768 chan->frames_sent++;
1769 sent++;
1770
1771 if (skb_queue_is_last(&chan->tx_q, skb))
1772 chan->tx_send_head = NULL;
1773 else
1774 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1775
1776 l2cap_do_send(chan, tx_skb);
1777 BT_DBG("Sent txseq %d", (int)control->txseq);
1778 }
1779
1780 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
1781 (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));
1782
1783 return sent;
1784 }
1785
1786 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1787 {
1788 struct l2cap_ctrl control;
1789 struct sk_buff *skb;
1790 struct sk_buff *tx_skb;
1791 u16 seq;
1792
1793 BT_DBG("chan %p", chan);
1794
1795 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1796 return;
1797
1798 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1799 seq = l2cap_seq_list_pop(&chan->retrans_list);
1800
1801 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1802 if (!skb) {
1803 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1804 seq);
1805 continue;
1806 }
1807
1808 bt_cb(skb)->control.retries++;
1809 control = bt_cb(skb)->control;
1810
1811 if (chan->max_tx != 0 &&
1812 bt_cb(skb)->control.retries > chan->max_tx) {
1813 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1814 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1815 l2cap_seq_list_clear(&chan->retrans_list);
1816 break;
1817 }
1818
1819 control.reqseq = chan->buffer_seq;
1820 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1821 control.final = 1;
1822 else
1823 control.final = 0;
1824
1825 if (skb_cloned(skb)) {
1826 /* Cloned sk_buffs are read-only, so we need a
1827 * writeable copy
1828 */
1829 tx_skb = skb_copy(skb, GFP_ATOMIC);
1830 } else {
1831 tx_skb = skb_clone(skb, GFP_ATOMIC);
1832 }
1833
1834 if (!tx_skb) {
1835 l2cap_seq_list_clear(&chan->retrans_list);
1836 break;
1837 }
1838
1839 /* Update skb contents */
1840 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1841 put_unaligned_le32(__pack_extended_control(&control),
1842 tx_skb->data + L2CAP_HDR_SIZE);
1843 } else {
1844 put_unaligned_le16(__pack_enhanced_control(&control),
1845 tx_skb->data + L2CAP_HDR_SIZE);
1846 }
1847
1848 if (chan->fcs == L2CAP_FCS_CRC16) {
1849 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1850 put_unaligned_le16(fcs, skb_put(tx_skb,
1851 L2CAP_FCS_SIZE));
1852 }
1853
1854 l2cap_do_send(chan, tx_skb);
1855
1856 BT_DBG("Resent txseq %d", control.txseq);
1857
1858 chan->last_acked_seq = chan->buffer_seq;
1859 }
1860 }
1861
1862 static void l2cap_retransmit(struct l2cap_chan *chan,
1863 struct l2cap_ctrl *control)
1864 {
1865 BT_DBG("chan %p, control %p", chan, control);
1866
1867 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1868 l2cap_ertm_resend(chan);
1869 }
1870
1871 static void l2cap_retransmit_all(struct l2cap_chan *chan,
1872 struct l2cap_ctrl *control)
1873 {
1874 struct sk_buff *skb;
1875
1876 BT_DBG("chan %p, control %p", chan, control);
1877
1878 if (control->poll)
1879 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1880
1881 l2cap_seq_list_clear(&chan->retrans_list);
1882
1883 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1884 return;
1885
1886 if (chan->unacked_frames) {
1887 skb_queue_walk(&chan->tx_q, skb) {
1888 if (bt_cb(skb)->control.txseq == control->reqseq ||
1889 skb == chan->tx_send_head)
1890 break;
1891 }
1892
1893 skb_queue_walk_from(&chan->tx_q, skb) {
1894 if (skb == chan->tx_send_head)
1895 break;
1896
1897 l2cap_seq_list_append(&chan->retrans_list,
1898 bt_cb(skb)->control.txseq);
1899 }
1900
1901 l2cap_ertm_resend(chan);
1902 }
1903 }
1904
1905 static void l2cap_send_ack(struct l2cap_chan *chan)
1906 {
1907 struct l2cap_ctrl control;
1908 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1909 chan->last_acked_seq);
1910 int threshold;
1911
1912 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1913 chan, chan->last_acked_seq, chan->buffer_seq);
1914
1915 memset(&control, 0, sizeof(control));
1916 control.sframe = 1;
1917
1918 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1919 chan->rx_state == L2CAP_RX_STATE_RECV) {
1920 __clear_ack_timer(chan);
1921 control.super = L2CAP_SUPER_RNR;
1922 control.reqseq = chan->buffer_seq;
1923 l2cap_send_sframe(chan, &control);
1924 } else {
1925 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1926 l2cap_ertm_send(chan);
1927 /* If any i-frames were sent, they included an ack */
1928 if (chan->buffer_seq == chan->last_acked_seq)
1929 frames_to_ack = 0;
1930 }
1931
1932 /* Ack now if the tx window is 3/4ths full.
1933 * Calculate without mul or div
1934 */
1935 threshold = chan->tx_win;
1936 threshold += threshold << 1;
1937 threshold >>= 2;
1938
1939 BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
1940 threshold);
1941
1942 if (frames_to_ack >= threshold) {
1943 __clear_ack_timer(chan);
1944 control.super = L2CAP_SUPER_RR;
1945 control.reqseq = chan->buffer_seq;
1946 l2cap_send_sframe(chan, &control);
1947 frames_to_ack = 0;
1948 }
1949
1950 if (frames_to_ack)
1951 __set_ack_timer(chan);
1952 }
1953 }
1954
1955 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1956 struct msghdr *msg, int len,
1957 int count, struct sk_buff *skb)
1958 {
1959 struct l2cap_conn *conn = chan->conn;
1960 struct sk_buff **frag;
1961 int sent = 0;
1962
1963 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1964 return -EFAULT;
1965
1966 sent += count;
1967 len -= count;
1968
1969 /* Continuation fragments (no L2CAP header) */
1970 frag = &skb_shinfo(skb)->frag_list;
1971 while (len) {
1972 struct sk_buff *tmp;
1973
1974 count = min_t(unsigned int, conn->mtu, len);
1975
1976 tmp = chan->ops->alloc_skb(chan, count,
1977 msg->msg_flags & MSG_DONTWAIT);
1978 if (IS_ERR(tmp))
1979 return PTR_ERR(tmp);
1980
1981 *frag = tmp;
1982
1983 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1984 return -EFAULT;
1985
1986 (*frag)->priority = skb->priority;
1987
1988 sent += count;
1989 len -= count;
1990
1991 skb->len += (*frag)->len;
1992 skb->data_len += (*frag)->len;
1993
1994 frag = &(*frag)->next;
1995 }
1996
1997 return sent;
1998 }
1999
2000 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2001 struct msghdr *msg, size_t len,
2002 u32 priority)
2003 {
2004 struct l2cap_conn *conn = chan->conn;
2005 struct sk_buff *skb;
2006 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2007 struct l2cap_hdr *lh;
2008
2009 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
2010
2011 count = min_t(unsigned int, (conn->mtu - hlen), len);
2012
2013 skb = chan->ops->alloc_skb(chan, count + hlen,
2014 msg->msg_flags & MSG_DONTWAIT);
2015 if (IS_ERR(skb))
2016 return skb;
2017
2018 skb->priority = priority;
2019
2020 /* Create L2CAP header */
2021 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2022 lh->cid = cpu_to_le16(chan->dcid);
2023 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2024 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2025
2026 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2027 if (unlikely(err < 0)) {
2028 kfree_skb(skb);
2029 return ERR_PTR(err);
2030 }
2031 return skb;
2032 }
2033
2034 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2035 struct msghdr *msg, size_t len,
2036 u32 priority)
2037 {
2038 struct l2cap_conn *conn = chan->conn;
2039 struct sk_buff *skb;
2040 int err, count;
2041 struct l2cap_hdr *lh;
2042
2043 BT_DBG("chan %p len %d", chan, (int)len);
2044
2045 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2046
2047 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2048 msg->msg_flags & MSG_DONTWAIT);
2049 if (IS_ERR(skb))
2050 return skb;
2051
2052 skb->priority = priority;
2053
2054 /* Create L2CAP header */
2055 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2056 lh->cid = cpu_to_le16(chan->dcid);
2057 lh->len = cpu_to_le16(len);
2058
2059 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2060 if (unlikely(err < 0)) {
2061 kfree_skb(skb);
2062 return ERR_PTR(err);
2063 }
2064 return skb;
2065 }
2066
2067 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2068 struct msghdr *msg, size_t len,
2069 u16 sdulen)
2070 {
2071 struct l2cap_conn *conn = chan->conn;
2072 struct sk_buff *skb;
2073 int err, count, hlen;
2074 struct l2cap_hdr *lh;
2075
2076 BT_DBG("chan %p len %d", chan, (int)len);
2077
2078 if (!conn)
2079 return ERR_PTR(-ENOTCONN);
2080
2081 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2082 hlen = L2CAP_EXT_HDR_SIZE;
2083 else
2084 hlen = L2CAP_ENH_HDR_SIZE;
2085
2086 if (sdulen)
2087 hlen += L2CAP_SDULEN_SIZE;
2088
2089 if (chan->fcs == L2CAP_FCS_CRC16)
2090 hlen += L2CAP_FCS_SIZE;
2091
2092 count = min_t(unsigned int, (conn->mtu - hlen), len);
2093
2094 skb = chan->ops->alloc_skb(chan, count + hlen,
2095 msg->msg_flags & MSG_DONTWAIT);
2096 if (IS_ERR(skb))
2097 return skb;
2098
2099 /* Create L2CAP header */
2100 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2101 lh->cid = cpu_to_le16(chan->dcid);
2102 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2103
2104 /* Control header is populated later */
2105 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2106 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2107 else
2108 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2109
2110 if (sdulen)
2111 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2112
2113 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2114 if (unlikely(err < 0)) {
2115 kfree_skb(skb);
2116 return ERR_PTR(err);
2117 }
2118
2119 bt_cb(skb)->control.fcs = chan->fcs;
2120 bt_cb(skb)->control.retries = 0;
2121 return skb;
2122 }
2123
2124 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2125 struct sk_buff_head *seg_queue,
2126 struct msghdr *msg, size_t len)
2127 {
2128 struct sk_buff *skb;
2129 u16 sdu_len;
2130 size_t pdu_len;
2131 int err = 0;
2132 u8 sar;
2133
2134 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
2135
2136 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2137 * so fragmented skbs are not used. The HCI layer's handling
2138 * of fragmented skbs is not compatible with ERTM's queueing.
2139 */
2140
2141 /* PDU size is derived from the HCI MTU */
2142 pdu_len = chan->conn->mtu;
2143
2144 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2145
2146 /* Adjust for largest possible L2CAP overhead. */
2147 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
2148
2149 /* Remote device may have requested smaller PDUs */
2150 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2151
2152 if (len <= pdu_len) {
2153 sar = L2CAP_SAR_UNSEGMENTED;
2154 sdu_len = 0;
2155 pdu_len = len;
2156 } else {
2157 sar = L2CAP_SAR_START;
2158 sdu_len = len;
2159 pdu_len -= L2CAP_SDULEN_SIZE;
2160 }
2161
2162 while (len > 0) {
2163 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2164
2165 if (IS_ERR(skb)) {
2166 __skb_queue_purge(seg_queue);
2167 return PTR_ERR(skb);
2168 }
2169
2170 bt_cb(skb)->control.sar = sar;
2171 __skb_queue_tail(seg_queue, skb);
2172
2173 len -= pdu_len;
2174 if (sdu_len) {
2175 sdu_len = 0;
2176 pdu_len += L2CAP_SDULEN_SIZE;
2177 }
2178
2179 if (len <= pdu_len) {
2180 sar = L2CAP_SAR_END;
2181 pdu_len = len;
2182 } else {
2183 sar = L2CAP_SAR_CONTINUE;
2184 }
2185 }
2186
2187 return err;
2188 }
2189
2190 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2191 u32 priority)
2192 {
2193 struct sk_buff *skb;
2194 int err;
2195 struct sk_buff_head seg_queue;
2196
2197 /* Connectionless channel */
2198 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2199 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2200 if (IS_ERR(skb))
2201 return PTR_ERR(skb);
2202
2203 l2cap_do_send(chan, skb);
2204 return len;
2205 }
2206
2207 switch (chan->mode) {
2208 case L2CAP_MODE_BASIC:
2209 /* Check outgoing MTU */
2210 if (len > chan->omtu)
2211 return -EMSGSIZE;
2212
2213 /* Create a basic PDU */
2214 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2215 if (IS_ERR(skb))
2216 return PTR_ERR(skb);
2217
2218 l2cap_do_send(chan, skb);
2219 err = len;
2220 break;
2221
2222 case L2CAP_MODE_ERTM:
2223 case L2CAP_MODE_STREAMING:
2224 /* Check outgoing MTU */
2225 if (len > chan->omtu) {
2226 err = -EMSGSIZE;
2227 break;
2228 }
2229
2230 __skb_queue_head_init(&seg_queue);
2231
2232 /* Do segmentation before calling in to the state machine,
2233 * since it's possible to block while waiting for memory
2234 * allocation.
2235 */
2236 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2237
2238 /* The channel could have been closed while segmenting,
2239 * check that it is still connected.
2240 */
2241 if (chan->state != BT_CONNECTED) {
2242 __skb_queue_purge(&seg_queue);
2243 err = -ENOTCONN;
2244 }
2245
2246 if (err)
2247 break;
2248
2249 if (chan->mode == L2CAP_MODE_ERTM)
2250 err = l2cap_tx(chan, 0, &seg_queue,
2251 L2CAP_EV_DATA_REQUEST);
2252 else
2253 err = l2cap_streaming_send(chan, &seg_queue);
2254
2255 if (!err)
2256 err = len;
2257
2258 /* If the skbs were not queued for sending, they'll still be in
2259 * seg_queue and need to be purged.
2260 */
2261 __skb_queue_purge(&seg_queue);
2262 break;
2263
2264 default:
2265 BT_DBG("bad state %1.1x", chan->mode);
2266 err = -EBADFD;
2267 }
2268
2269 return err;
2270 }
2271
2272 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2273 {
2274 struct l2cap_ctrl control;
2275 u16 seq;
2276
2277 BT_DBG("chan %p, txseq %d", chan, txseq);
2278
2279 memset(&control, 0, sizeof(control));
2280 control.sframe = 1;
2281 control.super = L2CAP_SUPER_SREJ;
2282
2283 for (seq = chan->expected_tx_seq; seq != txseq;
2284 seq = __next_seq(chan, seq)) {
2285 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2286 control.reqseq = seq;
2287 l2cap_send_sframe(chan, &control);
2288 l2cap_seq_list_append(&chan->srej_list, seq);
2289 }
2290 }
2291
2292 chan->expected_tx_seq = __next_seq(chan, txseq);
2293 }
2294
2295 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2296 {
2297 struct l2cap_ctrl control;
2298
2299 BT_DBG("chan %p", chan);
2300
2301 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2302 return;
2303
2304 memset(&control, 0, sizeof(control));
2305 control.sframe = 1;
2306 control.super = L2CAP_SUPER_SREJ;
2307 control.reqseq = chan->srej_list.tail;
2308 l2cap_send_sframe(chan, &control);
2309 }
2310
2311 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2312 {
2313 struct l2cap_ctrl control;
2314 u16 initial_head;
2315 u16 seq;
2316
2317 BT_DBG("chan %p, txseq %d", chan, txseq);
2318
2319 memset(&control, 0, sizeof(control));
2320 control.sframe = 1;
2321 control.super = L2CAP_SUPER_SREJ;
2322
2323 /* Capture initial list head to allow only one pass through the list. */
2324 initial_head = chan->srej_list.head;
2325
2326 do {
2327 seq = l2cap_seq_list_pop(&chan->srej_list);
2328 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2329 break;
2330
2331 control.reqseq = seq;
2332 l2cap_send_sframe(chan, &control);
2333 l2cap_seq_list_append(&chan->srej_list, seq);
2334 } while (chan->srej_list.head != initial_head);
2335 }
2336
2337 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2338 {
2339 struct sk_buff *acked_skb;
2340 u16 ackseq;
2341
2342 BT_DBG("chan %p, reqseq %d", chan, reqseq);
2343
2344 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2345 return;
2346
2347 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2348 chan->expected_ack_seq, chan->unacked_frames);
2349
2350 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2351 ackseq = __next_seq(chan, ackseq)) {
2352
2353 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2354 if (acked_skb) {
2355 skb_unlink(acked_skb, &chan->tx_q);
2356 kfree_skb(acked_skb);
2357 chan->unacked_frames--;
2358 }
2359 }
2360
2361 chan->expected_ack_seq = reqseq;
2362
2363 if (chan->unacked_frames == 0)
2364 __clear_retrans_timer(chan);
2365
2366 BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
2367 }
2368
2369 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2370 {
2371 BT_DBG("chan %p", chan);
2372
2373 chan->expected_tx_seq = chan->buffer_seq;
2374 l2cap_seq_list_clear(&chan->srej_list);
2375 skb_queue_purge(&chan->srej_q);
2376 chan->rx_state = L2CAP_RX_STATE_RECV;
2377 }
2378
/* ERTM transmit-side state machine, XMIT state: frames may be sent
 * as soon as they are queued.  Events either transmit data, track
 * local-busy transitions, or move the channel to WAIT_F when a poll
 * (P=1) goes out.
 *
 * Returns 0; err exists for symmetry with the WAIT_F handler.
 */
static int l2cap_tx_state_xmit(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff_head *skbs, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* New outbound data: append to tx_q and transmit now. */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		/* If an RNR was sent while busy, poll the remote with an
		 * RR (P=1) so it learns we can receive again, then wait
		 * for the final response in WAIT_F.
		 */
		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		/* Peer acknowledged frames up to control->reqseq. */
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Poll the remote (P=1) and move to WAIT_F for the reply;
		 * the pending ack timer is superseded by the poll.
		 */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer fired: poll for the peer's state. */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}

	return err;
}
2454
2455 static int l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2456 struct l2cap_ctrl *control,
2457 struct sk_buff_head *skbs, u8 event)
2458 {
2459 int err = 0;
2460
2461 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2462 event);
2463
2464 switch (event) {
2465 case L2CAP_EV_DATA_REQUEST:
2466 if (chan->tx_send_head == NULL)
2467 chan->tx_send_head = skb_peek(skbs);
2468 /* Queue data, but don't send. */
2469 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2470 break;
2471 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2472 BT_DBG("Enter LOCAL_BUSY");
2473 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2474
2475 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2476 /* The SREJ_SENT state must be aborted if we are to
2477 * enter the LOCAL_BUSY state.
2478 */
2479 l2cap_abort_rx_srej_sent(chan);
2480 }
2481
2482 l2cap_send_ack(chan);
2483
2484 break;
2485 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2486 BT_DBG("Exit LOCAL_BUSY");
2487 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2488
2489 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2490 struct l2cap_ctrl local_control;
2491 memset(&local_control, 0, sizeof(local_control));
2492 local_control.sframe = 1;
2493 local_control.super = L2CAP_SUPER_RR;
2494 local_control.poll = 1;
2495 local_control.reqseq = chan->buffer_seq;
2496 l2cap_send_sframe(chan, &local_control);
2497
2498 chan->retry_count = 1;
2499 __set_monitor_timer(chan);
2500 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2501 }
2502 break;
2503 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2504 l2cap_process_reqseq(chan, control->reqseq);
2505
2506 /* Fall through */
2507
2508 case L2CAP_EV_RECV_FBIT:
2509 if (control && control->final) {
2510 __clear_monitor_timer(chan);
2511 if (chan->unacked_frames > 0)
2512 __set_retrans_timer(chan);
2513 chan->retry_count = 0;
2514 chan->tx_state = L2CAP_TX_STATE_XMIT;
2515 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2516 }
2517 break;
2518 case L2CAP_EV_EXPLICIT_POLL:
2519 /* Ignore */
2520 break;
2521 case L2CAP_EV_MONITOR_TO:
2522 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2523 l2cap_send_rr_or_rnr(chan, 1);
2524 __set_monitor_timer(chan);
2525 chan->retry_count++;
2526 } else {
2527 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2528 }
2529 break;
2530 default:
2531 break;
2532 }
2533
2534 return err;
2535 }
2536
2537 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2538 struct sk_buff_head *skbs, u8 event)
2539 {
2540 int err = 0;
2541
2542 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2543 chan, control, skbs, event, chan->tx_state);
2544
2545 switch (chan->tx_state) {
2546 case L2CAP_TX_STATE_XMIT:
2547 err = l2cap_tx_state_xmit(chan, control, skbs, event);
2548 break;
2549 case L2CAP_TX_STATE_WAIT_F:
2550 err = l2cap_tx_state_wait_f(chan, control, skbs, event);
2551 break;
2552 default:
2553 /* Ignore event */
2554 break;
2555 }
2556
2557 return err;
2558 }
2559
2560 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2561 struct l2cap_ctrl *control)
2562 {
2563 BT_DBG("chan %p, control %p", chan, control);
2564 l2cap_tx(chan, control, 0, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2565 }
2566
2567 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2568 struct l2cap_ctrl *control)
2569 {
2570 BT_DBG("chan %p, control %p", chan, control);
2571 l2cap_tx(chan, control, 0, L2CAP_EV_RECV_FBIT);
2572 }
2573
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* chan_lock protects the walk over the connection's channel list. */
	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* Clone per listener; on allocation failure this listener
		 * is simply skipped (best-effort delivery).
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* recv() takes ownership on success; free on rejection. */
		if (chan->ops->recv(chan->data, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2602
2603 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU (L2CAP header + command header +
 * payload), fragmenting the payload into MTU-sized continuation skbs
 * chained on frag_list when it does not fit in a single buffer.
 *
 * Returns the head skb on success, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
	       conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use a dedicated signalling channel ID. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* First fragment carries whatever payload fits after
		 * the two headers.
		 */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Freeing the head skb also frees all fragments chained so far. */
	kfree_skb(skb);
	return NULL;
}
2666
/* Pull one configuration option out of a request/response buffer.
 * Advances *ptr past the option and reports its type, length, and
 * value (1/2/4-byte values by copy, anything else as a pointer into
 * the buffer).  Returns the total number of bytes consumed.
 *
 * NOTE(review): opt->len comes straight from the remote device and is
 * not validated against the space remaining in the buffer here; the
 * callers bound the walk with a running "len >= L2CAP_CONF_OPT_SIZE"
 * check, but a large opt->len can still step *ptr past the end --
 * confirm every caller tolerates that.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer, not a copy. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2699
/* Append one configuration option at *ptr and advance *ptr past it.
 * 1/2/4-byte values are stored little-endian; any other length treats
 * val as a pointer to len bytes to copy.
 *
 * NOTE(review): no bounds check on the destination buffer -- callers
 * must size their buffers for the worst-case option set.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* val is a pointer to a len-byte structure (e.g. RFC, EFS). */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2729
/* Append an Extended Flow Specification option to a config request.
 * ERTM channels advertise the locally configured flow spec; streaming
 * channels use fixed best-effort values.  Other modes get no EFS
 * option at all.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
							(unsigned long) &efs);
}
2760
/* Delayed-work handler for the ERTM acknowledgment timer: send any
 * pending acknowledgment for frames received so far.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	l2cap_send_ack(chan);

	l2cap_chan_unlock(chan);

	/* Drops the channel reference presumably taken when the timer
	 * was armed -- confirm against __set_ack_timer.
	 */
	l2cap_chan_put(chan);
}
2776
/* Reset per-channel sequence/SDU state and, for ERTM channels, set up
 * the rx/tx state machines, delayed-work timers, and the SREJ and
 * retransmission sequence lists.
 *
 * Returns 0 on success or a negative errno if a sequence list cannot
 * be allocated.
 */
static inline int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming mode shares the reset above but needs none of the
	 * ERTM machinery below.
	 */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Undo the first allocation so nothing leaks. */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
2816
2817 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2818 {
2819 switch (mode) {
2820 case L2CAP_MODE_STREAMING:
2821 case L2CAP_MODE_ERTM:
2822 if (l2cap_mode_supported(mode, remote_feat_mask))
2823 return mode;
2824 /* fall through */
2825 default:
2826 return L2CAP_MODE_BASIC;
2827 }
2828 }
2829
2830 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2831 {
2832 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2833 }
2834
2835 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2836 {
2837 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2838 }
2839
2840 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2841 {
2842 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2843 __l2cap_ews_supported(chan)) {
2844 /* use extended control field */
2845 set_bit(FLAG_EXT_CTRL, &chan->flags);
2846 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2847 } else {
2848 chan->tx_win = min_t(u16, chan->tx_win,
2849 L2CAP_DEFAULT_TX_WINDOW);
2850 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2851 }
2852 }
2853
/* Build the body of an outgoing Configuration Request for this
 * channel into data: select the channel mode (first request only),
 * then append MTU, RFC, and optionally EFS/FCS/EWS options matching
 * that mode.
 *
 * Returns the number of bytes written (request header + options).
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens on the very first config exchange. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A "state 2" device insists on its configured mode. */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an explicit basic-mode RFC if the remote knows
		 * about the other modes at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;
		/* Timeouts are filled in by the responder per spec. */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* PDU size bounded by the link MTU minus worst-case
		 * header/FCS overhead.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		/* Extended window option rides alongside the RFC option. */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2974
/* Parse the accumulated Configuration Request (chan->conf_req /
 * chan->conf_len) from the remote and build our response into data:
 * walk the options, negotiate mode/MTU/RFC/EFS, and append the
 * options we accept or counter-propose.
 *
 * Returns the response length, or -ECONNREFUSED when negotiation
 * cannot succeed (mode mismatch after retries, unsupported EWS, or
 * incompatible EFS service type).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be ignored; others must be understood. */
		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			/* NOTE(review): remote_efs is set even when olen is
			 * wrong and efs stays uninitialized, yet efs fields
			 * are read below -- confirm a malformed EFS option
			 * cannot reach those reads.
			 */
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Echo unknown non-hint option types back. */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode may only be (re)negotiated on the first exchange. */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
					chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		/* Counter-propose our mode; refuse if already retried once. */
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
							sizeof(efs),
							(unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* An EWS option overrides the RFC window field. */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			/* We, as the responder, choose the timeouts. */
			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
						sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
						le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
						le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
3194
/* Parse a Configuration Response from the remote and build the
 * follow-up Configuration Request into data, adopting the values the
 * remote countered with (MTU, flush timeout, RFC, EWS, EFS).
 *
 * Returns the request length, or -ECONNREFUSED when the remote's
 * counter-proposal is unacceptable (mode change on a state-2 device,
 * basic-mode mismatch, or incompatible EFS service type).
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A state-2 device cannot accept a mode change. */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
							rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->tx_win = min_t(u16, val,
						L2CAP_DEFAULT_EXT_WINDOW);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			/* NOTE(review): if olen is wrong, efs is read below
			 * uninitialized -- confirm malformed EFS options
			 * cannot reach here.
			 */
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			break;
		}
	}

	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
						le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
						le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
3293
3294 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3295 {
3296 struct l2cap_conf_rsp *rsp = data;
3297 void *ptr = rsp->data;
3298
3299 BT_DBG("chan %p", chan);
3300
3301 rsp->scid = cpu_to_le16(chan->dcid);
3302 rsp->result = cpu_to_le16(result);
3303 rsp->flags = cpu_to_le16(flags);
3304
3305 return ptr - data;
3306 }
3307
/* Send the deferred success response for a connection that was held
 * pending (e.g. for security or authorization), then kick off the
 * configuration phase if a config request has not gone out yet.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Only the first caller sends the config request. */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3328
3329 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3330 {
3331 int type, olen;
3332 unsigned long val;
3333 struct l2cap_conf_rfc rfc;
3334
3335 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3336
3337 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3338 return;
3339
3340 while (len >= L2CAP_CONF_OPT_SIZE) {
3341 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3342
3343 switch (type) {
3344 case L2CAP_CONF_RFC:
3345 if (olen == sizeof(rfc))
3346 memcpy(&rfc, (void *)val, olen);
3347 goto done;
3348 }
3349 }
3350
3351 /* Use sane default values in case a misbehaving remote device
3352 * did not send an RFC option.
3353 */
3354 rfc.mode = chan->mode;
3355 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3356 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3357 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
3358
3359 BT_ERR("Expected RFC option was not found, using defaults");
3360
3361 done:
3362 switch (rfc.mode) {
3363 case L2CAP_MODE_ERTM:
3364 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3365 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3366 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3367 break;
3368 case L2CAP_MODE_STREAMING:
3369 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3370 }
3371 }
3372
/* Handle an incoming Command Reject.  The only case acted on is a
 * "command not understood" rejection of our outstanding information
 * request: stop waiting for the feature mask and proceed with
 * connection setup anyway.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
					cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3392
/* Handle an incoming Connection Request: find a listener on the PSM,
 * run security/backlog checks, create the child channel, and send a
 * Connection Response (success, pending, or a refusal).  May also
 * trigger an information request or the first config request.
 *
 * Always returns 0; refusals are reported to the remote in the
 * response, not to the caller.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	/* Lock order: connection channel list, then the parent socket. */
	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	/* If the remote's features are known, decide between immediate
	 * success, deferred accept, or pending authentication; otherwise
	 * answer "pending" and go fetch the feature mask first.
	 */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3522
/* Handle an incoming Connection Response: look the channel up by our
 * source CID (or by command ident if no CID was assigned yet) and
 * advance it to configuration on success, mark it pending, or tear it
 * down on refusal.
 *
 * Returns 0, or -EFAULT when no matching channel exists.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
						dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the first config request once. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3590
3591 static inline void set_default_fcs(struct l2cap_chan *chan)
3592 {
3593 /* FCS is enabled only in ERTM or streaming mode, if one or both
3594 * sides request it.
3595 */
3596 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3597 chan->fcs = L2CAP_FCS_NONE;
3598 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3599 chan->fcs = L2CAP_FCS_CRC16;
3600 }
3601
/* Handle an incoming L2CAP Configure Request on a BR/EDR link.
 *
 * Option data may arrive fragmented across several requests; fragments
 * are accumulated in chan->conf_req until the continuation flag clears,
 * then the whole request is parsed and answered.  When both directions
 * have finished configuring, the channel moves to BT_CONNECTED.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return the channel
 * locked — every exit path goes through 'unlock' below.  Confirm against
 * its definition.
 *
 * Returns 0 on success or a negative errno.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Config is only valid while connecting/configuring; otherwise
	 * reject with an invalid-CID command reject.
	 */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	/* 0x0001 is the continuation flag: more option fragments follow. */
	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finalize the channel. */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request hasn't gone out yet — send it now. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	 * Conf Rsp PENDING in the code above.
	 */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
			test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(chan, rsp,
						L2CAP_CONF_SUCCESS, 0x0000), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3711
/* Handle an incoming L2CAP Configure Response.
 *
 * Drives the local side of channel configuration: success completes our
 * half, PENDING defers it, UNACCEPT triggers a renegotiation attempt
 * (up to L2CAP_CONF_MAX_CONF_RSP times), and anything else tears the
 * channel down.  When both CONF_INPUT_DONE and CONF_OUTPUT_DONE are set
 * the channel transitions to BT_CONNECTED.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return the channel
 * locked — all paths exit through 'done' which unlocks it.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* check compatibility */

			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
						l2cap_build_conf_rsp(chan, buf,
							L2CAP_CONF_SUCCESS, 0x0000), buf);
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through: renegotiation limit exceeded, treat like
		 * any other failure result and disconnect.
		 */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	/* Continuation flag set: more response fragments to come. */
	if (flags & 0x01)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);
		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
3818
/* Handle an incoming L2CAP Disconnect Request.
 *
 * Sends the Disconnect Response, shuts down the owning socket, and
 * removes the channel.  An extra reference (l2cap_chan_hold/put) keeps
 * the channel alive across the ops->close() callback after it has been
 * deleted from the connection.  Lock order: conn->chan_lock, then the
 * channel lock, then the socket lock.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid. */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3864
/* Handle an incoming L2CAP Disconnect Response: the peer confirmed our
 * disconnect, so remove the channel with no error.  The hold/put pair
 * keeps the channel valid for ops->close() after l2cap_chan_del().
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3898
/* Handle an incoming L2CAP Information Request.
 *
 * Answers feature-mask and fixed-channel queries with the locally
 * supported capabilities (adjusted for the disable_ertm and enable_hs
 * module parameters); any other type gets a not-supported response.
 * Note this mutates the global l2cap_fixed_chan[0] A2MP bit based on
 * enable_hs before copying it into the response.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
						| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3948
/* Handle an incoming L2CAP Information Response.
 *
 * Part of connection setup: after a feature-mask response we may chase
 * it with a fixed-channel query; once the exchange completes (success,
 * failure, or fixed-channel answer) the info state is marked done and
 * pending channel connections are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* Peer supports fixed channels: query them next. */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4006
4007 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
4008 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4009 void *data)
4010 {
4011 struct l2cap_create_chan_req *req = data;
4012 struct l2cap_create_chan_rsp rsp;
4013 u16 psm, scid;
4014
4015 if (cmd_len != sizeof(*req))
4016 return -EPROTO;
4017
4018 if (!enable_hs)
4019 return -EINVAL;
4020
4021 psm = le16_to_cpu(req->psm);
4022 scid = le16_to_cpu(req->scid);
4023
4024 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
4025
4026 /* Placeholder: Always reject */
4027 rsp.dcid = 0;
4028 rsp.scid = cpu_to_le16(scid);
4029 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4030 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4031
4032 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4033 sizeof(rsp), &rsp);
4034
4035 return 0;
4036 }
4037
/* Handle an AMP Create Channel Response by delegating to the Connect
 * Response handler — presumably the two PDUs share the same layout for
 * the fields that handler reads (verify against the PDU definitions).
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4045
4046 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4047 u16 icid, u16 result)
4048 {
4049 struct l2cap_move_chan_rsp rsp;
4050
4051 BT_DBG("icid %d, result %d", icid, result);
4052
4053 rsp.icid = cpu_to_le16(icid);
4054 rsp.result = cpu_to_le16(result);
4055
4056 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4057 }
4058
4059 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4060 struct l2cap_chan *chan, u16 icid, u16 result)
4061 {
4062 struct l2cap_move_chan_cfm cfm;
4063 u8 ident;
4064
4065 BT_DBG("icid %d, result %d", icid, result);
4066
4067 ident = l2cap_get_ident(conn);
4068 if (chan)
4069 chan->ident = ident;
4070
4071 cfm.icid = cpu_to_le16(icid);
4072 cfm.result = cpu_to_le16(result);
4073
4074 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4075 }
4076
4077 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4078 u16 icid)
4079 {
4080 struct l2cap_move_chan_cfm_rsp rsp;
4081
4082 BT_DBG("icid %d", icid);
4083
4084 rsp.icid = cpu_to_le16(icid);
4085 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4086 }
4087
4088 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4089 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4090 {
4091 struct l2cap_move_chan_req *req = data;
4092 u16 icid = 0;
4093 u16 result = L2CAP_MR_NOT_ALLOWED;
4094
4095 if (cmd_len != sizeof(*req))
4096 return -EPROTO;
4097
4098 icid = le16_to_cpu(req->icid);
4099
4100 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
4101
4102 if (!enable_hs)
4103 return -EINVAL;
4104
4105 /* Placeholder: Always refuse */
4106 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4107
4108 return 0;
4109 }
4110
4111 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4112 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4113 {
4114 struct l2cap_move_chan_rsp *rsp = data;
4115 u16 icid, result;
4116
4117 if (cmd_len != sizeof(*rsp))
4118 return -EPROTO;
4119
4120 icid = le16_to_cpu(rsp->icid);
4121 result = le16_to_cpu(rsp->result);
4122
4123 BT_DBG("icid %d, result %d", icid, result);
4124
4125 /* Placeholder: Always unconfirmed */
4126 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4127
4128 return 0;
4129 }
4130
4131 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4132 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4133 {
4134 struct l2cap_move_chan_cfm *cfm = data;
4135 u16 icid, result;
4136
4137 if (cmd_len != sizeof(*cfm))
4138 return -EPROTO;
4139
4140 icid = le16_to_cpu(cfm->icid);
4141 result = le16_to_cpu(cfm->result);
4142
4143 BT_DBG("icid %d, result %d", icid, result);
4144
4145 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4146
4147 return 0;
4148 }
4149
4150 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4151 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4152 {
4153 struct l2cap_move_chan_cfm_rsp *rsp = data;
4154 u16 icid;
4155
4156 if (cmd_len != sizeof(*rsp))
4157 return -EPROTO;
4158
4159 icid = le16_to_cpu(rsp->icid);
4160
4161 BT_DBG("icid %d", icid);
4162
4163 return 0;
4164 }
4165
4166 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4167 u16 to_multiplier)
4168 {
4169 u16 max_latency;
4170
4171 if (min > max || min < 6 || max > 3200)
4172 return -EINVAL;
4173
4174 if (to_multiplier < 10 || to_multiplier > 3200)
4175 return -EINVAL;
4176
4177 if (max >= to_multiplier * 8)
4178 return -EINVAL;
4179
4180 max_latency = (to_multiplier * 8 / max) - 1;
4181 if (latency > 499 || latency > max_latency)
4182 return -EINVAL;
4183
4184 return 0;
4185 }
4186
/* Handle an LE Connection Parameter Update Request.
 *
 * Only valid when the local device is master.  The requested parameters
 * are validated with l2cap_check_conn_param(); a response (accepted or
 * rejected) is always sent, and on acceptance the controller is asked
 * to apply the new parameters via hci_le_conn_update().
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
4228
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * Echo requests are answered inline; unknown opcodes return -EINVAL so
 * the caller (l2cap_sig_channel) can send a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_create_channel_rsp(conn, cmd, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4310
4311 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4312 struct l2cap_cmd_hdr *cmd, u8 *data)
4313 {
4314 switch (cmd->code) {
4315 case L2CAP_COMMAND_REJ:
4316 return 0;
4317
4318 case L2CAP_CONN_PARAM_UPDATE_REQ:
4319 return l2cap_conn_param_update_req(conn, cmd, data);
4320
4321 case L2CAP_CONN_PARAM_UPDATE_RSP:
4322 return 0;
4323
4324 default:
4325 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4326 return -EINVAL;
4327 }
4328 }
4329
/* Process all signaling commands contained in one skb from the
 * signaling channel.  Each command is dispatched to the LE or BR/EDR
 * handler based on the link type; a failing handler triggers a Command
 * Reject back to the peer.  Consumes (frees) the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	/* Walk the concatenated command PDUs in the payload. */
	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A length past the buffer or a zero identifier means a
		 * corrupted PDU; stop parsing the rest of the payload.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
4376
/* Verify and strip the trailing FCS of a received ERTM/streaming frame.
 *
 * When CRC16 FCS is in use, the skb is trimmed by L2CAP_FCS_SIZE and
 * the received FCS is then read from just past the new skb->len — the
 * trimmed bytes are still present in the underlying buffer.  The CRC is
 * computed over the L2CAP header (which sits immediately before
 * skb->data) plus the remaining payload.
 *
 * Returns 0 on match (or when no FCS is configured), -EBADMSG on a
 * checksum mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
4397
/* Answer a poll (P=1) from the peer with a frame carrying F=1.
 *
 * If we are locally busy an RNR is sent immediately.  Otherwise pending
 * I-frames are flushed (one of which may carry the F-bit via
 * CONN_SEND_FBIT); if nothing carried the F-bit, a final RR is sent so
 * the poll is always answered.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote busy just cleared with frames still unacked: restart the
	 * retransmission timer.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
4431
/* Append new_frag to skb's frag_list, tracking the list tail through
 * *last_frag so appends stay O(1), and keep skb's length accounting
 * (len, data_len, truesize) consistent with the added fragment.
 *
 * NOTE(review): when the frag_list was empty, *last_frag is expected to
 * point at skb itself so the (*last_frag)->next assignment is harmless;
 * callers maintain that invariant (see l2cap_reassemble_sdu).
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4450
/* Reassemble a (possibly segmented) SDU from a received I-frame and
 * deliver complete SDUs via chan->ops->recv().
 *
 * Ownership: when the skb is consumed (handed to recv() or chained into
 * the in-progress SDU), the local 'skb' is set to NULL so the error
 * path does not double-free it.  On any error both the skb and any
 * partial SDU are discarded and the reassembly state is reset.
 *
 * Returns 0 on success, a negative errno on a protocol/size violation
 * or a failure reported by the recv() callback.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* Unsegmented frame while reassembly is in progress is a
		 * protocol error (err stays -EINVAL).
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan->data, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First fragment carries the total SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must not already hold the whole SDU. */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Overrunning the announced SDU length is an error. */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Final length must match exactly what was announced. */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan->data, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4532
4533 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4534 {
4535 u8 event;
4536
4537 if (chan->mode != L2CAP_MODE_ERTM)
4538 return;
4539
4540 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4541 l2cap_tx(chan, 0, 0, event);
4542 }
4543
/* Drain the SREJ queue of frames that are now in sequence.
 *
 * Delivers consecutive frames starting at buffer_seq to
 * l2cap_reassemble_sdu(), stopping at the first gap, a local-busy
 * condition, or a reassembly error.  Once the queue is empty the
 * receiver returns to the normal RECV state and acks the peer.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
4577
/* Handle a received SREJ S-frame: retransmit the single frame the peer
 * asked for.
 *
 * Disconnects on an impossible reqseq, a frame no longer in the tx
 * queue being past its retry limit, per ERTM rules.  The P/F-bit
 * bookkeeping (CONN_SREJ_ACT, srej_save_reqseq) prevents retransmitting
 * the same frame twice when a poll and its final response cross.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* SREJ for the next, not-yet-sent sequence number is invalid. */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx of zero means unlimited retransmissions. */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this final answers
			 * the SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
4635
/* Handle a received REJ S-frame: retransmit everything from reqseq on.
 *
 * Disconnects on an impossible reqseq or when the rejected frame has
 * hit its retry limit.  CONN_REJ_ACT guards against acting twice on the
 * same REJ when a poll/final exchange overlaps it.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* REJ for the next, not-yet-sent sequence number is invalid. */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
4672
/* Classify the txseq of a received I-frame relative to the receive
 * window and any outstanding SREJ state.
 *
 * Returns one of the L2CAP_TXSEQ_* classifications: expected, expected
 * under SREJ recovery, duplicate, unexpected (gap -> send SREJ), or
 * invalid (either safely ignorable or requiring disconnect, depending
 * on the tx window size — see the "double poll" discussion below).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
								chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
								chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
		__seq_offset(chan, chan->expected_tx_seq,
						chan->last_acked_seq)){
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
4759
4760 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4761 struct l2cap_ctrl *control,
4762 struct sk_buff *skb, u8 event)
4763 {
4764 int err = 0;
4765 bool skb_in_use = 0;
4766
4767 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4768 event);
4769
4770 switch (event) {
4771 case L2CAP_EV_RECV_IFRAME:
4772 switch (l2cap_classify_txseq(chan, control->txseq)) {
4773 case L2CAP_TXSEQ_EXPECTED:
4774 l2cap_pass_to_tx(chan, control);
4775
4776 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4777 BT_DBG("Busy, discarding expected seq %d",
4778 control->txseq);
4779 break;
4780 }
4781
4782 chan->expected_tx_seq = __next_seq(chan,
4783 control->txseq);
4784
4785 chan->buffer_seq = chan->expected_tx_seq;
4786 skb_in_use = 1;
4787
4788 err = l2cap_reassemble_sdu(chan, skb, control);
4789 if (err)
4790 break;
4791
4792 if (control->final) {
4793 if (!test_and_clear_bit(CONN_REJ_ACT,
4794 &chan->conn_state)) {
4795 control->final = 0;
4796 l2cap_retransmit_all(chan, control);
4797 l2cap_ertm_send(chan);
4798 }
4799 }
4800
4801 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4802 l2cap_send_ack(chan);
4803 break;
4804 case L2CAP_TXSEQ_UNEXPECTED:
4805 l2cap_pass_to_tx(chan, control);
4806
4807 /* Can't issue SREJ frames in the local busy state.
4808 * Drop this frame, it will be seen as missing
4809 * when local busy is exited.
4810 */
4811 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4812 BT_DBG("Busy, discarding unexpected seq %d",
4813 control->txseq);
4814 break;
4815 }
4816
4817 /* There was a gap in the sequence, so an SREJ
4818 * must be sent for each missing frame. The
4819 * current frame is stored for later use.
4820 */
4821 skb_queue_tail(&chan->srej_q, skb);
4822 skb_in_use = 1;
4823 BT_DBG("Queued %p (queue len %d)", skb,
4824 skb_queue_len(&chan->srej_q));
4825
4826 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4827 l2cap_seq_list_clear(&chan->srej_list);
4828 l2cap_send_srej(chan, control->txseq);
4829
4830 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4831 break;
4832 case L2CAP_TXSEQ_DUPLICATE:
4833 l2cap_pass_to_tx(chan, control);
4834 break;
4835 case L2CAP_TXSEQ_INVALID_IGNORE:
4836 break;
4837 case L2CAP_TXSEQ_INVALID:
4838 default:
4839 l2cap_send_disconn_req(chan->conn, chan,
4840 ECONNRESET);
4841 break;
4842 }
4843 break;
4844 case L2CAP_EV_RECV_RR:
4845 l2cap_pass_to_tx(chan, control);
4846 if (control->final) {
4847 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4848
4849 if (!test_and_clear_bit(CONN_REJ_ACT,
4850 &chan->conn_state)) {
4851 control->final = 0;
4852 l2cap_retransmit_all(chan, control);
4853 }
4854
4855 l2cap_ertm_send(chan);
4856 } else if (control->poll) {
4857 l2cap_send_i_or_rr_or_rnr(chan);
4858 } else {
4859 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4860 &chan->conn_state) &&
4861 chan->unacked_frames)
4862 __set_retrans_timer(chan);
4863
4864 l2cap_ertm_send(chan);
4865 }
4866 break;
4867 case L2CAP_EV_RECV_RNR:
4868 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4869 l2cap_pass_to_tx(chan, control);
4870 if (control && control->poll) {
4871 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4872 l2cap_send_rr_or_rnr(chan, 0);
4873 }
4874 __clear_retrans_timer(chan);
4875 l2cap_seq_list_clear(&chan->retrans_list);
4876 break;
4877 case L2CAP_EV_RECV_REJ:
4878 l2cap_handle_rej(chan, control);
4879 break;
4880 case L2CAP_EV_RECV_SREJ:
4881 l2cap_handle_srej(chan, control);
4882 break;
4883 default:
4884 break;
4885 }
4886
4887 if (skb && !skb_in_use) {
4888 BT_DBG("Freeing %p", skb);
4889 kfree_skb(skb);
4890 }
4891
4892 return err;
4893 }
4894
4895 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4896 struct l2cap_ctrl *control,
4897 struct sk_buff *skb, u8 event)
4898 {
4899 int err = 0;
4900 u16 txseq = control->txseq;
4901 bool skb_in_use = 0;
4902
4903 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4904 event);
4905
4906 switch (event) {
4907 case L2CAP_EV_RECV_IFRAME:
4908 switch (l2cap_classify_txseq(chan, txseq)) {
4909 case L2CAP_TXSEQ_EXPECTED:
4910 /* Keep frame for reassembly later */
4911 l2cap_pass_to_tx(chan, control);
4912 skb_queue_tail(&chan->srej_q, skb);
4913 skb_in_use = 1;
4914 BT_DBG("Queued %p (queue len %d)", skb,
4915 skb_queue_len(&chan->srej_q));
4916
4917 chan->expected_tx_seq = __next_seq(chan, txseq);
4918 break;
4919 case L2CAP_TXSEQ_EXPECTED_SREJ:
4920 l2cap_seq_list_pop(&chan->srej_list);
4921
4922 l2cap_pass_to_tx(chan, control);
4923 skb_queue_tail(&chan->srej_q, skb);
4924 skb_in_use = 1;
4925 BT_DBG("Queued %p (queue len %d)", skb,
4926 skb_queue_len(&chan->srej_q));
4927
4928 err = l2cap_rx_queued_iframes(chan);
4929 if (err)
4930 break;
4931
4932 break;
4933 case L2CAP_TXSEQ_UNEXPECTED:
4934 /* Got a frame that can't be reassembled yet.
4935 * Save it for later, and send SREJs to cover
4936 * the missing frames.
4937 */
4938 skb_queue_tail(&chan->srej_q, skb);
4939 skb_in_use = 1;
4940 BT_DBG("Queued %p (queue len %d)", skb,
4941 skb_queue_len(&chan->srej_q));
4942
4943 l2cap_pass_to_tx(chan, control);
4944 l2cap_send_srej(chan, control->txseq);
4945 break;
4946 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4947 /* This frame was requested with an SREJ, but
4948 * some expected retransmitted frames are
4949 * missing. Request retransmission of missing
4950 * SREJ'd frames.
4951 */
4952 skb_queue_tail(&chan->srej_q, skb);
4953 skb_in_use = 1;
4954 BT_DBG("Queued %p (queue len %d)", skb,
4955 skb_queue_len(&chan->srej_q));
4956
4957 l2cap_pass_to_tx(chan, control);
4958 l2cap_send_srej_list(chan, control->txseq);
4959 break;
4960 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4961 /* We've already queued this frame. Drop this copy. */
4962 l2cap_pass_to_tx(chan, control);
4963 break;
4964 case L2CAP_TXSEQ_DUPLICATE:
4965 /* Expecting a later sequence number, so this frame
4966 * was already received. Ignore it completely.
4967 */
4968 break;
4969 case L2CAP_TXSEQ_INVALID_IGNORE:
4970 break;
4971 case L2CAP_TXSEQ_INVALID:
4972 default:
4973 l2cap_send_disconn_req(chan->conn, chan,
4974 ECONNRESET);
4975 break;
4976 }
4977 break;
4978 case L2CAP_EV_RECV_RR:
4979 l2cap_pass_to_tx(chan, control);
4980 if (control->final) {
4981 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4982
4983 if (!test_and_clear_bit(CONN_REJ_ACT,
4984 &chan->conn_state)) {
4985 control->final = 0;
4986 l2cap_retransmit_all(chan, control);
4987 }
4988
4989 l2cap_ertm_send(chan);
4990 } else if (control->poll) {
4991 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4992 &chan->conn_state) &&
4993 chan->unacked_frames) {
4994 __set_retrans_timer(chan);
4995 }
4996
4997 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4998 l2cap_send_srej_tail(chan);
4999 } else {
5000 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5001 &chan->conn_state) &&
5002 chan->unacked_frames)
5003 __set_retrans_timer(chan);
5004
5005 l2cap_send_ack(chan);
5006 }
5007 break;
5008 case L2CAP_EV_RECV_RNR:
5009 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5010 l2cap_pass_to_tx(chan, control);
5011 if (control->poll) {
5012 l2cap_send_srej_tail(chan);
5013 } else {
5014 struct l2cap_ctrl rr_control;
5015 memset(&rr_control, 0, sizeof(rr_control));
5016 rr_control.sframe = 1;
5017 rr_control.super = L2CAP_SUPER_RR;
5018 rr_control.reqseq = chan->buffer_seq;
5019 l2cap_send_sframe(chan, &rr_control);
5020 }
5021
5022 break;
5023 case L2CAP_EV_RECV_REJ:
5024 l2cap_handle_rej(chan, control);
5025 break;
5026 case L2CAP_EV_RECV_SREJ:
5027 l2cap_handle_srej(chan, control);
5028 break;
5029 }
5030
5031 if (skb && !skb_in_use) {
5032 BT_DBG("Freeing %p", skb);
5033 kfree_skb(skb);
5034 }
5035
5036 return err;
5037 }
5038
5039 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5040 {
5041 /* Make sure reqseq is for a packet that has been sent but not acked */
5042 u16 unacked;
5043
5044 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5045 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5046 }
5047
5048 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5049 struct sk_buff *skb, u8 event)
5050 {
5051 int err = 0;
5052
5053 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5054 control, skb, event, chan->rx_state);
5055
5056 if (__valid_reqseq(chan, control->reqseq)) {
5057 switch (chan->rx_state) {
5058 case L2CAP_RX_STATE_RECV:
5059 err = l2cap_rx_state_recv(chan, control, skb, event);
5060 break;
5061 case L2CAP_RX_STATE_SREJ_SENT:
5062 err = l2cap_rx_state_srej_sent(chan, control, skb,
5063 event);
5064 break;
5065 default:
5066 /* shut it down */
5067 break;
5068 }
5069 } else {
5070 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5071 control->reqseq, chan->next_tx_seq,
5072 chan->expected_ack_seq);
5073 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5074 }
5075
5076 return err;
5077 }
5078
5079 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5080 struct sk_buff *skb)
5081 {
5082 int err = 0;
5083
5084 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5085 chan->rx_state);
5086
5087 if (l2cap_classify_txseq(chan, control->txseq) ==
5088 L2CAP_TXSEQ_EXPECTED) {
5089 l2cap_pass_to_tx(chan, control);
5090
5091 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5092 __next_seq(chan, chan->buffer_seq));
5093
5094 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5095
5096 l2cap_reassemble_sdu(chan, skb, control);
5097 } else {
5098 if (chan->sdu) {
5099 kfree_skb(chan->sdu);
5100 chan->sdu = NULL;
5101 }
5102 chan->sdu_last_frag = NULL;
5103 chan->sdu_len = 0;
5104
5105 if (skb) {
5106 BT_DBG("Freeing %p", skb);
5107 kfree_skb(skb);
5108 }
5109 }
5110
5111 chan->last_acked_seq = control->txseq;
5112 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5113
5114 return err;
5115 }
5116
5117 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5118 {
5119 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5120 u16 len;
5121 u8 event;
5122
5123 __unpack_control(chan, skb);
5124
5125 len = skb->len;
5126
5127 /*
5128 * We can just drop the corrupted I-frame here.
5129 * Receiver will miss it and start proper recovery
5130 * procedures and ask for retransmission.
5131 */
5132 if (l2cap_check_fcs(chan, skb))
5133 goto drop;
5134
5135 if (!control->sframe && control->sar == L2CAP_SAR_START)
5136 len -= L2CAP_SDULEN_SIZE;
5137
5138 if (chan->fcs == L2CAP_FCS_CRC16)
5139 len -= L2CAP_FCS_SIZE;
5140
5141 if (len > chan->mps) {
5142 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5143 goto drop;
5144 }
5145
5146 if (!control->sframe) {
5147 int err;
5148
5149 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5150 control->sar, control->reqseq, control->final,
5151 control->txseq);
5152
5153 /* Validate F-bit - F=0 always valid, F=1 only
5154 * valid in TX WAIT_F
5155 */
5156 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5157 goto drop;
5158
5159 if (chan->mode != L2CAP_MODE_STREAMING) {
5160 event = L2CAP_EV_RECV_IFRAME;
5161 err = l2cap_rx(chan, control, skb, event);
5162 } else {
5163 err = l2cap_stream_rx(chan, control, skb);
5164 }
5165
5166 if (err)
5167 l2cap_send_disconn_req(chan->conn, chan,
5168 ECONNRESET);
5169 } else {
5170 const u8 rx_func_to_event[4] = {
5171 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5172 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5173 };
5174
5175 /* Only I-frames are expected in streaming mode */
5176 if (chan->mode == L2CAP_MODE_STREAMING)
5177 goto drop;
5178
5179 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5180 control->reqseq, control->final, control->poll,
5181 control->super);
5182
5183 if (len != 0) {
5184 BT_ERR("%d", len);
5185 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5186 goto drop;
5187 }
5188
5189 /* Validate F and P bits */
5190 if (control->final && (control->poll ||
5191 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5192 goto drop;
5193
5194 event = rx_func_to_event[control->super];
5195 if (l2cap_rx(chan, control, skb, event))
5196 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5197 }
5198
5199 return 0;
5200
5201 drop:
5202 kfree_skb(skb);
5203 return 0;
5204 }
5205
/* Deliver an skb received on a connection-oriented data channel (@cid).
 *
 * l2cap_get_chan_by_scid() returns the channel LOCKED on success, so
 * every exit path below the lookup must go through the unlock at
 * "done" (the "drop" path deliberately falls through into it).
 * Always returns 0; the skb is consumed on every path.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return 0;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() returning 0 means it took ownership of skb */
		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() consumes the skb on all paths */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);

	return 0;
}
5255
5256 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
5257 {
5258 struct l2cap_chan *chan;
5259
5260 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5261 if (!chan)
5262 goto drop;
5263
5264 BT_DBG("chan %p, len %d", chan, skb->len);
5265
5266 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5267 goto drop;
5268
5269 if (chan->imtu < skb->len)
5270 goto drop;
5271
5272 if (!chan->ops->recv(chan->data, skb))
5273 return 0;
5274
5275 drop:
5276 kfree_skb(skb);
5277
5278 return 0;
5279 }
5280
5281 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5282 struct sk_buff *skb)
5283 {
5284 struct l2cap_chan *chan;
5285
5286 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5287 if (!chan)
5288 goto drop;
5289
5290 BT_DBG("chan %p, len %d", chan, skb->len);
5291
5292 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5293 goto drop;
5294
5295 if (chan->imtu < skb->len)
5296 goto drop;
5297
5298 if (!chan->ops->recv(chan->data, skb))
5299 return 0;
5300
5301 drop:
5302 kfree_skb(skb);
5303
5304 return 0;
5305 }
5306
5307 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5308 {
5309 struct l2cap_hdr *lh = (void *) skb->data;
5310 u16 cid, len;
5311 __le16 psm;
5312
5313 skb_pull(skb, L2CAP_HDR_SIZE);
5314 cid = __le16_to_cpu(lh->cid);
5315 len = __le16_to_cpu(lh->len);
5316
5317 if (len != skb->len) {
5318 kfree_skb(skb);
5319 return;
5320 }
5321
5322 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5323
5324 switch (cid) {
5325 case L2CAP_CID_LE_SIGNALING:
5326 case L2CAP_CID_SIGNALING:
5327 l2cap_sig_channel(conn, skb);
5328 break;
5329
5330 case L2CAP_CID_CONN_LESS:
5331 psm = get_unaligned((__le16 *) skb->data);
5332 skb_pull(skb, 2);
5333 l2cap_conless_channel(conn, psm, skb);
5334 break;
5335
5336 case L2CAP_CID_LE_DATA:
5337 l2cap_att_channel(conn, cid, skb);
5338 break;
5339
5340 case L2CAP_CID_SMP:
5341 if (smp_sig_channel(conn, skb))
5342 l2cap_conn_del(conn->hcon, EACCES);
5343 break;
5344
5345 default:
5346 l2cap_data_channel(conn, cid, skb);
5347 break;
5348 }
5349 }
5350
5351 /* ---- L2CAP interface with lower layer (HCI) ---- */
5352
5353 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5354 {
5355 int exact = 0, lm1 = 0, lm2 = 0;
5356 struct l2cap_chan *c;
5357
5358 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5359
5360 /* Find listening sockets and check their link_mode */
5361 read_lock(&chan_list_lock);
5362 list_for_each_entry(c, &chan_list, global_l) {
5363 struct sock *sk = c->sk;
5364
5365 if (c->state != BT_LISTEN)
5366 continue;
5367
5368 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5369 lm1 |= HCI_LM_ACCEPT;
5370 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5371 lm1 |= HCI_LM_MASTER;
5372 exact++;
5373 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5374 lm2 |= HCI_LM_ACCEPT;
5375 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5376 lm2 |= HCI_LM_MASTER;
5377 }
5378 }
5379 read_unlock(&chan_list_lock);
5380
5381 return exact ? lm1 : lm2;
5382 }
5383
5384 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5385 {
5386 struct l2cap_conn *conn;
5387
5388 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5389
5390 if (!status) {
5391 conn = l2cap_conn_add(hcon, status);
5392 if (conn)
5393 l2cap_conn_ready(conn);
5394 } else
5395 l2cap_conn_del(hcon, bt_to_errno(status));
5396
5397 return 0;
5398 }
5399
5400 int l2cap_disconn_ind(struct hci_conn *hcon)
5401 {
5402 struct l2cap_conn *conn = hcon->l2cap_data;
5403
5404 BT_DBG("hcon %p", hcon);
5405
5406 if (!conn)
5407 return HCI_ERROR_REMOTE_USER_TERM;
5408 return conn->disc_reason;
5409 }
5410
/* HCI callback: the ACL link went down; tear down all L2CAP state for
 * it, translating the HCI reason code to an errno for the sockets.
 */
int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
	return 0;
}
5418
5419 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5420 {
5421 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5422 return;
5423
5424 if (encrypt == 0x00) {
5425 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5426 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5427 } else if (chan->sec_level == BT_SECURITY_HIGH)
5428 l2cap_chan_close(chan, ECONNREFUSED);
5429 } else {
5430 if (chan->sec_level == BT_SECURITY_MEDIUM)
5431 __clear_chan_timer(chan);
5432 }
5433 }
5434
/* HCI callback: security (authentication/encryption) state changed on
 * a link.  Walks every channel on the connection and advances, stalls
 * or aborts its setup depending on its state and the outcome.
 *
 * Locking: conn->chan_lock protects the channel list; each channel is
 * individually locked for its portion of the walk, and the BT_CONNECT2
 * branch additionally takes the socket lock.  Every "continue" path
 * must have released the channel lock first.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	if (hcon->type == LE_LINK) {
		/* Encryption is up on LE: distribute SMP keys and stop
		 * the pairing security timer.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan->scid %d", chan->scid);

		/* LE ATT channel: becomes ready as soon as the link is
		 * encrypted; nothing else to negotiate.
		 */
		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Channel already has a connect request in flight */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established channel: just re-evaluate its security */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security was a precondition for our outgoing
			 * connect: send it now, or time the channel out.
			 */
			if (!status) {
				l2cap_send_conn_req(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect was waiting on security:
			 * answer it with success, pend (deferred setup),
			 * or a security block.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
5535
/* HCI callback: one ACL data packet arrived for this link.
 *
 * An L2CAP frame may be fragmented across several ACL packets: a start
 * fragment (no ACL_CONT flag) carries the Basic L2CAP header with the
 * total length, continuation fragments (ACL_CONT) carry the rest.
 * Fragments are accumulated in conn->rx_skb / conn->rx_len until the
 * frame is complete, then handed to l2cap_recv_frame().  Any framing
 * inconsistency marks the connection unreliable and drops the data.
 *
 * The incoming skb is always consumed (its payload is copied into the
 * reassembly buffer, so the original is freed on every path).
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		int len;

		/* A start fragment while reassembly is in progress means
		 * the previous frame was truncated — discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start fragment */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
5627
5628 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5629 {
5630 struct l2cap_chan *c;
5631
5632 read_lock(&chan_list_lock);
5633
5634 list_for_each_entry(c, &chan_list, global_l) {
5635 struct sock *sk = c->sk;
5636
5637 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5638 batostr(&bt_sk(sk)->src),
5639 batostr(&bt_sk(sk)->dst),
5640 c->state, __le16_to_cpu(c->psm),
5641 c->scid, c->dcid, c->imtu, c->omtu,
5642 c->sec_level, c->mode);
5643 }
5644
5645 read_unlock(&chan_list_lock);
5646
5647 return 0;
5648 }
5649
/* debugfs open hook: bind the seq_file single-show handler */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5654
/* File operations for the read-only "l2cap" debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs entry, kept for removal in l2cap_exit() */
static struct dentry *l2cap_debugfs;
5663
5664 int __init l2cap_init(void)
5665 {
5666 int err;
5667
5668 err = l2cap_init_sockets();
5669 if (err < 0)
5670 return err;
5671
5672 if (bt_debugfs) {
5673 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5674 bt_debugfs, NULL, &l2cap_debugfs_fops);
5675 if (!l2cap_debugfs)
5676 BT_ERR("Failed to create L2CAP debug file");
5677 }
5678
5679 return 0;
5680 }
5681
/* Module teardown: remove the debugfs entry and unregister sockets */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5687
/* Module parameter: allow disabling ERTM support at load/run time */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");