Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41
/* Module-level switch: when true, ERTM and streaming modes are not
 * offered (see l2cap_mode_supported()). */
bool disable_ertm;

/* Locally supported feature mask; ERTM/streaming bits are OR-ed in at
 * runtime in l2cap_mode_supported() when ERTM is enabled. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap advertised to peers. */
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };

/* All L2CAP channels in the system, protected by chan_list_lock. */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);
49
50 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
52 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
53 void *data);
54 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
57
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
60
61 /* ---- L2CAP channels ---- */
62
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
64 {
65 struct l2cap_chan *c;
66
67 list_for_each_entry(c, &conn->chan_l, list) {
68 if (c->dcid == cid)
69 return c;
70 }
71 return NULL;
72 }
73
74 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
75 {
76 struct l2cap_chan *c;
77
78 list_for_each_entry(c, &conn->chan_l, list) {
79 if (c->scid == cid)
80 return c;
81 }
82 return NULL;
83 }
84
85 /* Find channel with given SCID.
86 * Returns locked channel. */
87 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
88 {
89 struct l2cap_chan *c;
90
91 mutex_lock(&conn->chan_lock);
92 c = __l2cap_get_chan_by_scid(conn, cid);
93 if (c)
94 l2cap_chan_lock(c);
95 mutex_unlock(&conn->chan_lock);
96
97 return c;
98 }
99
100 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
101 {
102 struct l2cap_chan *c;
103
104 list_for_each_entry(c, &conn->chan_l, list) {
105 if (c->ident == ident)
106 return c;
107 }
108 return NULL;
109 }
110
111 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
112 {
113 struct l2cap_chan *c;
114
115 list_for_each_entry(c, &chan_list, global_l) {
116 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
117 return c;
118 }
119 return NULL;
120 }
121
122 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
123 {
124 int err;
125
126 write_lock(&chan_list_lock);
127
128 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
129 err = -EADDRINUSE;
130 goto done;
131 }
132
133 if (psm) {
134 chan->psm = psm;
135 chan->sport = psm;
136 err = 0;
137 } else {
138 u16 p;
139
140 err = -EINVAL;
141 for (p = 0x1001; p < 0x1100; p += 2)
142 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
143 chan->psm = cpu_to_le16(p);
144 chan->sport = cpu_to_le16(p);
145 err = 0;
146 break;
147 }
148 }
149
150 done:
151 write_unlock(&chan_list_lock);
152 return err;
153 }
154
155 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
156 {
157 write_lock(&chan_list_lock);
158
159 chan->scid = scid;
160
161 write_unlock(&chan_list_lock);
162
163 return 0;
164 }
165
166 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
167 {
168 u16 cid = L2CAP_CID_DYN_START;
169
170 for (; cid < L2CAP_CID_DYN_END; cid++) {
171 if (!__l2cap_get_chan_by_scid(conn, cid))
172 return cid;
173 }
174
175 return 0;
176 }
177
/* Transition @chan to @state and notify the channel's owner via the
 * state_change callback. Caller is responsible for any locking
 * (see l2cap_state_change() for the sock-locked variant).
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
186
187 static void l2cap_state_change(struct l2cap_chan *chan, int state)
188 {
189 struct sock *sk = chan->sk;
190
191 lock_sock(sk);
192 __l2cap_state_change(chan, state);
193 release_sock(sk);
194 }
195
196 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
197 {
198 struct sock *sk = chan->sk;
199
200 sk->sk_err = err;
201 }
202
203 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
204 {
205 struct sock *sk = chan->sk;
206
207 lock_sock(sk);
208 __l2cap_chan_set_err(chan, err);
209 release_sock(sk);
210 }
211
/* Arm the ERTM retransmission timer, but only when no monitor timer
 * is already pending (NOTE(review): presumably the monitor timer
 * supersedes retransmission while a poll is outstanding — confirm
 * against the ERTM state machine) and a retransmit timeout has been
 * negotiated.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
220
/* Arm the ERTM monitor timer (if a timeout is configured), cancelling
 * any pending retransmission timer first — only one of the two runs
 * at a time.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
229
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
231 u16 seq)
232 {
233 struct sk_buff *skb;
234
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
237 return skb;
238 }
239
240 return NULL;
241 }
242
243 /* ---- L2CAP sequence number lists ---- */
244
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
251 * allocs or frees.
252 */
253
/* Initialise @seq_list to hold up to @size sequence numbers.
 * Returns 0 on success or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Power-of-two size lets "seq & mask" index the array. */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
276
/* Release the backing array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
281
/* Return true if @seq is currently linked into @seq_list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
288
/* Unlink @seq from @seq_list and return it, or L2CAP_SEQ_LIST_CLEAR
 * if the list is empty or @seq is not present. Head removal is O(1);
 * removing from the middle walks the singly-linked chain.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			/* Removed the last entry; reset both sentinels. */
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
322
/* Pop and return the head sequence number, or L2CAP_SEQ_LIST_CLEAR
 * when the list is empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
328
/* Empty @seq_list, clearing every slot of the backing array.
 * A no-op when the list is already empty.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
342
/* Append @seq at the tail of @seq_list in O(1). A sequence number
 * that is already a member is left in place.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* Empty list: the new entry is also the head. */
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
360
/* Delayed-work handler run when the channel timer fires: close the
 * channel with an errno chosen from its current state.
 *
 * Lock order is conn->chan_lock then the channel lock, matching the
 * rest of this file (e.g. l2cap_conn_start()).
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* ops->close runs without the channel lock held. */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the timer's channel reference (presumably taken when the
	 * timer was armed via l2cap_set_timer — confirm). */
	l2cap_chan_put(chan);
}
390
391 struct l2cap_chan *l2cap_chan_create(void)
392 {
393 struct l2cap_chan *chan;
394
395 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
396 if (!chan)
397 return NULL;
398
399 mutex_init(&chan->lock);
400
401 write_lock(&chan_list_lock);
402 list_add(&chan->global_l, &chan_list);
403 write_unlock(&chan_list_lock);
404
405 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
406
407 chan->state = BT_OPEN;
408
409 atomic_set(&chan->refcnt, 1);
410
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
413
414 BT_DBG("chan %p", chan);
415
416 return chan;
417 }
418
419 static void l2cap_chan_destroy(struct l2cap_chan *chan)
420 {
421 BT_DBG("chan %p", chan);
422
423 write_lock(&chan_list_lock);
424 list_del(&chan->global_l);
425 write_unlock(&chan_list_lock);
426
427 kfree(chan);
428 }
429
/* Take an additional reference on @c. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));

	atomic_inc(&c->refcnt);
}
436
/* Drop a reference on @c; the last put frees the channel. */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));

	if (atomic_dec_and_test(&c->refcnt))
		l2cap_chan_destroy(c);
}
444
/* Reset @chan's negotiable parameters to the spec defaults used
 * before configuration (FCS, ERTM window sizes, security level).
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
456
/* Attach @chan to @conn: assign CIDs/MTUs according to the channel
 * type, set best-effort QoS defaults, take a channel reference and
 * link it on the connection's channel list.
 * Caller must hold conn->chan_lock (see l2cap_chan_add()).
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until something more specific
	 * happens on this link. */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort QoS defaults for the local side. */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;

	/* Reference held by conn->chan_l; dropped in l2cap_chan_del(). */
	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
512
513 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
514 {
515 mutex_lock(&conn->chan_lock);
516 __l2cap_chan_add(conn, chan);
517 mutex_unlock(&conn->chan_lock);
518 }
519
520 void l2cap_chan_del(struct l2cap_chan *chan, int err)
521 {
522 struct l2cap_conn *conn = chan->conn;
523
524 __clear_chan_timer(chan);
525
526 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
527
528 if (conn) {
529 /* Delete from channel list */
530 list_del(&chan->list);
531
532 l2cap_chan_put(chan);
533
534 chan->conn = NULL;
535
536 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
537 hci_conn_put(conn->hcon);
538 }
539
540 if (chan->ops->teardown)
541 chan->ops->teardown(chan, err);
542
543 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
544 return;
545
546 switch(chan->mode) {
547 case L2CAP_MODE_BASIC:
548 break;
549
550 case L2CAP_MODE_ERTM:
551 __clear_retrans_timer(chan);
552 __clear_monitor_timer(chan);
553 __clear_ack_timer(chan);
554
555 skb_queue_purge(&chan->srej_q);
556
557 l2cap_seq_list_free(&chan->srej_list);
558 l2cap_seq_list_free(&chan->retrans_list);
559
560 /* fall through */
561
562 case L2CAP_MODE_STREAMING:
563 skb_queue_purge(&chan->tx_q);
564 break;
565 }
566
567 return;
568 }
569
/* Close @chan for @reason, driving the appropriate state transition:
 * send a Disconnection Request for connected ACL channels, reject a
 * half-open incoming connection (BT_CONNECT2), or tear the channel
 * down directly. Callers hold the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan,
	       state_to_string(chan->state), sk);

	switch (chan->state) {
	case BT_LISTEN:
		if (chan->ops->teardown)
			chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			/* Allow sk_sndtimeo for the peer to answer the
			 * disconnect request before the timer closes
			 * the channel anyway. */
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Deferred setup means the connection was held
			 * for authorisation: report "security blocked";
			 * otherwise reject with "bad PSM". */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		if (chan->ops->teardown)
			chan->ops->teardown(chan, 0);
		break;
	}
}
628
629 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
630 {
631 if (chan->chan_type == L2CAP_CHAN_RAW) {
632 switch (chan->sec_level) {
633 case BT_SECURITY_HIGH:
634 return HCI_AT_DEDICATED_BONDING_MITM;
635 case BT_SECURITY_MEDIUM:
636 return HCI_AT_DEDICATED_BONDING;
637 default:
638 return HCI_AT_NO_BONDING;
639 }
640 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
641 if (chan->sec_level == BT_SECURITY_LOW)
642 chan->sec_level = BT_SECURITY_SDP;
643
644 if (chan->sec_level == BT_SECURITY_HIGH)
645 return HCI_AT_NO_BONDING_MITM;
646 else
647 return HCI_AT_NO_BONDING;
648 } else {
649 switch (chan->sec_level) {
650 case BT_SECURITY_HIGH:
651 return HCI_AT_GENERAL_BONDING_MITM;
652 case BT_SECURITY_MEDIUM:
653 return HCI_AT_GENERAL_BONDING;
654 default:
655 return HCI_AT_NO_BONDING;
656 }
657 }
658 }
659
660 /* Service level security */
661 int l2cap_chan_check_security(struct l2cap_chan *chan)
662 {
663 struct l2cap_conn *conn = chan->conn;
664 __u8 auth_type;
665
666 auth_type = l2cap_get_auth_type(chan);
667
668 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
669 }
670
/* Allocate the next signalling-command identifier for @conn,
 * wrapping within the kernel-reserved range.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
692
/* Build and transmit an L2CAP signalling command on @conn.
 * Silently drops the command if the skb cannot be built.
 * Signalling traffic is sent at maximum HCI priority and forces the
 * link active.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use non-flushable packets when the controller supports them. */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
713
/* Hand a fully built data frame for @chan to the HCI layer,
 * selecting flushable vs non-flushable ACL flags from the channel
 * flags and controller capability.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
731
732 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
733 {
734 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
735 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
736
737 if (enh & L2CAP_CTRL_FRAME_TYPE) {
738 /* S-Frame */
739 control->sframe = 1;
740 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
741 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
742
743 control->sar = 0;
744 control->txseq = 0;
745 } else {
746 /* I-Frame */
747 control->sframe = 0;
748 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
749 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
750
751 control->poll = 0;
752 control->super = 0;
753 }
754 }
755
756 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
757 {
758 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
759 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
760
761 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
762 /* S-Frame */
763 control->sframe = 1;
764 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
765 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
766
767 control->sar = 0;
768 control->txseq = 0;
769 } else {
770 /* I-Frame */
771 control->sframe = 0;
772 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
773 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
774
775 control->poll = 0;
776 control->super = 0;
777 }
778 }
779
/* Decode @skb's leading control field into the skb control block and
 * strip it from the data, choosing the enhanced or extended layout
 * from the channel's FLAG_EXT_CTRL flag.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
793
794 static u32 __pack_extended_control(struct l2cap_ctrl *control)
795 {
796 u32 packed;
797
798 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
799 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
800
801 if (control->sframe) {
802 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
803 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
804 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
805 } else {
806 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
807 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
808 }
809
810 return packed;
811 }
812
813 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
814 {
815 u16 packed;
816
817 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
818 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
819
820 if (control->sframe) {
821 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
822 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
823 packed |= L2CAP_CTRL_FRAME_TYPE;
824 } else {
825 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
826 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
827 }
828
829 return packed;
830 }
831
832 static inline void __pack_control(struct l2cap_chan *chan,
833 struct l2cap_ctrl *control,
834 struct sk_buff *skb)
835 {
836 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
837 put_unaligned_le32(__pack_extended_control(control),
838 skb->data + L2CAP_HDR_SIZE);
839 } else {
840 put_unaligned_le16(__pack_enhanced_control(control),
841 skb->data + L2CAP_HDR_SIZE);
842 }
843 }
844
845 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
846 {
847 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
848 return L2CAP_EXT_HDR_SIZE;
849 else
850 return L2CAP_ENH_HDR_SIZE;
851 }
852
/* Build an S-frame PDU (header + control field + optional FCS) for
 * @chan carrying the already-packed @control word.
 * Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	/* Control word size follows the negotiated layout. */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything written so far, including the header. */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
885
/* Send the supervisory frame described by @control on @chan,
 * piggy-backing a pending F-bit, maintaining the RNR-sent state and
 * suppressing a redundant ack timer when the frame acknowledges data.
 * Ignores @control if it does not describe an S-frame.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit is carried on any non-poll S-frame. */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* RR/RNR acknowledge up to reqseq, so a separate delayed ack
	 * is no longer needed; SREJ does not acknowledge. */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
923
924 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
925 {
926 struct l2cap_ctrl control;
927
928 BT_DBG("chan %p, poll %d", chan, poll);
929
930 memset(&control, 0, sizeof(control));
931 control.sframe = 1;
932 control.poll = poll;
933
934 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
935 control.super = L2CAP_SUPER_RNR;
936 else
937 control.super = L2CAP_SUPER_RR;
938
939 control.reqseq = chan->buffer_seq;
940 l2cap_send_sframe(chan, &control);
941 }
942
/* True when no Connection Request is currently outstanding for @chan. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
947
/* Send an L2CAP Connection Request for @chan and mark the request as
 * pending; the allocated ident is stored for matching the response.
 */
static void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
962
/* Mark @chan fully connected: clear all configuration state, stop the
 * channel timer and notify the owner via ops->ready.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
973
/* Kick off connection establishment for @chan. LE links are ready
 * immediately. On BR/EDR, a Connection Request is sent only after the
 * remote feature mask has been learned; otherwise an Information
 * Request is issued first and the connection continues when the
 * response arrives.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight: wait for it. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan))
			l2cap_send_conn_req(chan);
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the information response. */
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
			       L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
1003
1004 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1005 {
1006 u32 local_feat_mask = l2cap_feat_mask;
1007 if (!disable_ertm)
1008 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1009
1010 switch (mode) {
1011 case L2CAP_MODE_ERTM:
1012 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1013 case L2CAP_MODE_STREAMING:
1014 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1015 default:
1016 return 0x00;
1017 }
1018 }
1019
/* Send a Disconnection Request for @chan, stop any ERTM timers, move
 * the channel to BT_DISCONN and record @err on the socket. A2MP fixed
 * channels have no signalling disconnect and only change state.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		__l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
		       L2CAP_DISCONN_REQ, sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1049
1050 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on @conn and advance its
 * setup: send a Connection Request for outgoing channels in
 * BT_CONNECT, or answer (accept/defer/pend) incoming channels in
 * BT_CONNECT2. Called once the feature exchange completes and after
 * security status changes.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe: l2cap_chan_close() below may unlink entries. */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Wait until security is settled and no request
			 * is already in flight. */
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* A state-2 device cannot fall back to basic
			 * mode: close if the needed mode is missing. */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_send_conn_req(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must authorise:
					 * answer "pending" and wake the
					 * listening socket. */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a freshly accepted channel proceeds to
			 * configuration. */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1133
/* Find a channel in @state with source CID @cid, preferring an exact
 * src/dst address match and falling back to the closest wildcard
 * (BDADDR_ANY) match. Returns the channel (not locked, despite the
 * historical comment) or NULL.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1176
/* Hand an incoming LE link to the channel listening on the LE data CID
 * (if any): spawn a child channel, attach it to the connection and mark
 * it ready.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	/* Ask the listener to create a child channel for this link. */
	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	sk = chan->sk;

	/* Keep the HCI link alive while the channel exists. */
	hci_conn_hold(conn->hcon);
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	/* Queue the child socket on the listener's accept queue. */
	bt_accept_enqueue(parent, sk);

	l2cap_chan_add(conn, chan);

	/* LE data channels need no configuration exchange. */
	l2cap_chan_ready(chan);

clean:
	release_sock(parent);
}
1215
/* Called once the underlying HCI link is up: kick every channel on this
 * connection into its next connection-setup step.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: offer it to a listening LE channel. */
	if (!hcon->out && hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: raise security before data flows. */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP fixed channels manage their own state. */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			/* LE channels are ready as soon as the required
			 * security level is met.
			 */
			if (smp_conn_security(hcon, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Raw/connectionless channels are connected as
			 * soon as the link exists.
			 */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1260
1261 /* Notify sockets that we cannot guaranty reliability anymore */
1262 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1263 {
1264 struct l2cap_chan *chan;
1265
1266 BT_DBG("conn %p", conn);
1267
1268 mutex_lock(&conn->chan_lock);
1269
1270 list_for_each_entry(chan, &conn->chan_l, list) {
1271 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1272 __l2cap_chan_set_err(chan, err);
1273 }
1274
1275 mutex_unlock(&conn->chan_lock);
1276 }
1277
1278 static void l2cap_info_timeout(struct work_struct *work)
1279 {
1280 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1281 info_timer.work);
1282
1283 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1284 conn->info_ident = 0;
1285
1286 l2cap_conn_start(conn);
1287 }
1288
/* Tear down an L2CAP connection: kill every channel, cancel pending
 * timers and free the connection state attached to the HCI link.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame. */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a reference so ->close() can still run after the
		 * channel has been unlinked from the connection.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* Only cancel timers that were actually armed. */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1331
/* SMP pairing took too long: destroy the SMP context and drop the
 * whole LE connection.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	/* test_and_clear guards against racing with a normal SMP
	 * completion that tears the pairing state down first.
	 */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1344
1345 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1346 {
1347 struct l2cap_conn *conn = hcon->l2cap_data;
1348 struct hci_chan *hchan;
1349
1350 if (conn || status)
1351 return conn;
1352
1353 hchan = hci_chan_create(hcon);
1354 if (!hchan)
1355 return NULL;
1356
1357 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1358 if (!conn) {
1359 hci_chan_del(hchan);
1360 return NULL;
1361 }
1362
1363 hcon->l2cap_data = conn;
1364 conn->hcon = hcon;
1365 conn->hchan = hchan;
1366
1367 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1368
1369 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1370 conn->mtu = hcon->hdev->le_mtu;
1371 else
1372 conn->mtu = hcon->hdev->acl_mtu;
1373
1374 conn->src = &hcon->hdev->bdaddr;
1375 conn->dst = &hcon->dst;
1376
1377 conn->feat_mask = 0;
1378
1379 spin_lock_init(&conn->lock);
1380 mutex_init(&conn->chan_lock);
1381
1382 INIT_LIST_HEAD(&conn->chan_l);
1383
1384 if (hcon->type == LE_LINK)
1385 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1386 else
1387 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1388
1389 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1390
1391 return conn;
1392 }
1393
1394 /* ---- Socket interface ---- */
1395
1396 /* Find socket with psm and source / destination bdaddr.
1397 * Returns closest match.
1398 */
1399 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1400 bdaddr_t *src,
1401 bdaddr_t *dst)
1402 {
1403 struct l2cap_chan *c, *c1 = NULL;
1404
1405 read_lock(&chan_list_lock);
1406
1407 list_for_each_entry(c, &chan_list, global_l) {
1408 struct sock *sk = c->sk;
1409
1410 if (state && c->state != state)
1411 continue;
1412
1413 if (c->psm == psm) {
1414 int src_match, dst_match;
1415 int src_any, dst_any;
1416
1417 /* Exact match. */
1418 src_match = !bacmp(&bt_sk(sk)->src, src);
1419 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1420 if (src_match && dst_match) {
1421 read_unlock(&chan_list_lock);
1422 return c;
1423 }
1424
1425 /* Closest match */
1426 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1427 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1428 if ((src_match && dst_any) || (src_any && dst_match) ||
1429 (src_any && dst_any))
1430 c1 = c;
1431 }
1432 }
1433
1434 read_unlock(&chan_list_lock);
1435
1436 return c1;
1437 }
1438
/* Initiate an outgoing L2CAP connection on @chan to @dst, creating or
 * reusing the underlying ACL/LE link.  Returns 0 on success (or when a
 * connect is already in progress) and a negative errno otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(chan->psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a CID. */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; everything else runs
	 * over ACL.
	 */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* An LE link carries at most one L2CAP channel. */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* NOTE(review): the channel lock is dropped around
	 * l2cap_chan_add(), presumably to respect its internal lock
	 * ordering (conn->chan_lock) — the channel is briefly unlocked
	 * here.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Raw/connectionless channels connect as soon as
			 * security allows; no L2CAP signalling needed.
			 */
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1577
/* Sleep until every outstanding I-frame has been acked by the peer, a
 * signal arrives, or a socket error is raised.  Called with the socket
 * locked; the lock is dropped while sleeping.  Returns 0 or a negative
 * errno.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* schedule_timeout() returned 0: re-arm the poll
		 * interval for the next round.
		 */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so the ack path
		 * can make progress.
		 */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1609
1610 static void l2cap_monitor_timeout(struct work_struct *work)
1611 {
1612 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1613 monitor_timer.work);
1614
1615 BT_DBG("chan %p", chan);
1616
1617 l2cap_chan_lock(chan);
1618
1619 if (!chan->conn) {
1620 l2cap_chan_unlock(chan);
1621 l2cap_chan_put(chan);
1622 return;
1623 }
1624
1625 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1626
1627 l2cap_chan_unlock(chan);
1628 l2cap_chan_put(chan);
1629 }
1630
1631 static void l2cap_retrans_timeout(struct work_struct *work)
1632 {
1633 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1634 retrans_timer.work);
1635
1636 BT_DBG("chan %p", chan);
1637
1638 l2cap_chan_lock(chan);
1639
1640 if (!chan->conn) {
1641 l2cap_chan_unlock(chan);
1642 l2cap_chan_put(chan);
1643 return;
1644 }
1645
1646 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1647 l2cap_chan_unlock(chan);
1648 l2cap_chan_put(chan);
1649 }
1650
1651 static void l2cap_streaming_send(struct l2cap_chan *chan,
1652 struct sk_buff_head *skbs)
1653 {
1654 struct sk_buff *skb;
1655 struct l2cap_ctrl *control;
1656
1657 BT_DBG("chan %p, skbs %p", chan, skbs);
1658
1659 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1660
1661 while (!skb_queue_empty(&chan->tx_q)) {
1662
1663 skb = skb_dequeue(&chan->tx_q);
1664
1665 bt_cb(skb)->control.retries = 1;
1666 control = &bt_cb(skb)->control;
1667
1668 control->reqseq = 0;
1669 control->txseq = chan->next_tx_seq;
1670
1671 __pack_control(chan, control, skb);
1672
1673 if (chan->fcs == L2CAP_FCS_CRC16) {
1674 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1675 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1676 }
1677
1678 l2cap_do_send(chan, skb);
1679
1680 BT_DBG("Sent txseq %u", control->txseq);
1681
1682 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1683 chan->frames_sent++;
1684 }
1685 }
1686
/* Transmit queued I-frames until the transmit window fills, the queue
 * empties, or the tx state machine leaves the XMIT state.  Returns the
 * number of frames sent or a negative errno.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Peer asked us to hold off (RNR). */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame piggy-backs an ack up to buffer_seq. */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send pointer; the original skb stays on
		 * tx_q for possible retransmission.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1753
1754 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1755 {
1756 struct l2cap_ctrl control;
1757 struct sk_buff *skb;
1758 struct sk_buff *tx_skb;
1759 u16 seq;
1760
1761 BT_DBG("chan %p", chan);
1762
1763 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1764 return;
1765
1766 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1767 seq = l2cap_seq_list_pop(&chan->retrans_list);
1768
1769 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1770 if (!skb) {
1771 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1772 seq);
1773 continue;
1774 }
1775
1776 bt_cb(skb)->control.retries++;
1777 control = bt_cb(skb)->control;
1778
1779 if (chan->max_tx != 0 &&
1780 bt_cb(skb)->control.retries > chan->max_tx) {
1781 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1782 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1783 l2cap_seq_list_clear(&chan->retrans_list);
1784 break;
1785 }
1786
1787 control.reqseq = chan->buffer_seq;
1788 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1789 control.final = 1;
1790 else
1791 control.final = 0;
1792
1793 if (skb_cloned(skb)) {
1794 /* Cloned sk_buffs are read-only, so we need a
1795 * writeable copy
1796 */
1797 tx_skb = skb_copy(skb, GFP_ATOMIC);
1798 } else {
1799 tx_skb = skb_clone(skb, GFP_ATOMIC);
1800 }
1801
1802 if (!tx_skb) {
1803 l2cap_seq_list_clear(&chan->retrans_list);
1804 break;
1805 }
1806
1807 /* Update skb contents */
1808 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1809 put_unaligned_le32(__pack_extended_control(&control),
1810 tx_skb->data + L2CAP_HDR_SIZE);
1811 } else {
1812 put_unaligned_le16(__pack_enhanced_control(&control),
1813 tx_skb->data + L2CAP_HDR_SIZE);
1814 }
1815
1816 if (chan->fcs == L2CAP_FCS_CRC16) {
1817 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1818 put_unaligned_le16(fcs, skb_put(tx_skb,
1819 L2CAP_FCS_SIZE));
1820 }
1821
1822 l2cap_do_send(chan, tx_skb);
1823
1824 BT_DBG("Resent txseq %d", control.txseq);
1825
1826 chan->last_acked_seq = chan->buffer_seq;
1827 }
1828 }
1829
1830 static void l2cap_retransmit(struct l2cap_chan *chan,
1831 struct l2cap_ctrl *control)
1832 {
1833 BT_DBG("chan %p, control %p", chan, control);
1834
1835 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1836 l2cap_ertm_resend(chan);
1837 }
1838
/* Retransmit all unacked frames starting at control->reqseq, as
 * requested by a REJ from the peer.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A P-bit in the trigger frame demands an F-bit in our reply. */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Start over: any previously queued retransmissions are now
	 * superseded.
	 */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to resend... */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* ...and queue it and everything after it, up to the
		 * first never-sent frame.
		 */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
1872
/* Send an acknowledgement (RR or RNR) if one is due now, or arm the
 * ack timer so it is batched with future traffic.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* We cannot accept more data: tell the peer with RNR. */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Otherwise defer: the ack timer will flush it later. */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
1922
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes fill the skb's linear area, the remainder is chained as
 * frag_list fragments sized to the HCI MTU.  Returns the number of
 * bytes copied or a negative errno; on error the caller frees @skb,
 * which owns any fragments already linked.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so the fragment is freed along
		 * with @skb even if the copy below fails.
		 */
		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the parent skb's accounting in sync. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
1967
1968 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1969 struct msghdr *msg, size_t len,
1970 u32 priority)
1971 {
1972 struct l2cap_conn *conn = chan->conn;
1973 struct sk_buff *skb;
1974 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1975 struct l2cap_hdr *lh;
1976
1977 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
1978
1979 count = min_t(unsigned int, (conn->mtu - hlen), len);
1980
1981 skb = chan->ops->alloc_skb(chan, count + hlen,
1982 msg->msg_flags & MSG_DONTWAIT);
1983 if (IS_ERR(skb))
1984 return skb;
1985
1986 skb->priority = priority;
1987
1988 /* Create L2CAP header */
1989 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1990 lh->cid = cpu_to_le16(chan->dcid);
1991 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1992 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1993
1994 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1995 if (unlikely(err < 0)) {
1996 kfree_skb(skb);
1997 return ERR_PTR(err);
1998 }
1999 return skb;
2000 }
2001
2002 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2003 struct msghdr *msg, size_t len,
2004 u32 priority)
2005 {
2006 struct l2cap_conn *conn = chan->conn;
2007 struct sk_buff *skb;
2008 int err, count;
2009 struct l2cap_hdr *lh;
2010
2011 BT_DBG("chan %p len %zu", chan, len);
2012
2013 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2014
2015 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2016 msg->msg_flags & MSG_DONTWAIT);
2017 if (IS_ERR(skb))
2018 return skb;
2019
2020 skb->priority = priority;
2021
2022 /* Create L2CAP header */
2023 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2024 lh->cid = cpu_to_le16(chan->dcid);
2025 lh->len = cpu_to_le16(len);
2026
2027 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2028 if (unlikely(err < 0)) {
2029 kfree_skb(skb);
2030 return ERR_PTR(err);
2031 }
2032 return skb;
2033 }
2034
/* Build a single ERTM/streaming I-frame PDU carrying @len bytes of
 * @msg.  @sdulen is non-zero only for the first segment of a segmented
 * SDU, in which case an SDU-length field is included.  Returns the skb
 * or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on extended vs enhanced control fields. */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	/* Reserve room for the FCS trailer appended at send time. */
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Advertised length covers control, optional SDU-length and
	 * FCS fields as well as the payload.
	 */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2088
/* Segment the SDU in @msg into I-frame PDUs queued on @seg_queue,
 * applying SAR start/continue/end marking when @len exceeds one PDU.
 * Returns 0 on success or a negative errno (the queue is purged on
 * error).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SAR needed. */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the START segment carries the SDU length; later
		 * segments reclaim that space for payload.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2156
/* Entry point for sending user data on @chan.  Builds PDUs according
 * to the channel mode (connectionless, basic, ERTM or streaming) and
 * hands them to the transmit path.  Returns the number of bytes
 * accepted or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		/* ERTM routes frames through the tx state machine;
		 * streaming mode sends immediately.
		 */
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2236
2237 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2238 {
2239 struct l2cap_ctrl control;
2240 u16 seq;
2241
2242 BT_DBG("chan %p, txseq %u", chan, txseq);
2243
2244 memset(&control, 0, sizeof(control));
2245 control.sframe = 1;
2246 control.super = L2CAP_SUPER_SREJ;
2247
2248 for (seq = chan->expected_tx_seq; seq != txseq;
2249 seq = __next_seq(chan, seq)) {
2250 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2251 control.reqseq = seq;
2252 l2cap_send_sframe(chan, &control);
2253 l2cap_seq_list_append(&chan->srej_list, seq);
2254 }
2255 }
2256
2257 chan->expected_tx_seq = __next_seq(chan, txseq);
2258 }
2259
2260 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2261 {
2262 struct l2cap_ctrl control;
2263
2264 BT_DBG("chan %p", chan);
2265
2266 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2267 return;
2268
2269 memset(&control, 0, sizeof(control));
2270 control.sframe = 1;
2271 control.super = L2CAP_SUPER_SREJ;
2272 control.reqseq = chan->srej_list.tail;
2273 l2cap_send_sframe(chan, &control);
2274 }
2275
2276 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2277 {
2278 struct l2cap_ctrl control;
2279 u16 initial_head;
2280 u16 seq;
2281
2282 BT_DBG("chan %p, txseq %u", chan, txseq);
2283
2284 memset(&control, 0, sizeof(control));
2285 control.sframe = 1;
2286 control.super = L2CAP_SUPER_SREJ;
2287
2288 /* Capture initial list head to allow only one pass through the list. */
2289 initial_head = chan->srej_list.head;
2290
2291 do {
2292 seq = l2cap_seq_list_pop(&chan->srej_list);
2293 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2294 break;
2295
2296 control.reqseq = seq;
2297 l2cap_send_sframe(chan, &control);
2298 l2cap_seq_list_append(&chan->srej_list, seq);
2299 } while (chan->srej_list.head != initial_head);
2300 }
2301
/* Process an incoming acknowledgement: free every frame from the
 * currently expected ack up to (but not including) @reqseq, advance the
 * ack state and stop the retransmission timer once nothing remains
 * outstanding.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or a duplicate ack: nothing to do. */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2333
2334 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2335 {
2336 BT_DBG("chan %p", chan);
2337
2338 chan->expected_tx_seq = chan->buffer_seq;
2339 l2cap_seq_list_clear(&chan->srej_list);
2340 skb_queue_purge(&chan->srej_q);
2341 chan->rx_state = L2CAP_RX_STATE_RECV;
2342 }
2343
/* ERTM transmit state machine, XMIT state: frames may be sent
 * immediately.  Handles data requests, local-busy transitions, acks and
 * poll/retransmission timeouts.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Queue the new frames and transmit as the window allows. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Announce the busy condition (RNR) to the peer. */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* We sent the peer an RNR earlier: poll it with
			 * an RR so transmission can resume.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* No ack arrived in time: poll the peer with the P-bit. */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2415
/* ERTM transmit state machine handler for the WAIT_F state: a poll
 * (P=1) frame has been sent and we are waiting for the peer's final
 * (F=1) response.  While waiting, new data is queued but not sent.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Remember the first frame that has not been sent yet. */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Acknowledge frames received so far. */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer we were busy (RNR); poll it
			 * with an RR (P=1) so it learns we can receive
			 * again, and restart the monitor timer.
			 */
			struct l2cap_ctrl local_control;
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		/* Release acknowledged frames first ... */
		l2cap_process_reqseq(chan, control->reqseq);

		/* Fall through */

	case L2CAP_EV_RECV_FBIT:
		/* ... then, on the final bit, leave WAIT_F: stop the
		 * monitor timer, re-arm retransmission if frames are
		 * still unacked, and resume normal transmission.
		 */
		if (control && control->final) {
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore: a poll is already outstanding. */
		break;
	case L2CAP_EV_MONITOR_TO:
		/* No F-bit response yet: re-poll until max_tx is
		 * exhausted (max_tx == 0 means retry forever), then
		 * give up and disconnect.
		 */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2493
2494 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2495 struct sk_buff_head *skbs, u8 event)
2496 {
2497 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2498 chan, control, skbs, event, chan->tx_state);
2499
2500 switch (chan->tx_state) {
2501 case L2CAP_TX_STATE_XMIT:
2502 l2cap_tx_state_xmit(chan, control, skbs, event);
2503 break;
2504 case L2CAP_TX_STATE_WAIT_F:
2505 l2cap_tx_state_wait_f(chan, control, skbs, event);
2506 break;
2507 default:
2508 /* Ignore event */
2509 break;
2510 }
2511 }
2512
/* Feed the reqseq and F-bit of a received frame into the transmit
 * state machine (no data is queued; skbs is NULL).
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2519
/* Feed only the F-bit of a received frame into the transmit state
 * machine (no reqseq processing; no data is queued).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2526
2527 /* Copy frame to all raw sockets on that connection */
2528 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2529 {
2530 struct sk_buff *nskb;
2531 struct l2cap_chan *chan;
2532
2533 BT_DBG("conn %p", conn);
2534
2535 mutex_lock(&conn->chan_lock);
2536
2537 list_for_each_entry(chan, &conn->chan_l, list) {
2538 struct sock *sk = chan->sk;
2539 if (chan->chan_type != L2CAP_CHAN_RAW)
2540 continue;
2541
2542 /* Don't send frame to the socket it came from */
2543 if (skb->sk == sk)
2544 continue;
2545 nskb = skb_clone(skb, GFP_ATOMIC);
2546 if (!nskb)
2547 continue;
2548
2549 if (chan->ops->recv(chan, nskb))
2550 kfree_skb(nskb);
2551 }
2552
2553 mutex_unlock(&conn->chan_lock);
2554 }
2555
2556 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU: L2CAP header + command header +
 * @dlen bytes of payload, fragmented into a chain of skbs when the
 * total exceeds the connection MTU.  Returns NULL on allocation
 * failure; the caller owns the returned skb.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling uses a fixed CID, which differs on LE links. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first fragment with payload. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Freeing the head skb also frees already-chained fragments. */
	kfree_skb(skb);
	return NULL;
}
2619
/* Decode one configuration option at *ptr and advance *ptr past it.
 * Lengths of 1, 2 or 4 are decoded into *val as an integer; any other
 * length leaves *val pointing at the raw option payload instead.
 * Returns the total encoded size consumed (header + payload).
 *
 * NOTE(review): opt->len comes from the peer and is not bounded here;
 * callers must ensure the option lies entirely within the received
 * buffer -- verify each caller limits its loop by the remaining length.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-size option: hand back a pointer. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2652
2653 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2654 {
2655 struct l2cap_conf_opt *opt = *ptr;
2656
2657 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2658
2659 opt->type = type;
2660 opt->len = len;
2661
2662 switch (len) {
2663 case 1:
2664 *((u8 *) opt->val) = val;
2665 break;
2666
2667 case 2:
2668 put_unaligned_le16(val, opt->val);
2669 break;
2670
2671 case 4:
2672 put_unaligned_le32(val, opt->val);
2673 break;
2674
2675 default:
2676 memcpy(opt->val, (void *) val, len);
2677 break;
2678 }
2679
2680 *ptr += L2CAP_CONF_OPT_SIZE + len;
2681 }
2682
2683 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2684 {
2685 struct l2cap_conf_efs efs;
2686
2687 switch (chan->mode) {
2688 case L2CAP_MODE_ERTM:
2689 efs.id = chan->local_id;
2690 efs.stype = chan->local_stype;
2691 efs.msdu = cpu_to_le16(chan->local_msdu);
2692 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2693 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2694 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2695 break;
2696
2697 case L2CAP_MODE_STREAMING:
2698 efs.id = 1;
2699 efs.stype = L2CAP_SERV_BESTEFFORT;
2700 efs.msdu = cpu_to_le16(chan->local_msdu);
2701 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2702 efs.acc_lat = 0;
2703 efs.flush_to = 0;
2704 break;
2705
2706 default:
2707 return;
2708 }
2709
2710 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2711 (unsigned long) &efs);
2712 }
2713
/* Delayed-work handler for the ERTM acknowledgement timer.  If any
 * received frames are still unacked (buffer_seq has advanced past
 * last_acked_seq), send an RR/RNR to acknowledge them.  Finally drops
 * the channel reference -- presumably taken when this work was
 * scheduled (TODO confirm at the scheduling site).
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
2733
2734 int l2cap_ertm_init(struct l2cap_chan *chan)
2735 {
2736 int err;
2737
2738 chan->next_tx_seq = 0;
2739 chan->expected_tx_seq = 0;
2740 chan->expected_ack_seq = 0;
2741 chan->unacked_frames = 0;
2742 chan->buffer_seq = 0;
2743 chan->frames_sent = 0;
2744 chan->last_acked_seq = 0;
2745 chan->sdu = NULL;
2746 chan->sdu_last_frag = NULL;
2747 chan->sdu_len = 0;
2748
2749 skb_queue_head_init(&chan->tx_q);
2750
2751 if (chan->mode != L2CAP_MODE_ERTM)
2752 return 0;
2753
2754 chan->rx_state = L2CAP_RX_STATE_RECV;
2755 chan->tx_state = L2CAP_TX_STATE_XMIT;
2756
2757 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2758 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2759 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2760
2761 skb_queue_head_init(&chan->srej_q);
2762
2763 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2764 if (err < 0)
2765 return err;
2766
2767 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2768 if (err < 0)
2769 l2cap_seq_list_free(&chan->srej_list);
2770
2771 return err;
2772 }
2773
2774 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2775 {
2776 switch (mode) {
2777 case L2CAP_MODE_STREAMING:
2778 case L2CAP_MODE_ERTM:
2779 if (l2cap_mode_supported(mode, remote_feat_mask))
2780 return mode;
2781 /* fall through */
2782 default:
2783 return L2CAP_MODE_BASIC;
2784 }
2785 }
2786
/* Extended window size is usable only with High Speed support enabled
 * and the remote advertising L2CAP_FEAT_EXT_WINDOW.
 */
static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
}
2791
/* Extended flow spec is usable only with High Speed support enabled
 * and the remote advertising L2CAP_FEAT_EXT_FLOW.
 */
static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
}
2796
2797 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2798 {
2799 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2800 __l2cap_ews_supported(chan)) {
2801 /* use extended control field */
2802 set_bit(FLAG_EXT_CTRL, &chan->flags);
2803 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2804 } else {
2805 chan->tx_win = min_t(u16, chan->tx_win,
2806 L2CAP_DEFAULT_TX_WINDOW);
2807 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2808 }
2809 chan->ack_win = chan->tx_win;
2810 }
2811
/* Build an outgoing Configure Request for @chan into @data.  On the
 * first exchange the channel mode is (re)negotiated against the
 * remote feature mask; later requests keep the chosen mode.  Returns
 * the number of bytes written.
 *
 * NOTE(review): no bound on @data is checked here; callers pass
 * 64/128-byte buffers -- confirm these cover the worst-case options.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only before any config traffic. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* "State 2" devices insist on their configured mode. */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		/* Fall back to a mode the remote supports. */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only a non-default MTU needs to be sent explicitly. */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* An explicit basic-mode RFC option is only needed when
		 * the remote also supports ERTM or streaming.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* Cap the PDU size to what fits in the ACL MTU after
		 * header, SDU-length and FCS overhead.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		/* The RFC option can only carry the standard window;
		 * a larger window goes in the EWS option below.
		 */
		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
2933
/* Parse the remote's accumulated Configure Request (chan->conf_req /
 * chan->conf_len) and build our Configure Response into @data.
 * Returns the response length, or -ECONNREFUSED when the requested
 * configuration cannot be accepted and the channel must be refused.
 *
 * Fix: @efs was read uninitialized when the remote sent no EFS option
 * (FLAG_EFS_ENABLE already set from our own request) or a truncated
 * one (remote_efs set without the memcpy) -- stack data then leaked
 * into the response.  We now zero-init @efs and only honour a
 * correctly sized EFS option.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Never use uninitialized EFS data (see header comment). */
	memset(&efs, 0, sizeof(efs));

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			/* Ignore a malformed (wrong-size) EFS option
			 * instead of acting on partial data.
			 */
			if (olen == sizeof(efs)) {
				remote_efs = 1;
				memcpy(&efs, (void *) val, olen);
			}
			break;

		case L2CAP_CONF_EWS:
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Unknown non-hint option: reject it by echoing
			 * its type in the response.
			 */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode is only (re)negotiated on the first exchange. */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State-2 device: the remote must match our mode. */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Mode still disputed after a full round trip: fail. */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
				   sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* Service types must be compatible unless one
			 * side requested "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote PDU size to what fits in our
			 * ACL MTU after the ERTM overhead.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
				     L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE -
				     L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
				     L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE -
				     L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3153
3154 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3155 {
3156 struct l2cap_conf_req *req = data;
3157 void *ptr = req->data;
3158 int type, olen;
3159 unsigned long val;
3160 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3161 struct l2cap_conf_efs efs;
3162
3163 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3164
3165 while (len >= L2CAP_CONF_OPT_SIZE) {
3166 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3167
3168 switch (type) {
3169 case L2CAP_CONF_MTU:
3170 if (val < L2CAP_DEFAULT_MIN_MTU) {
3171 *result = L2CAP_CONF_UNACCEPT;
3172 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3173 } else
3174 chan->imtu = val;
3175 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3176 break;
3177
3178 case L2CAP_CONF_FLUSH_TO:
3179 chan->flush_to = val;
3180 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3181 2, chan->flush_to);
3182 break;
3183
3184 case L2CAP_CONF_RFC:
3185 if (olen == sizeof(rfc))
3186 memcpy(&rfc, (void *)val, olen);
3187
3188 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3189 rfc.mode != chan->mode)
3190 return -ECONNREFUSED;
3191
3192 chan->fcs = 0;
3193
3194 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3195 sizeof(rfc), (unsigned long) &rfc);
3196 break;
3197
3198 case L2CAP_CONF_EWS:
3199 chan->ack_win = min_t(u16, val, chan->ack_win);
3200 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3201 chan->tx_win);
3202 break;
3203
3204 case L2CAP_CONF_EFS:
3205 if (olen == sizeof(efs))
3206 memcpy(&efs, (void *)val, olen);
3207
3208 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3209 efs.stype != L2CAP_SERV_NOTRAFIC &&
3210 efs.stype != chan->local_stype)
3211 return -ECONNREFUSED;
3212
3213 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3214 sizeof(efs), (unsigned long) &efs);
3215 break;
3216 }
3217 }
3218
3219 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3220 return -ECONNREFUSED;
3221
3222 chan->mode = rfc.mode;
3223
3224 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3225 switch (rfc.mode) {
3226 case L2CAP_MODE_ERTM:
3227 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3228 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3229 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3230 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3231 chan->ack_win = min_t(u16, chan->ack_win,
3232 rfc.txwin_size);
3233
3234 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3235 chan->local_msdu = le16_to_cpu(efs.msdu);
3236 chan->local_sdu_itime =
3237 le32_to_cpu(efs.sdu_itime);
3238 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3239 chan->local_flush_to =
3240 le32_to_cpu(efs.flush_to);
3241 }
3242 break;
3243
3244 case L2CAP_MODE_STREAMING:
3245 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3246 }
3247 }
3248
3249 req->dcid = cpu_to_le16(chan->dcid);
3250 req->flags = __constant_cpu_to_le16(0);
3251
3252 return ptr - data;
3253 }
3254
/* Fill in a minimal Configure Response header (no options) with the
 * given result and flags.  Returns the number of bytes written.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	/* The response's "source CID" is the remote's CID, which we
	 * store as our dcid.
	 */
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

	return ptr - data;
}
3268
/* Send the deferred success Connect Response for a channel whose
 * acceptance was postponed (e.g. pending authorization), then kick
 * off configuration with our first Configure Request -- unless one
 * was already sent (CONF_REQ_SENT test-and-set guards against
 * duplicates).
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident,
		       L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3289
/* Extract the negotiated ERTM/streaming parameters (timeouts, MPS,
 * ack window) from a final Configure Response.  Basic-mode channels
 * are left untouched.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* The ack window comes from EWS when extended control
		 * is in use, otherwise from the RFC transmit window.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3340
3341 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3342 {
3343 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3344
3345 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3346 return 0;
3347
3348 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3349 cmd->ident == conn->info_ident) {
3350 cancel_delayed_work(&conn->info_timer);
3351
3352 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3353 conn->info_ident = 0;
3354
3355 l2cap_conn_start(conn);
3356 }
3357
3358 return 0;
3359 }
3360
/* Handle an incoming Connect Request: find a listening channel for
 * the PSM, run security checks, create the new child channel and send
 * a Connect Response (success, pending, or an error result).  May
 * additionally trigger the feature-mask information exchange and the
 * first Configure Request.
 *
 * Locking: conn->chan_lock is taken before the parent socket lock.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid
	 * (the remote's scid is our dcid).
	 */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	/* Our newly allocated source CID is the remote's dcid. */
	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace must authorize: respond
				 * "pending" and wake the listener.
				 */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Wait for authentication to finish. */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet: answer "pending". */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Start the feature-mask information exchange. */
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
			       L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		/* Connection accepted: immediately begin configuration. */
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3481
3482 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3483 {
3484 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3485 u16 scid, dcid, result, status;
3486 struct l2cap_chan *chan;
3487 u8 req[128];
3488 int err;
3489
3490 scid = __le16_to_cpu(rsp->scid);
3491 dcid = __le16_to_cpu(rsp->dcid);
3492 result = __le16_to_cpu(rsp->result);
3493 status = __le16_to_cpu(rsp->status);
3494
3495 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3496 dcid, scid, result, status);
3497
3498 mutex_lock(&conn->chan_lock);
3499
3500 if (scid) {
3501 chan = __l2cap_get_chan_by_scid(conn, scid);
3502 if (!chan) {
3503 err = -EFAULT;
3504 goto unlock;
3505 }
3506 } else {
3507 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3508 if (!chan) {
3509 err = -EFAULT;
3510 goto unlock;
3511 }
3512 }
3513
3514 err = 0;
3515
3516 l2cap_chan_lock(chan);
3517
3518 switch (result) {
3519 case L2CAP_CR_SUCCESS:
3520 l2cap_state_change(chan, BT_CONFIG);
3521 chan->ident = 0;
3522 chan->dcid = dcid;
3523 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3524
3525 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3526 break;
3527
3528 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3529 l2cap_build_conf_req(chan, req), req);
3530 chan->num_conf_req++;
3531 break;
3532
3533 case L2CAP_CR_PEND:
3534 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3535 break;
3536
3537 default:
3538 l2cap_chan_del(chan, ECONNREFUSED);
3539 break;
3540 }
3541
3542 l2cap_chan_unlock(chan);
3543
3544 unlock:
3545 mutex_unlock(&conn->chan_lock);
3546
3547 return err;
3548 }
3549
3550 static inline void set_default_fcs(struct l2cap_chan *chan)
3551 {
3552 /* FCS is enabled only in ERTM or streaming mode, if one or both
3553 * sides request it.
3554 */
3555 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3556 chan->fcs = L2CAP_FCS_NONE;
3557 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3558 chan->fcs = L2CAP_FCS_CRC16;
3559 }
3560
/* Handle an incoming L2CAP Configure Request.
 *
 * Config options may be split across several requests (continuation
 * flag set); partial payloads accumulate in chan->conf_req until the
 * final fragment arrives and the whole set is parsed at once.
 *
 * Returns 0 on success or -ENOENT if no channel matches the dcid.
 * The channel returned by l2cap_get_chan_by_scid() is locked; every
 * exit path goes through the unlock label.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Configuration is only legal while the channel is being set up. */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
						    L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
						    L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unrecoverable option error: tear the channel down. */
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup. */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request has not gone out yet: send it now. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	 * Conf Rsp PENDING in the code above.
	 */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
						    L2CAP_CONF_SUCCESS, flags), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3668
/* Handle an incoming L2CAP Configure Response.
 *
 * Depending on the result the peer accepted our options (SUCCESS), is
 * still deciding (PENDING), or rejected some of them (UNACCEPT), in
 * which case a corrected Configure Request is sent — but only for a
 * bounded number of negotiation rounds.  Any other result (or too many
 * rounds) disconnects the channel.
 *
 * Returns 0, or a negative errno from l2cap_ertm_init().
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Channel comes back locked; unlocked at the done label. */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* check compatibility */

			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				       l2cap_build_conf_rsp(chan, buf,
							    L2CAP_CONF_SUCCESS, 0x0000), buf);
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Negotiation rounds exhausted: fall through and give up. */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finish channel setup. */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
3774
/* Handle an incoming Disconnect Request: acknowledge it, shut down the
 * owning socket, and remove the channel from the connection.
 *
 * A reference is taken before l2cap_chan_del() so the channel stays
 * valid for the ops->close() callback, which must run after the
 * channel lock has been dropped.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid. */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* Stop further reads/writes on the socket. */
	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3820
/* Handle an incoming Disconnect Response: the peer confirmed our
 * disconnect, so remove the channel without signaling an error.
 *
 * As in l2cap_disconnect_req(), a reference is held across
 * l2cap_chan_del() so ops->close() runs safely after the channel
 * lock is released.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3854
/* Handle an Information Request: report the extended feature mask or
 * the fixed channel map, depending on the requested type; any other
 * type is answered with "not supported".
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		/* ERTM/streaming and high-speed features are advertised
		 * only when the corresponding module options allow them.
		 */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
			       L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* The A2MP fixed channel is advertised only when high
		 * speed support is enabled.
		 */
		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident,
			       L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
			       L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3904
/* Handle an Information Response during connection setup.
 *
 * Stale or unsolicited responses (wrong ident, or the info exchange is
 * already finished) are ignored.  After the feature mask arrives, the
 * fixed channel map is requested if the peer supports it; once
 * everything is known, pending channels are started via
 * l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused; proceed with whatever we have. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Continue the exchange: ask for the fixed
			 * channel map next.
			 */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
3962
3963 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3964 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3965 void *data)
3966 {
3967 struct l2cap_create_chan_req *req = data;
3968 struct l2cap_create_chan_rsp rsp;
3969 u16 psm, scid;
3970
3971 if (cmd_len != sizeof(*req))
3972 return -EPROTO;
3973
3974 if (!enable_hs)
3975 return -EINVAL;
3976
3977 psm = le16_to_cpu(req->psm);
3978 scid = le16_to_cpu(req->scid);
3979
3980 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
3981
3982 /* Placeholder: Always reject */
3983 rsp.dcid = 0;
3984 rsp.scid = cpu_to_le16(scid);
3985 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3986 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3987
3988 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3989 sizeof(rsp), &rsp);
3990
3991 return 0;
3992 }
3993
/* An AMP Create Channel Response carries the same payload layout as a
 * Connect Response, so reuse that handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4001
4002 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4003 u16 icid, u16 result)
4004 {
4005 struct l2cap_move_chan_rsp rsp;
4006
4007 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4008
4009 rsp.icid = cpu_to_le16(icid);
4010 rsp.result = cpu_to_le16(result);
4011
4012 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4013 }
4014
4015 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4016 struct l2cap_chan *chan,
4017 u16 icid, u16 result)
4018 {
4019 struct l2cap_move_chan_cfm cfm;
4020 u8 ident;
4021
4022 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4023
4024 ident = l2cap_get_ident(conn);
4025 if (chan)
4026 chan->ident = ident;
4027
4028 cfm.icid = cpu_to_le16(icid);
4029 cfm.result = cpu_to_le16(result);
4030
4031 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4032 }
4033
4034 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4035 u16 icid)
4036 {
4037 struct l2cap_move_chan_cfm_rsp rsp;
4038
4039 BT_DBG("icid 0x%4.4x", icid);
4040
4041 rsp.icid = cpu_to_le16(icid);
4042 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4043 }
4044
4045 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4046 struct l2cap_cmd_hdr *cmd,
4047 u16 cmd_len, void *data)
4048 {
4049 struct l2cap_move_chan_req *req = data;
4050 u16 icid = 0;
4051 u16 result = L2CAP_MR_NOT_ALLOWED;
4052
4053 if (cmd_len != sizeof(*req))
4054 return -EPROTO;
4055
4056 icid = le16_to_cpu(req->icid);
4057
4058 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4059
4060 if (!enable_hs)
4061 return -EINVAL;
4062
4063 /* Placeholder: Always refuse */
4064 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4065
4066 return 0;
4067 }
4068
4069 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4070 struct l2cap_cmd_hdr *cmd,
4071 u16 cmd_len, void *data)
4072 {
4073 struct l2cap_move_chan_rsp *rsp = data;
4074 u16 icid, result;
4075
4076 if (cmd_len != sizeof(*rsp))
4077 return -EPROTO;
4078
4079 icid = le16_to_cpu(rsp->icid);
4080 result = le16_to_cpu(rsp->result);
4081
4082 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4083
4084 /* Placeholder: Always unconfirmed */
4085 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4086
4087 return 0;
4088 }
4089
4090 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4091 struct l2cap_cmd_hdr *cmd,
4092 u16 cmd_len, void *data)
4093 {
4094 struct l2cap_move_chan_cfm *cfm = data;
4095 u16 icid, result;
4096
4097 if (cmd_len != sizeof(*cfm))
4098 return -EPROTO;
4099
4100 icid = le16_to_cpu(cfm->icid);
4101 result = le16_to_cpu(cfm->result);
4102
4103 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4104
4105 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4106
4107 return 0;
4108 }
4109
4110 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4111 struct l2cap_cmd_hdr *cmd,
4112 u16 cmd_len, void *data)
4113 {
4114 struct l2cap_move_chan_cfm_rsp *rsp = data;
4115 u16 icid;
4116
4117 if (cmd_len != sizeof(*rsp))
4118 return -EPROTO;
4119
4120 icid = le16_to_cpu(rsp->icid);
4121
4122 BT_DBG("icid 0x%4.4x", icid);
4123
4124 return 0;
4125 }
4126
/* Validate requested LE connection parameters.
 *
 * Units: min/max connection interval in 1.25 ms steps (6..3200, i.e.
 * 7.5 ms..4 s), supervision timeout multiplier in 10 ms steps
 * (10..3200, i.e. 100 ms..32 s).  The timeout must exceed the maximum
 * interval, and the slave latency must fit both the spec cap (499)
 * and the timeout-derived bound.
 *
 * Returns 0 when the parameters are acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
					 u16 to_multiplier)
{
	u16 max_latency;

	if (max < min || min < 6 || max > 3200)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* Supervision timeout must be larger than the max interval. */
	if (to_multiplier * 8 <= max)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > max_latency || latency > 499)
		return -EINVAL;

	return 0;
}
4147
/* Handle an LE Connection Parameter Update Request (sent by the slave).
 *
 * Only legal when the local side is master.  The requested parameters
 * are validated, an accept/reject response is always sent, and on
 * acceptance the controller is asked to apply the new parameters via
 * hci_le_conn_update().
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Ask the controller to apply the accepted parameters. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
4189
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Returns 0 on success or a negative errno; a non-zero return makes
 * the caller (l2cap_sig_channel) send a Command Reject for this
 * command.  Echo Requests are answered inline.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_create_channel_rsp(conn, cmd, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4271
4272 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4273 struct l2cap_cmd_hdr *cmd, u8 *data)
4274 {
4275 switch (cmd->code) {
4276 case L2CAP_COMMAND_REJ:
4277 return 0;
4278
4279 case L2CAP_CONN_PARAM_UPDATE_REQ:
4280 return l2cap_conn_param_update_req(conn, cmd, data);
4281
4282 case L2CAP_CONN_PARAM_UPDATE_RSP:
4283 return 0;
4284
4285 default:
4286 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4287 return -EINVAL;
4288 }
4289 }
4290
/* Parse and dispatch the signaling commands contained in skb.
 *
 * A single signaling PDU may carry several commands back to back;
 * each is validated (length, non-zero ident) and routed to the LE or
 * BR/EDR handler.  A failing command is answered with a Command
 * Reject.  The skb is always consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Guard against a truncated command or a reserved
		 * (zero) ident; stop parsing the rest of the PDU.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): err can come from any handler, not
			 * only a link-type mismatch, so this message is
			 * misleading in most failure cases.
			 */
			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
4337
/* Verify (and strip) the FCS trailer of a received ERTM/streaming
 * frame.  Returns 0 when the frame is good or FCS is not in use on
 * this channel, -EBADMSG on a checksum mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Drop the FCS from skb->len; the two FCS bytes still
		 * sit in the buffer just past the trimmed length, so
		 * they can be read back on the next line.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* The CRC also covers the header preceding skb->data. */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
4358
/* Answer a poll (P-bit) from the remote side.
 *
 * Sends an RNR if we are locally busy, restarts the retransmission
 * timer if needed, flushes pending I-frames, and finally sends an RR
 * if the required F-bit has not been carried by any frame yet.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
4392
/* Append new_frag to skb's fragment list, keeping *last_frag pointing
 * at the current tail so appends stay O(1), and update the parent
 * skb's length accounting.
 */
static void append_skb_frag(struct sk_buff *skb,
			    struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4411
/* Reassemble an SDU from received I-frames according to their SAR
 * (segmentation and reassembly) field and hand complete SDUs to the
 * channel's recv callback.
 *
 * When a frame is absorbed into the reassembly buffer, the local skb
 * pointer is set to NULL to transfer ownership.  On any error the
 * current skb and the partially assembled SDU are freed and the
 * reassembly state is reset; the negative errno is returned.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A whole SDU in one frame; illegal while a segmented
		 * SDU is still being reassembled.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The first segment carries the total SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must not already hold the full SDU. */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by the reassembly buffer. */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must leave the SDU still incomplete. */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The final fragment must complete the SDU exactly. */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4493
4494 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4495 {
4496 u8 event;
4497
4498 if (chan->mode != L2CAP_MODE_ERTM)
4499 return;
4500
4501 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4502 l2cap_tx(chan, NULL, NULL, event);
4503 }
4504
/* Deliver buffered I-frames from the SREJ queue in sequence order.
 * Stops at the first gap or when local busy is (re)entered.  When the
 * queue drains completely, go back to the plain RECV state and send
 * an acknowledgement.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
4538
/* Handle a received SREJ S-frame: selectively retransmit the single
 * I-frame the peer asks for, honouring the P/F bits and the per-frame
 * retry limit.  An impossible reqseq or an exhausted retry budget
 * tears the connection down.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* The peer cannot legitimately request a frame we have not
	 * sent yet.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* A poll demands the F-bit in our reply. */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit when this final bit answers
			 * an SREJ we already acted on for this reqseq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
4596
/* Handle a received REJ S-frame: retransmit everything from the
 * rejected sequence number onward, subject to the same reqseq sanity
 * check and retry limit as SREJ handling.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* The peer cannot reject a frame we have not sent yet. */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Skip the retransmit when this final bit answers a
		 * REJ we already acted on.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
4633
/* Classify the txseq of a received I-frame relative to the expected
 * sequence number, the tx window and any outstanding SREJ state, so
 * the RX state machine can decide whether to accept, buffer, drop or
 * disconnect.  Returns one of the L2CAP_TXSEQ_* classifications.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq,
			 chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
4720
4721 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4722 struct l2cap_ctrl *control,
4723 struct sk_buff *skb, u8 event)
4724 {
4725 int err = 0;
4726 bool skb_in_use = 0;
4727
4728 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4729 event);
4730
4731 switch (event) {
4732 case L2CAP_EV_RECV_IFRAME:
4733 switch (l2cap_classify_txseq(chan, control->txseq)) {
4734 case L2CAP_TXSEQ_EXPECTED:
4735 l2cap_pass_to_tx(chan, control);
4736
4737 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4738 BT_DBG("Busy, discarding expected seq %d",
4739 control->txseq);
4740 break;
4741 }
4742
4743 chan->expected_tx_seq = __next_seq(chan,
4744 control->txseq);
4745
4746 chan->buffer_seq = chan->expected_tx_seq;
4747 skb_in_use = 1;
4748
4749 err = l2cap_reassemble_sdu(chan, skb, control);
4750 if (err)
4751 break;
4752
4753 if (control->final) {
4754 if (!test_and_clear_bit(CONN_REJ_ACT,
4755 &chan->conn_state)) {
4756 control->final = 0;
4757 l2cap_retransmit_all(chan, control);
4758 l2cap_ertm_send(chan);
4759 }
4760 }
4761
4762 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4763 l2cap_send_ack(chan);
4764 break;
4765 case L2CAP_TXSEQ_UNEXPECTED:
4766 l2cap_pass_to_tx(chan, control);
4767
4768 /* Can't issue SREJ frames in the local busy state.
4769 * Drop this frame, it will be seen as missing
4770 * when local busy is exited.
4771 */
4772 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4773 BT_DBG("Busy, discarding unexpected seq %d",
4774 control->txseq);
4775 break;
4776 }
4777
4778 /* There was a gap in the sequence, so an SREJ
4779 * must be sent for each missing frame. The
4780 * current frame is stored for later use.
4781 */
4782 skb_queue_tail(&chan->srej_q, skb);
4783 skb_in_use = 1;
4784 BT_DBG("Queued %p (queue len %d)", skb,
4785 skb_queue_len(&chan->srej_q));
4786
4787 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4788 l2cap_seq_list_clear(&chan->srej_list);
4789 l2cap_send_srej(chan, control->txseq);
4790
4791 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4792 break;
4793 case L2CAP_TXSEQ_DUPLICATE:
4794 l2cap_pass_to_tx(chan, control);
4795 break;
4796 case L2CAP_TXSEQ_INVALID_IGNORE:
4797 break;
4798 case L2CAP_TXSEQ_INVALID:
4799 default:
4800 l2cap_send_disconn_req(chan->conn, chan,
4801 ECONNRESET);
4802 break;
4803 }
4804 break;
4805 case L2CAP_EV_RECV_RR:
4806 l2cap_pass_to_tx(chan, control);
4807 if (control->final) {
4808 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4809
4810 if (!test_and_clear_bit(CONN_REJ_ACT,
4811 &chan->conn_state)) {
4812 control->final = 0;
4813 l2cap_retransmit_all(chan, control);
4814 }
4815
4816 l2cap_ertm_send(chan);
4817 } else if (control->poll) {
4818 l2cap_send_i_or_rr_or_rnr(chan);
4819 } else {
4820 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4821 &chan->conn_state) &&
4822 chan->unacked_frames)
4823 __set_retrans_timer(chan);
4824
4825 l2cap_ertm_send(chan);
4826 }
4827 break;
4828 case L2CAP_EV_RECV_RNR:
4829 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4830 l2cap_pass_to_tx(chan, control);
4831 if (control && control->poll) {
4832 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4833 l2cap_send_rr_or_rnr(chan, 0);
4834 }
4835 __clear_retrans_timer(chan);
4836 l2cap_seq_list_clear(&chan->retrans_list);
4837 break;
4838 case L2CAP_EV_RECV_REJ:
4839 l2cap_handle_rej(chan, control);
4840 break;
4841 case L2CAP_EV_RECV_SREJ:
4842 l2cap_handle_srej(chan, control);
4843 break;
4844 default:
4845 break;
4846 }
4847
4848 if (skb && !skb_in_use) {
4849 BT_DBG("Freeing %p", skb);
4850 kfree_skb(skb);
4851 }
4852
4853 return err;
4854 }
4855
4856 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4857 struct l2cap_ctrl *control,
4858 struct sk_buff *skb, u8 event)
4859 {
4860 int err = 0;
4861 u16 txseq = control->txseq;
4862 bool skb_in_use = 0;
4863
4864 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4865 event);
4866
4867 switch (event) {
4868 case L2CAP_EV_RECV_IFRAME:
4869 switch (l2cap_classify_txseq(chan, txseq)) {
4870 case L2CAP_TXSEQ_EXPECTED:
4871 /* Keep frame for reassembly later */
4872 l2cap_pass_to_tx(chan, control);
4873 skb_queue_tail(&chan->srej_q, skb);
4874 skb_in_use = 1;
4875 BT_DBG("Queued %p (queue len %d)", skb,
4876 skb_queue_len(&chan->srej_q));
4877
4878 chan->expected_tx_seq = __next_seq(chan, txseq);
4879 break;
4880 case L2CAP_TXSEQ_EXPECTED_SREJ:
4881 l2cap_seq_list_pop(&chan->srej_list);
4882
4883 l2cap_pass_to_tx(chan, control);
4884 skb_queue_tail(&chan->srej_q, skb);
4885 skb_in_use = 1;
4886 BT_DBG("Queued %p (queue len %d)", skb,
4887 skb_queue_len(&chan->srej_q));
4888
4889 err = l2cap_rx_queued_iframes(chan);
4890 if (err)
4891 break;
4892
4893 break;
4894 case L2CAP_TXSEQ_UNEXPECTED:
4895 /* Got a frame that can't be reassembled yet.
4896 * Save it for later, and send SREJs to cover
4897 * the missing frames.
4898 */
4899 skb_queue_tail(&chan->srej_q, skb);
4900 skb_in_use = 1;
4901 BT_DBG("Queued %p (queue len %d)", skb,
4902 skb_queue_len(&chan->srej_q));
4903
4904 l2cap_pass_to_tx(chan, control);
4905 l2cap_send_srej(chan, control->txseq);
4906 break;
4907 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4908 /* This frame was requested with an SREJ, but
4909 * some expected retransmitted frames are
4910 * missing. Request retransmission of missing
4911 * SREJ'd frames.
4912 */
4913 skb_queue_tail(&chan->srej_q, skb);
4914 skb_in_use = 1;
4915 BT_DBG("Queued %p (queue len %d)", skb,
4916 skb_queue_len(&chan->srej_q));
4917
4918 l2cap_pass_to_tx(chan, control);
4919 l2cap_send_srej_list(chan, control->txseq);
4920 break;
4921 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4922 /* We've already queued this frame. Drop this copy. */
4923 l2cap_pass_to_tx(chan, control);
4924 break;
4925 case L2CAP_TXSEQ_DUPLICATE:
4926 /* Expecting a later sequence number, so this frame
4927 * was already received. Ignore it completely.
4928 */
4929 break;
4930 case L2CAP_TXSEQ_INVALID_IGNORE:
4931 break;
4932 case L2CAP_TXSEQ_INVALID:
4933 default:
4934 l2cap_send_disconn_req(chan->conn, chan,
4935 ECONNRESET);
4936 break;
4937 }
4938 break;
4939 case L2CAP_EV_RECV_RR:
4940 l2cap_pass_to_tx(chan, control);
4941 if (control->final) {
4942 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4943
4944 if (!test_and_clear_bit(CONN_REJ_ACT,
4945 &chan->conn_state)) {
4946 control->final = 0;
4947 l2cap_retransmit_all(chan, control);
4948 }
4949
4950 l2cap_ertm_send(chan);
4951 } else if (control->poll) {
4952 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4953 &chan->conn_state) &&
4954 chan->unacked_frames) {
4955 __set_retrans_timer(chan);
4956 }
4957
4958 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4959 l2cap_send_srej_tail(chan);
4960 } else {
4961 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4962 &chan->conn_state) &&
4963 chan->unacked_frames)
4964 __set_retrans_timer(chan);
4965
4966 l2cap_send_ack(chan);
4967 }
4968 break;
4969 case L2CAP_EV_RECV_RNR:
4970 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4971 l2cap_pass_to_tx(chan, control);
4972 if (control->poll) {
4973 l2cap_send_srej_tail(chan);
4974 } else {
4975 struct l2cap_ctrl rr_control;
4976 memset(&rr_control, 0, sizeof(rr_control));
4977 rr_control.sframe = 1;
4978 rr_control.super = L2CAP_SUPER_RR;
4979 rr_control.reqseq = chan->buffer_seq;
4980 l2cap_send_sframe(chan, &rr_control);
4981 }
4982
4983 break;
4984 case L2CAP_EV_RECV_REJ:
4985 l2cap_handle_rej(chan, control);
4986 break;
4987 case L2CAP_EV_RECV_SREJ:
4988 l2cap_handle_srej(chan, control);
4989 break;
4990 }
4991
4992 if (skb && !skb_in_use) {
4993 BT_DBG("Freeing %p", skb);
4994 kfree_skb(skb);
4995 }
4996
4997 return err;
4998 }
4999
5000 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5001 {
5002 /* Make sure reqseq is for a packet that has been sent but not acked */
5003 u16 unacked;
5004
5005 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5006 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5007 }
5008
5009 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5010 struct sk_buff *skb, u8 event)
5011 {
5012 int err = 0;
5013
5014 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5015 control, skb, event, chan->rx_state);
5016
5017 if (__valid_reqseq(chan, control->reqseq)) {
5018 switch (chan->rx_state) {
5019 case L2CAP_RX_STATE_RECV:
5020 err = l2cap_rx_state_recv(chan, control, skb, event);
5021 break;
5022 case L2CAP_RX_STATE_SREJ_SENT:
5023 err = l2cap_rx_state_srej_sent(chan, control, skb,
5024 event);
5025 break;
5026 default:
5027 /* shut it down */
5028 break;
5029 }
5030 } else {
5031 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5032 control->reqseq, chan->next_tx_seq,
5033 chan->expected_ack_seq);
5034 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5035 }
5036
5037 return err;
5038 }
5039
5040 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5041 struct sk_buff *skb)
5042 {
5043 int err = 0;
5044
5045 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5046 chan->rx_state);
5047
5048 if (l2cap_classify_txseq(chan, control->txseq) ==
5049 L2CAP_TXSEQ_EXPECTED) {
5050 l2cap_pass_to_tx(chan, control);
5051
5052 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5053 __next_seq(chan, chan->buffer_seq));
5054
5055 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5056
5057 l2cap_reassemble_sdu(chan, skb, control);
5058 } else {
5059 if (chan->sdu) {
5060 kfree_skb(chan->sdu);
5061 chan->sdu = NULL;
5062 }
5063 chan->sdu_last_frag = NULL;
5064 chan->sdu_len = 0;
5065
5066 if (skb) {
5067 BT_DBG("Freeing %p", skb);
5068 kfree_skb(skb);
5069 }
5070 }
5071
5072 chan->last_acked_seq = control->txseq;
5073 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5074
5075 return err;
5076 }
5077
5078 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5079 {
5080 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5081 u16 len;
5082 u8 event;
5083
5084 __unpack_control(chan, skb);
5085
5086 len = skb->len;
5087
5088 /*
5089 * We can just drop the corrupted I-frame here.
5090 * Receiver will miss it and start proper recovery
5091 * procedures and ask for retransmission.
5092 */
5093 if (l2cap_check_fcs(chan, skb))
5094 goto drop;
5095
5096 if (!control->sframe && control->sar == L2CAP_SAR_START)
5097 len -= L2CAP_SDULEN_SIZE;
5098
5099 if (chan->fcs == L2CAP_FCS_CRC16)
5100 len -= L2CAP_FCS_SIZE;
5101
5102 if (len > chan->mps) {
5103 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5104 goto drop;
5105 }
5106
5107 if (!control->sframe) {
5108 int err;
5109
5110 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5111 control->sar, control->reqseq, control->final,
5112 control->txseq);
5113
5114 /* Validate F-bit - F=0 always valid, F=1 only
5115 * valid in TX WAIT_F
5116 */
5117 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5118 goto drop;
5119
5120 if (chan->mode != L2CAP_MODE_STREAMING) {
5121 event = L2CAP_EV_RECV_IFRAME;
5122 err = l2cap_rx(chan, control, skb, event);
5123 } else {
5124 err = l2cap_stream_rx(chan, control, skb);
5125 }
5126
5127 if (err)
5128 l2cap_send_disconn_req(chan->conn, chan,
5129 ECONNRESET);
5130 } else {
5131 const u8 rx_func_to_event[4] = {
5132 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5133 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5134 };
5135
5136 /* Only I-frames are expected in streaming mode */
5137 if (chan->mode == L2CAP_MODE_STREAMING)
5138 goto drop;
5139
5140 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5141 control->reqseq, control->final, control->poll,
5142 control->super);
5143
5144 if (len != 0) {
5145 BT_ERR("%d", len);
5146 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5147 goto drop;
5148 }
5149
5150 /* Validate F and P bits */
5151 if (control->final && (control->poll ||
5152 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5153 goto drop;
5154
5155 event = rx_func_to_event[control->super];
5156 if (l2cap_rx(chan, control, skb, event))
5157 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5158 }
5159
5160 return 0;
5161
5162 drop:
5163 kfree_skb(skb);
5164 return 0;
5165 }
5166
/* Deliver an incoming data frame to the channel identified by @cid.
 *
 * Ownership: @skb is consumed on every path — either handed to the
 * channel's recv callback / ERTM layer, or freed.
 *
 * Locking: l2cap_get_chan_by_scid() presumably returns with the channel
 * locked (the a2mp path below locks explicitly, and every exit path goes
 * through l2cap_chan_unlock) — TODO confirm against its definition.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		/* No existing channel: an A2MP CID may create one on
		 * demand; anything else is dropped.
		 */
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() returning 0 means it took ownership of skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
5225
5226 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5227 struct sk_buff *skb)
5228 {
5229 struct l2cap_chan *chan;
5230
5231 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5232 if (!chan)
5233 goto drop;
5234
5235 BT_DBG("chan %p, len %d", chan, skb->len);
5236
5237 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5238 goto drop;
5239
5240 if (chan->imtu < skb->len)
5241 goto drop;
5242
5243 if (!chan->ops->recv(chan, skb))
5244 return;
5245
5246 drop:
5247 kfree_skb(skb);
5248 }
5249
5250 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5251 struct sk_buff *skb)
5252 {
5253 struct l2cap_chan *chan;
5254
5255 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5256 if (!chan)
5257 goto drop;
5258
5259 BT_DBG("chan %p, len %d", chan, skb->len);
5260
5261 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5262 goto drop;
5263
5264 if (chan->imtu < skb->len)
5265 goto drop;
5266
5267 if (!chan->ops->recv(chan, skb))
5268 return;
5269
5270 drop:
5271 kfree_skb(skb);
5272 }
5273
5274 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5275 {
5276 struct l2cap_hdr *lh = (void *) skb->data;
5277 u16 cid, len;
5278 __le16 psm;
5279
5280 skb_pull(skb, L2CAP_HDR_SIZE);
5281 cid = __le16_to_cpu(lh->cid);
5282 len = __le16_to_cpu(lh->len);
5283
5284 if (len != skb->len) {
5285 kfree_skb(skb);
5286 return;
5287 }
5288
5289 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5290
5291 switch (cid) {
5292 case L2CAP_CID_LE_SIGNALING:
5293 case L2CAP_CID_SIGNALING:
5294 l2cap_sig_channel(conn, skb);
5295 break;
5296
5297 case L2CAP_CID_CONN_LESS:
5298 psm = get_unaligned((__le16 *) skb->data);
5299 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5300 l2cap_conless_channel(conn, psm, skb);
5301 break;
5302
5303 case L2CAP_CID_LE_DATA:
5304 l2cap_att_channel(conn, cid, skb);
5305 break;
5306
5307 case L2CAP_CID_SMP:
5308 if (smp_sig_channel(conn, skb))
5309 l2cap_conn_del(conn->hcon, EACCES);
5310 break;
5311
5312 default:
5313 l2cap_data_channel(conn, cid, skb);
5314 break;
5315 }
5316 }
5317
5318 /* ---- L2CAP interface with lower layer (HCI) ---- */
5319
5320 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5321 {
5322 int exact = 0, lm1 = 0, lm2 = 0;
5323 struct l2cap_chan *c;
5324
5325 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5326
5327 /* Find listening sockets and check their link_mode */
5328 read_lock(&chan_list_lock);
5329 list_for_each_entry(c, &chan_list, global_l) {
5330 struct sock *sk = c->sk;
5331
5332 if (c->state != BT_LISTEN)
5333 continue;
5334
5335 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5336 lm1 |= HCI_LM_ACCEPT;
5337 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5338 lm1 |= HCI_LM_MASTER;
5339 exact++;
5340 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5341 lm2 |= HCI_LM_ACCEPT;
5342 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5343 lm2 |= HCI_LM_MASTER;
5344 }
5345 }
5346 read_unlock(&chan_list_lock);
5347
5348 return exact ? lm1 : lm2;
5349 }
5350
5351 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5352 {
5353 struct l2cap_conn *conn;
5354
5355 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5356
5357 if (!status) {
5358 conn = l2cap_conn_add(hcon, status);
5359 if (conn)
5360 l2cap_conn_ready(conn);
5361 } else
5362 l2cap_conn_del(hcon, bt_to_errno(status));
5363
5364 }
5365
5366 int l2cap_disconn_ind(struct hci_conn *hcon)
5367 {
5368 struct l2cap_conn *conn = hcon->l2cap_data;
5369
5370 BT_DBG("hcon %p", hcon);
5371
5372 if (!conn)
5373 return HCI_ERROR_REMOTE_USER_TERM;
5374 return conn->disc_reason;
5375 }
5376
/* HCI callback: the ACL link was disconnected; tear down all L2CAP
 * state for @hcon, translating the HCI reason code to an errno.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
5383
5384 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5385 {
5386 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5387 return;
5388
5389 if (encrypt == 0x00) {
5390 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5391 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5392 } else if (chan->sec_level == BT_SECURITY_HIGH)
5393 l2cap_chan_close(chan, ECONNREFUSED);
5394 } else {
5395 if (chan->sec_level == BT_SECURITY_MEDIUM)
5396 __clear_chan_timer(chan);
5397 }
5398 }
5399
/* HCI callback: authentication/encryption state changed on @hcon.
 *
 * Walks every channel on the connection and advances its state machine
 * according to the security outcome: LE channels become ready, pending
 * outgoing connects are (re)issued or timed out, and deferred incoming
 * connects (BT_CONNECT2) are answered with the appropriate connect
 * response.  Always returns 0.
 *
 * Locking: takes conn->chan_lock, and each channel's lock around its
 * state transition; BT_CONNECT2 handling additionally takes the socket
 * lock for the deferred-setup bookkeeping.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* LE link now encrypted: distribute SMP keys and stop the
		 * pairing security timer.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channels have no security state to update */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Connect still pending at the HCI level; nothing to do yet */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			/* Security upgrade finished; resume the socket */
			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status) {
				l2cap_send_conn_req(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must authorize; answer
					 * "pending" and wake the listener.
					 */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: refuse and schedule teardown */
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Accepted and no config exchange started yet:
			 * kick off configuration now.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
5517
/* Entry point for ACL data from HCI: reassemble L2CAP frames that were
 * fragmented across multiple ACL packets.
 *
 * A start fragment (!ACL_CONT) carries the Basic L2CAP header, from
 * which the total frame length is derived; continuation fragments are
 * appended to conn->rx_skb until conn->rx_len reaches zero, at which
 * point the complete frame is handed to l2cap_recv_frame().  Any
 * framing inconsistency marks the connection unreliable and drops the
 * partial state.
 *
 * Ownership: the incoming @skb is always freed here (its payload is
 * copied into rx_skb); a completed rx_skb is consumed by
 * l2cap_recv_frame().  Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start fragment while reassembly is in progress
		 * means the previous frame was truncated: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
5609
5610 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5611 {
5612 struct l2cap_chan *c;
5613
5614 read_lock(&chan_list_lock);
5615
5616 list_for_each_entry(c, &chan_list, global_l) {
5617 struct sock *sk = c->sk;
5618
5619 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5620 batostr(&bt_sk(sk)->src),
5621 batostr(&bt_sk(sk)->dst),
5622 c->state, __le16_to_cpu(c->psm),
5623 c->scid, c->dcid, c->imtu, c->omtu,
5624 c->sec_level, c->mode);
5625 }
5626
5627 read_unlock(&chan_list_lock);
5628
5629 return 0;
5630 }
5631
/* debugfs open: bind the single-record seq_file to l2cap_debugfs_show */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}

/* File operations for /sys/kernel/debug/bluetooth/l2cap */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry for the debugfs file, kept for removal in l2cap_exit() */
static struct dentry *l2cap_debugfs;
5645
5646 int __init l2cap_init(void)
5647 {
5648 int err;
5649
5650 err = l2cap_init_sockets();
5651 if (err < 0)
5652 return err;
5653
5654 if (bt_debugfs) {
5655 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5656 bt_debugfs, NULL, &l2cap_debugfs_fops);
5657 if (!l2cap_debugfs)
5658 BT_ERR("Failed to create L2CAP debug file");
5659 }
5660
5661 return 0;
5662 }
5663
/* Module exit: remove the debugfs entry (debugfs_remove(NULL) is a
 * no-op) and unregister the L2CAP socket layer.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5669
/* Runtime knob to force channels away from ERTM (defined near the top
 * of this file); writable by root via /sys/module parameters.
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
This page took 0.152265 seconds and 5 git commands to generate.