Bluetooth: Move l2cap_chan_hold/put to l2cap_core.c
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41
42 bool disable_ertm;
43
44 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
45 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
46
47 static LIST_HEAD(chan_list);
48 static DEFINE_RWLOCK(chan_list_lock);
49
50 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
52 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
53 void *data);
54 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
57
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
60
61 /* ---- L2CAP channels ---- */
62
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
64 {
65 struct l2cap_chan *c;
66
67 list_for_each_entry(c, &conn->chan_l, list) {
68 if (c->dcid == cid)
69 return c;
70 }
71 return NULL;
72 }
73
74 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
75 {
76 struct l2cap_chan *c;
77
78 list_for_each_entry(c, &conn->chan_l, list) {
79 if (c->scid == cid)
80 return c;
81 }
82 return NULL;
83 }
84
85 /* Find channel with given SCID.
86 * Returns locked channel. */
87 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
88 {
89 struct l2cap_chan *c;
90
91 mutex_lock(&conn->chan_lock);
92 c = __l2cap_get_chan_by_scid(conn, cid);
93 if (c)
94 l2cap_chan_lock(c);
95 mutex_unlock(&conn->chan_lock);
96
97 return c;
98 }
99
100 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
101 {
102 struct l2cap_chan *c;
103
104 list_for_each_entry(c, &conn->chan_l, list) {
105 if (c->ident == ident)
106 return c;
107 }
108 return NULL;
109 }
110
111 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
112 {
113 struct l2cap_chan *c;
114
115 list_for_each_entry(c, &chan_list, global_l) {
116 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
117 return c;
118 }
119 return NULL;
120 }
121
122 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
123 {
124 int err;
125
126 write_lock(&chan_list_lock);
127
128 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
129 err = -EADDRINUSE;
130 goto done;
131 }
132
133 if (psm) {
134 chan->psm = psm;
135 chan->sport = psm;
136 err = 0;
137 } else {
138 u16 p;
139
140 err = -EINVAL;
141 for (p = 0x1001; p < 0x1100; p += 2)
142 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
143 chan->psm = cpu_to_le16(p);
144 chan->sport = cpu_to_le16(p);
145 err = 0;
146 break;
147 }
148 }
149
150 done:
151 write_unlock(&chan_list_lock);
152 return err;
153 }
154
/* Assign a fixed source CID to the channel.  Taking chan_list_lock
 * serializes this against global channel-list lookups.  Always
 * returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
165
166 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
167 {
168 u16 cid = L2CAP_CID_DYN_START;
169
170 for (; cid < L2CAP_CID_DYN_END; cid++) {
171 if (!__l2cap_get_chan_by_scid(conn, cid))
172 return cid;
173 }
174
175 return 0;
176 }
177
/* Set the new channel state and notify the owner through the
 * state_change callback.  Lockless variant; l2cap_state_change()
 * wraps it with the socket lock.
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
186
/* Socket-locked wrapper around __l2cap_state_change(). */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
195
/* Record an error code on the owning socket; callers hold the socket
 * lock (see l2cap_chan_set_err()).
 */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
202
/* Socket-locked wrapper around __l2cap_chan_set_err(). */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
211
/* Arm the ERTM retransmission timer, but only while the monitor timer
 * is idle and a retransmission timeout has been configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
220
/* Arm the ERTM monitor timer if a timeout is configured.  The
 * retransmission timer is stopped first — the two do not run
 * concurrently (see the pending check in __set_retrans_timer()).
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
229
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
231 u16 seq)
232 {
233 struct sk_buff *skb;
234
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
237 return skb;
238 }
239
240 return NULL;
241 }
242
243 /* ---- L2CAP sequence number lists ---- */
244
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
251 * allocs or frees.
252 */
253
/* Allocate the flat array backing a sequence-number list sized for
 * @size entries.  Returns 0 on success or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Power-of-two size lets "seq & mask" index the array directly. */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
276
/* Release the array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
281
/* Test whether @seq is currently linked into the list: a slot still
 * holding the CLEAR sentinel means the number is absent.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
288
/* Unlink @seq from the list and return it, or L2CAP_SEQ_LIST_CLEAR if
 * the list is empty or @seq is not present.  Removing the head is
 * O(1); removing from the middle walks the singly-linked chain.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			/* That was the only element: list is now empty. */
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
322
/* Dequeue and return the first sequence number in the list, or
 * L2CAP_SEQ_LIST_CLEAR when empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
328
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
330 {
331 u16 i;
332
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
334 return;
335
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
338
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 }
342
/* Append @seq at the tail of the list.  Duplicates are ignored: a
 * slot that is not CLEAR already links this number somewhere.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* Empty list: the new element becomes the head too. */
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
360
/* Delayed-work handler for the channel timer: close the channel with a
 * reason derived from its current state, then drop a channel reference
 * (presumably the one taken when the timer was armed — l2cap_set_timer
 * is not visible here; confirm).
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock first, then the channel lock. */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* ops->close runs without the channel lock held. */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
390
/* Allocate and initialise a new channel, add it to the global channel
 * list, and return it with a single reference held by the caller.
 * Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	atomic_set(&chan->refcnt, 1);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
418
/* Remove the channel from the global list and drop the creation
 * reference taken in l2cap_chan_create().
 */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	l2cap_chan_put(chan);
}
427
/* Take a reference on the channel. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));

	atomic_inc(&c->refcnt);
}
434
/* Drop a channel reference, freeing the channel when the last
 * reference goes away.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));

	if (atomic_dec_and_test(&c->refcnt))
		kfree(c);
}
442
/* Reset the per-channel ERTM/security tunables to their default
 * values and mark the channel force-active.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
454
/* Attach @chan to @conn: assign source/destination CIDs and default
 * MTUs according to the channel type, take a channel reference, and
 * link the channel into the connection's list.  Callers hold
 * conn->chan_lock (see l2cap_chan_add()).
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort defaults for the extended flow specification. */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;

	/* This reference is dropped again in l2cap_chan_del(). */
	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
510
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
517
518 void l2cap_chan_del(struct l2cap_chan *chan, int err)
519 {
520 struct l2cap_conn *conn = chan->conn;
521
522 __clear_chan_timer(chan);
523
524 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
525
526 if (conn) {
527 /* Delete from channel list */
528 list_del(&chan->list);
529
530 l2cap_chan_put(chan);
531
532 chan->conn = NULL;
533
534 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
535 hci_conn_put(conn->hcon);
536 }
537
538 if (chan->ops->teardown)
539 chan->ops->teardown(chan, err);
540
541 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
542 return;
543
544 switch(chan->mode) {
545 case L2CAP_MODE_BASIC:
546 break;
547
548 case L2CAP_MODE_ERTM:
549 __clear_retrans_timer(chan);
550 __clear_monitor_timer(chan);
551 __clear_ack_timer(chan);
552
553 skb_queue_purge(&chan->srej_q);
554
555 l2cap_seq_list_free(&chan->srej_list);
556 l2cap_seq_list_free(&chan->retrans_list);
557
558 /* fall through */
559
560 case L2CAP_MODE_STREAMING:
561 skb_queue_purge(&chan->tx_q);
562 break;
563 }
564
565 return;
566 }
567
/* Drive the channel towards closed from whatever state it is in,
 * sending a Disconnect Request or a Connect Response rejection on the
 * wire where the protocol requires it.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan,
	       state_to_string(chan->state), sk);

	switch (chan->state) {
	case BT_LISTEN:
		if (chan->ops->teardown)
			chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			/* Graceful disconnect: arm the channel timer with
			 * the socket send timeout and request disconnection.
			 */
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Reject the still-pending incoming connection. */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		if (chan->ops->teardown)
			chan->ops->teardown(chan, 0);
		break;
	}
}
626
627 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
628 {
629 if (chan->chan_type == L2CAP_CHAN_RAW) {
630 switch (chan->sec_level) {
631 case BT_SECURITY_HIGH:
632 return HCI_AT_DEDICATED_BONDING_MITM;
633 case BT_SECURITY_MEDIUM:
634 return HCI_AT_DEDICATED_BONDING;
635 default:
636 return HCI_AT_NO_BONDING;
637 }
638 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
639 if (chan->sec_level == BT_SECURITY_LOW)
640 chan->sec_level = BT_SECURITY_SDP;
641
642 if (chan->sec_level == BT_SECURITY_HIGH)
643 return HCI_AT_NO_BONDING_MITM;
644 else
645 return HCI_AT_NO_BONDING;
646 } else {
647 switch (chan->sec_level) {
648 case BT_SECURITY_HIGH:
649 return HCI_AT_GENERAL_BONDING_MITM;
650 case BT_SECURITY_MEDIUM:
651 return HCI_AT_GENERAL_BONDING;
652 default:
653 return HCI_AT_NO_BONDING;
654 }
655 }
656 }
657
/* Service level security: ask the HCI layer to enforce the channel's
 * security level (with the matching authentication requirement) on
 * the underlying link.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
668
/* Pick the next signalling command identifier for this connection,
 * wrapping within the kernel-owned range.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
690
/* Build and transmit a signalling command on the connection's ACL
 * link.  The command is silently dropped if the skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Prefer a non-flushable boundary flag when the controller
	 * supports it.
	 */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
711
/* Transmit a data skb on the channel's ACL link, honouring the
 * channel's flushable and force-active flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
729
730 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
731 {
732 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
733 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
734
735 if (enh & L2CAP_CTRL_FRAME_TYPE) {
736 /* S-Frame */
737 control->sframe = 1;
738 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
739 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
740
741 control->sar = 0;
742 control->txseq = 0;
743 } else {
744 /* I-Frame */
745 control->sframe = 0;
746 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
747 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
748
749 control->poll = 0;
750 control->super = 0;
751 }
752 }
753
754 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
755 {
756 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
757 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
758
759 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
760 /* S-Frame */
761 control->sframe = 1;
762 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
763 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
764
765 control->sar = 0;
766 control->txseq = 0;
767 } else {
768 /* I-Frame */
769 control->sframe = 0;
770 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
771 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
772
773 control->poll = 0;
774 control->super = 0;
775 }
776 }
777
/* Parse the control field at the head of @skb into
 * bt_cb(skb)->control and advance the skb past it; the field width
 * depends on whether the channel uses extended control fields.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
791
792 static u32 __pack_extended_control(struct l2cap_ctrl *control)
793 {
794 u32 packed;
795
796 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
797 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
798
799 if (control->sframe) {
800 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
801 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
802 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
803 } else {
804 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
805 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
806 }
807
808 return packed;
809 }
810
811 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
812 {
813 u16 packed;
814
815 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
816 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
817
818 if (control->sframe) {
819 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
820 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
821 packed |= L2CAP_CTRL_FRAME_TYPE;
822 } else {
823 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
824 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
825 }
826
827 return packed;
828 }
829
/* Serialise @control into the control-field slot of @skb, located
 * immediately after the basic L2CAP header.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
842
843 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
844 {
845 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
846 return L2CAP_EXT_HDR_SIZE;
847 else
848 return L2CAP_ENH_HDR_SIZE;
849 }
850
/* Allocate and fill an S-frame PDU carrying the packed @control
 * field.  Returns a ready-to-send skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers the header and control field written above. */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
883
/* Build and send a supervisory frame, updating ERTM bookkeeping as a
 * side effect: a pending F-bit, the RNR-sent state, and the last
 * acknowledged sequence number.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A queued F-bit rides on the next non-poll S-frame. */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		/* Non-SREJ frames acknowledge everything up to reqseq. */
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
921
922 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
923 {
924 struct l2cap_ctrl control;
925
926 BT_DBG("chan %p, poll %d", chan, poll);
927
928 memset(&control, 0, sizeof(control));
929 control.sframe = 1;
930 control.poll = poll;
931
932 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
933 control.super = L2CAP_SUPER_RNR;
934 else
935 control.super = L2CAP_SUPER_RR;
936
937 control.reqseq = chan->buffer_seq;
938 l2cap_send_sframe(chan, &control);
939 }
940
941 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
942 {
943 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
944 }
945
/* Send an L2CAP Connect Request for the channel and mark a connect as
 * pending (cleared again via __l2cap_no_conn_pending() checks).
 */
static void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
960
/* Move the channel to BT_CONNECTED and notify its owner. */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
971
/* Start connection establishment for the channel.  LE channels become
 * ready immediately; on BR/EDR the remote feature mask may have to be
 * queried first before the Connect Request can go out.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Still waiting for the information response. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan))
			l2cap_send_conn_req(chan);
	} else {
		/* Query the remote feature mask first. */
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
			       L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
1001
1002 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1003 {
1004 u32 local_feat_mask = l2cap_feat_mask;
1005 if (!disable_ertm)
1006 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1007
1008 switch (mode) {
1009 case L2CAP_MODE_ERTM:
1010 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1011 case L2CAP_MODE_STREAMING:
1012 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1013 default:
1014 return 0x00;
1015 }
1016 }
1017
/* Send a Disconnect Request for the channel and record @err on the
 * owning socket.  A2MP fixed channels only change state; no request
 * goes on the wire for them.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM) {
		/* Stop all ERTM timers once we are disconnecting. */
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		__l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
		       L2CAP_DISCONN_REQ, sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1047
1048 /* ---- L2CAP connections ---- */
/* Push every connection-oriented channel forward: send Connect
 * Requests for channels in BT_CONNECT and answer pending incoming
 * connections (BT_CONNECT2), kicking off configuration on success.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels that insist on a mode the remote
			 * does not support.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_send_conn_req(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must authorise first;
					 * wake up the listening parent.
					 */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Start configuration only once and only after a
			 * successful response.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1131
/* Find socket with cid and source/destination bdaddr.
 * Returns closest match.
 *
 * NOTE(review): the old comment also said "locked", but nothing here
 * locks the returned channel — only chan_list_lock is taken and
 * released; confirm whether callers expect a locked channel.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1174
/* Handle an incoming LE link becoming ready: if a listening channel exists
 * for the LE data CID, spawn a child channel, attach it to the connection
 * and signal it ready. No-op when nobody is listening.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	/* Parent socket lock protects the accept queue manipulation below */
	lock_sock(parent);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	sk = chan->sk;

	/* Keep the ACL/LE link alive while the child channel exists */
	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	l2cap_chan_add(conn, chan);

	l2cap_chan_ready(chan);

clean:
	/* Success path falls through here too — only the parent lock is
	 * released; nothing else to undo.
	 */
	release_sock(parent);
}
1212
/* Drive all channels on @conn forward once the underlying HCI link is up:
 * LE incoming links may spawn a listener child, LE outgoing links kick off
 * SMP, and each existing channel is advanced according to its type/state.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: service a listening socket, if any */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: start security (SMP) pairing */
	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP fixed channels are managed elsewhere */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (conn->hcon->type == LE_LINK) {
			/* Channel is usable once security is satisfied */
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels need no L2CAP
			 * signalling — mark connected right away.
			 */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1256
/* Notify sockets that we cannot guarantee reliability anymore:
 * propagate @err to every channel that demanded a reliable link.
 */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		/* Only channels that asked for reliability are told */
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			__l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1273
/* Delayed-work handler: the information request (feature mask exchange)
 * timed out. Mark the exchange as done anyway and restart connection
 * setup so pending channels are not stuck.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1284
/* Tear down an L2CAP connection after the HCI link went away (or failed).
 * Every channel is deleted with @err, then closed and released; finally
 * the conn itself, its hci_chan and pending timers are destroyed.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled incoming frame */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a ref so the channel survives chan_del until the
		 * ops->close callback has run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* Timers are cancelled synchronously — safe because we are not
	 * running from those work items here.
	 */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1327
/* Delayed-work handler: SMP security procedure timed out on an LE link.
 * If pairing was still pending, destroy the SMP context and drop the
 * whole connection with ETIMEDOUT.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	/* test_and_clear avoids racing with l2cap_conn_del doing the same */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1340
/* Allocate and initialise the L2CAP connection object for @hcon.
 * Returns the existing conn if one is already attached, NULL on
 * allocation failure, or NULL-equivalent (existing conn) when @status
 * signals an HCI error. Called from atomic-ish HCI callback context,
 * hence GFP_ATOMIC.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up, or the link failed — nothing to create */
	if (conn || status)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links may advertise their own (smaller) MTU */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* The two delayed works share storage conceptually: LE links use
	 * the SMP security timer, BR/EDR links the info-request timer.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1389
1390 /* ---- Socket interface ---- */
1391
/* Find a channel with matching PSM and source/destination bdaddr.
 * Returns the closest match: exact address match wins immediately,
 * otherwise the last wildcard (BDADDR_ANY) candidate is returned.
 * The returned channel is not locked or referenced.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		/* state == 0 means "any state" */
		if (state && c->state != state)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1434
/* Initiate an outgoing L2CAP connection on @chan to @dst.
 * @psm/@cid select the remote service (PSM for connection-oriented
 * channels, fixed CID e.g. L2CAP_CID_LE_DATA for LE). Validates the
 * channel's mode and state, creates/reuses the HCI link, attaches the
 * channel to the connection and starts the connect procedure.
 * Returns 0 on success (or if already connecting) and a negative errno
 * otherwise. Runs with hdev lock and chan lock held; both are released
 * on all exit paths via the single 'done' label.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(chan->psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a fixed CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* LE data channel is identified by the fixed LE CID */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* Only one channel allowed per LE link */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* l2cap_chan_add takes conn->chan_lock which must not nest inside
	 * the channel lock — drop and retake around it.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Raw/connectionless: no signalling needed */
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1573
/* Sleep until all outstanding ERTM I-frames on the channel have been
 * acknowledged (unacked_frames drops to 0) or the connection disappears.
 * Called with the socket locked; the lock is released while sleeping.
 * Returns 0 on success, a -ERESTARTSYS-style value if interrupted by a
 * signal, or a pending socket error.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* Re-arm the poll interval if the previous one expired */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so acks can arrive */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1605
/* Delayed-work handler for the ERTM monitor timer. Feeds the MONITOR_TO
 * event to the TX state machine. The work item carries a channel
 * reference which is dropped here on every exit path.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel was detached from its connection — nothing to do */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1626
/* Delayed-work handler for the ERTM retransmission timer. Feeds the
 * RETRANS_TO event to the TX state machine. The work item carries a
 * channel reference which is dropped here on every exit path.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel was detached from its connection — nothing to do */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1646
/* Transmit all queued PDUs in Streaming mode: stamp each frame with the
 * next tx sequence number, append an FCS if configured, and send. There
 * is no retransmission in streaming mode, so frames are dequeued and
 * sent immediately.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode never acknowledges, so reqseq is 0 */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1682
/* Transmit as many queued I-frames as the ERTM window allows.
 * Stops when the send queue is drained, the remote TX window is full,
 * or the TX state machine leaves the XMIT state. Each frame is kept on
 * tx_q (a clone is sent) so it can be retransmitted later.
 * Returns the number of frames sent, 0 if the remote side is busy,
 * or -ENOTCONN if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggy-back a pending F-bit on this I-frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acks everything up to buffer_seq */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; NULL means the queue is drained */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1749
/* Retransmit every frame queued on retrans_list. Each frame's stored
 * control field is refreshed (reqseq/F-bit) before resending; frames
 * that exceeded max_tx trigger a disconnect. Cloned skbs are read-only,
 * so a writable copy is made when needed.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx == 0 means unlimited retries */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			tx_skb = skb_clone(skb, GFP_ATOMIC);
		}

		/* NOTE(review): GFP_ATOMIC here vs GFP_KERNEL in
		 * l2cap_ertm_send — confirm which context this runs in.
		 */
		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents with the refreshed control field */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
1825
1826 static void l2cap_retransmit(struct l2cap_chan *chan,
1827 struct l2cap_ctrl *control)
1828 {
1829 BT_DBG("chan %p, control %p", chan, control);
1830
1831 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1832 l2cap_ertm_resend(chan);
1833 }
1834
/* Retransmit all unacked frames starting at @control->reqseq (REJ
 * recovery). First locates the starting frame in tx_q, then queues
 * every frame up to (but excluding) tx_send_head for retransmission.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll request demands an F-bit on the next frame we send */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Start from a clean retransmission list */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Phase 1: find the first frame to retransmit */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Phase 2: queue everything already sent from there on */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
1868
/* Send an acknowledgement to the peer when appropriate. If we are
 * locally busy an RNR is sent; otherwise pending I-frames (which carry
 * implicit acks) are flushed first, and an explicit RR is only sent
 * once the unacked window is 3/4 full. Smaller backlogs just re-arm
 * the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Receiver busy: tell the peer to stop sending */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Below threshold: defer the ack via the ack timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
1918
/* Copy @len bytes of user data from @msg into @skb, spilling overflow
 * into a chain of continuation fragments (frag_list) each at most the
 * connection MTU. @count is how much fits in @skb itself. Returns the
 * number of bytes copied or a negative errno; on error the fragments
 * already linked into @skb are freed by the caller freeing @skb.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link into the chain before copying so that error paths
		 * free it together with the head skb.
		 */
		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the head skb's accounting in sync with the chain */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
1963
/* Build a connectionless (G-frame) PDU: L2CAP header + PSM + payload
 * copied from the user iovec. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	/* Connectionless frames carry the PSM after the basic header */
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu priority %u", chan, len, priority);

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1997
/* Build a Basic-mode (B-frame) PDU: plain L2CAP header + payload copied
 * from the user iovec. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
					      u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2030
/* Build an ERTM/Streaming I-frame PDU: L2CAP header, placeholder control
 * field (filled in at transmit time), optional SDU length (@sdulen != 0
 * for the first segment of a segmented SDU), payload, and room reserved
 * for an FCS if configured. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on enhanced vs extended control field */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2084
/* Segment an SDU from @msg into I-frame PDUs appended to @seg_queue.
 * PDU size is bounded by the HCI MTU, the ERTM header/FCS overhead and
 * the remote MPS. SAR markers (UNSEGMENTED/START/CONTINUE/END) are set
 * on each segment. Returns 0 on success or a negative errno; on error
 * @seg_queue is purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Fits in one frame: no SAR, no SDU length field */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the START segment carried the SDU length;
			 * reclaim that space for subsequent segments.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2152
/* Send user data on @chan. Dispatches on channel type/mode:
 * connectionless channels send a single G-frame, Basic mode a single
 * B-frame, and ERTM/Streaming segment the SDU first and feed it to the
 * TX state machine (ERTM) or send directly (Streaming). Returns the
 * number of bytes accepted or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2232
/* Send SREJ S-frames for every missing sequence number between the
 * expected tx seq and @txseq (the frame that actually arrived), skipping
 * frames already buffered out-of-order in srej_q. Each requested seq is
 * also recorded in srej_list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Already have this frame buffered? Then don't request it */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	/* Expect the frame right after the one that just arrived */
	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2255
2256 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2257 {
2258 struct l2cap_ctrl control;
2259
2260 BT_DBG("chan %p", chan);
2261
2262 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2263 return;
2264
2265 memset(&control, 0, sizeof(control));
2266 control.sframe = 1;
2267 control.super = L2CAP_SUPER_SREJ;
2268 control.reqseq = chan->srej_list.tail;
2269 l2cap_send_sframe(chan, &control);
2270 }
2271
/* Re-send SREJ requests for every outstanding sequence number up to (but
 * excluding) @txseq, rotating each popped entry back onto srej_list.
 * The initial head is captured so the rotation makes exactly one pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Stop at the frame that arrived, or when the list empties */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Rotate: keep the seq outstanding until it arrives */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2297
/* Process an incoming acknowledgement: @reqseq acks every frame with a
 * txseq strictly before it. Acked frames are unlinked from tx_q and
 * freed; the retransmission timer is stopped once nothing is pending.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or a duplicate ack — ignore */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* Retransmission timer only runs while frames are unacked */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2329
2330 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2331 {
2332 BT_DBG("chan %p", chan);
2333
2334 chan->expected_tx_seq = chan->buffer_seq;
2335 l2cap_seq_list_clear(&chan->srej_list);
2336 skb_queue_purge(&chan->srej_q);
2337 chan->rx_state = L2CAP_RX_STATE_RECV;
2338 }
2339
/* ERTM TX state machine handler for the XMIT state. Dispatches @event:
 * queues/sends new data, enters/exits local-busy, processes acks, and
 * transitions to WAIT_F when a poll is sent (explicit poll or retrans
 * timeout).
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Point tx_send_head at the first new frame if idle */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Sends RNR because CONN_LOCAL_BUSY is now set */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer we were busy — poll it with an
			 * RR(P=1) and wait for the F-bit in WAIT_F.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer and await F-bit */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2411
/* ERTM transmit state machine, WAIT_F state: a poll (P=1) frame has
 * been sent and we are waiting for the peer's final (F=1) response.
 * While waiting, new data is queued but not transmitted.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Ack so the peer learns of our busy condition */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;
			/* RNR was sent while busy: re-poll the peer with
			 * RR(P=1) and restart the monitor timer.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);

		/* Fall through */

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			/* Poll answered: resume normal transmission */
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore - a poll is already outstanding */
		break;
	case L2CAP_EV_MONITOR_TO:
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			/* Re-poll until the retry limit is exhausted */
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2489
2490 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2491 struct sk_buff_head *skbs, u8 event)
2492 {
2493 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2494 chan, control, skbs, event, chan->tx_state);
2495
2496 switch (chan->tx_state) {
2497 case L2CAP_TX_STATE_XMIT:
2498 l2cap_tx_state_xmit(chan, control, skbs, event);
2499 break;
2500 case L2CAP_TX_STATE_WAIT_F:
2501 l2cap_tx_state_wait_f(chan, control, skbs, event);
2502 break;
2503 default:
2504 /* Ignore event */
2505 break;
2506 }
2507 }
2508
/* Feed the reqseq (and F bit) from an inbound frame into the tx state
 * machine so acknowledged frames can be released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2515
/* Feed only the F bit of an inbound frame into the tx state machine
 * (used where reqseq processing must not happen, e.g. SREJ handling).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2522
2523 /* Copy frame to all raw sockets on that connection */
2524 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2525 {
2526 struct sk_buff *nskb;
2527 struct l2cap_chan *chan;
2528
2529 BT_DBG("conn %p", conn);
2530
2531 mutex_lock(&conn->chan_lock);
2532
2533 list_for_each_entry(chan, &conn->chan_l, list) {
2534 struct sock *sk = chan->sk;
2535 if (chan->chan_type != L2CAP_CHAN_RAW)
2536 continue;
2537
2538 /* Don't send frame to the socket it came from */
2539 if (skb->sk == sk)
2540 continue;
2541 nskb = skb_clone(skb, GFP_ATOMIC);
2542 if (!nskb)
2543 continue;
2544
2545 if (chan->ops->recv(chan, nskb))
2546 kfree_skb(nskb);
2547 }
2548
2549 mutex_unlock(&conn->chan_lock);
2550 }
2551
2552 /* ---- L2CAP signalling commands ---- */
/* Build a signalling PDU: L2CAP header + command header + payload.
 *
 * The PDU is split at the connection MTU: the first skb carries the
 * headers plus as much payload as fits, continuation fragments (raw
 * payload, no headers) are chained on frag_list.
 *
 * Returns the skb on success, or NULL on allocation failure (any
 * fragments already allocated are freed via kfree_skb of the head).
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* Total wire length; the first fragment is capped at the MTU */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* NOTE(review): assumes conn->mtu is at least
		 * L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE, otherwise count
		 * goes negative here - confirm the MTU floor is enforced
		 * where conn->mtu is assigned.
		 */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
2615
2616 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2617 {
2618 struct l2cap_conf_opt *opt = *ptr;
2619 int len;
2620
2621 len = L2CAP_CONF_OPT_SIZE + opt->len;
2622 *ptr += len;
2623
2624 *type = opt->type;
2625 *olen = opt->len;
2626
2627 switch (opt->len) {
2628 case 1:
2629 *val = *((u8 *) opt->val);
2630 break;
2631
2632 case 2:
2633 *val = get_unaligned_le16(opt->val);
2634 break;
2635
2636 case 4:
2637 *val = get_unaligned_le32(opt->val);
2638 break;
2639
2640 default:
2641 *val = (unsigned long) opt->val;
2642 break;
2643 }
2644
2645 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2646 return len;
2647 }
2648
2649 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2650 {
2651 struct l2cap_conf_opt *opt = *ptr;
2652
2653 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2654
2655 opt->type = type;
2656 opt->len = len;
2657
2658 switch (len) {
2659 case 1:
2660 *((u8 *) opt->val) = val;
2661 break;
2662
2663 case 2:
2664 put_unaligned_le16(val, opt->val);
2665 break;
2666
2667 case 4:
2668 put_unaligned_le32(val, opt->val);
2669 break;
2670
2671 default:
2672 memcpy(opt->val, (void *) val, len);
2673 break;
2674 }
2675
2676 *ptr += L2CAP_CONF_OPT_SIZE + len;
2677 }
2678
2679 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2680 {
2681 struct l2cap_conf_efs efs;
2682
2683 switch (chan->mode) {
2684 case L2CAP_MODE_ERTM:
2685 efs.id = chan->local_id;
2686 efs.stype = chan->local_stype;
2687 efs.msdu = cpu_to_le16(chan->local_msdu);
2688 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2689 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2690 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2691 break;
2692
2693 case L2CAP_MODE_STREAMING:
2694 efs.id = 1;
2695 efs.stype = L2CAP_SERV_BESTEFFORT;
2696 efs.msdu = cpu_to_le16(chan->local_msdu);
2697 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2698 efs.acc_lat = 0;
2699 efs.flush_to = 0;
2700 break;
2701
2702 default:
2703 return;
2704 }
2705
2706 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2707 (unsigned long) &efs);
2708 }
2709
2710 static void l2cap_ack_timeout(struct work_struct *work)
2711 {
2712 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2713 ack_timer.work);
2714 u16 frames_to_ack;
2715
2716 BT_DBG("chan %p", chan);
2717
2718 l2cap_chan_lock(chan);
2719
2720 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2721 chan->last_acked_seq);
2722
2723 if (frames_to_ack)
2724 l2cap_send_rr_or_rnr(chan, 0);
2725
2726 l2cap_chan_unlock(chan);
2727 l2cap_chan_put(chan);
2728 }
2729
/* Initialize per-channel state for ERTM or streaming mode.
 *
 * Resets all sequence bookkeeping and SDU reassembly state, sets up
 * the tx queue, and - for ERTM only - arms the state machines, the
 * retransmission/monitor/ack work items, the SREJ queue, and the
 * srej/retrans sequence lists.
 *
 * Returns 0 on success or a negative errno from sequence-list
 * allocation; on failure of the second list the first is freed.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming mode needs only the tx queue - no timers or lists */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
2769
2770 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2771 {
2772 switch (mode) {
2773 case L2CAP_MODE_STREAMING:
2774 case L2CAP_MODE_ERTM:
2775 if (l2cap_mode_supported(mode, remote_feat_mask))
2776 return mode;
2777 /* fall through */
2778 default:
2779 return L2CAP_MODE_BASIC;
2780 }
2781 }
2782
/* Extended window size is usable only when high-speed support is
 * enabled and the remote advertised L2CAP_FEAT_EXT_WINDOW.
 */
static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
}
2787
/* Extended flow spec is usable only when high-speed support is
 * enabled and the remote advertised L2CAP_FEAT_EXT_FLOW.
 */
static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
}
2792
2793 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2794 {
2795 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2796 __l2cap_ews_supported(chan)) {
2797 /* use extended control field */
2798 set_bit(FLAG_EXT_CTRL, &chan->flags);
2799 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2800 } else {
2801 chan->tx_win = min_t(u16, chan->tx_win,
2802 L2CAP_DEFAULT_TX_WINDOW);
2803 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2804 }
2805 chan->ack_win = chan->tx_win;
2806 }
2807
/* Build our outgoing Configuration Request into data.
 *
 * On the first request (no prior req/rsp exchanged) the channel mode
 * may be downgraded via l2cap_select_mode() based on the remote's
 * feature mask.  Options emitted depend on the final mode: MTU (if
 * non-default), RFC, and for ERTM/streaming optionally EFS, FCS and
 * EWS.  Returns the number of bytes written.
 *
 * NOTE(review): writes into the caller-supplied buffer without an
 * explicit bound - callers pass fixed-size stack buffers sized for
 * the worst case; confirm if options are ever added.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an explicit basic-mode RFC if the remote
		 * understands ERTM or streaming at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;
		/* Timeouts are dictated by the responder, so send 0 */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* Cap the PDU size at what fits in the link MTU after
		 * extended header, SDU length and FCS overhead.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		/* Window > 63 requires the EWS option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
2929
/* Parse the remote's accumulated Configuration Request (stored in
 * chan->conf_req) and build our Configuration Response into data.
 *
 * First pass collects all options (MTU, flush timeout, RFC, FCS,
 * EFS, EWS); unknown non-hint options yield L2CAP_CONF_UNKNOWN.
 * Then the requested mode is reconciled with ours, and the response
 * options (possibly corrected values) are emitted.
 *
 * Returns the response length, or -ECONNREFUSED when negotiation
 * cannot succeed (mode mismatch after retries, EFS refused, EWS
 * without high-speed support).
 *
 * NOTE(review): option payload lengths come off the wire; olen is
 * compared to the expected struct size before each memcpy, but the
 * underlying value buffer is only as trustworthy as
 * l2cap_get_conf_opt's parsing - confirm callers cap conf_len.
 * NOTE(review): response options are written into the caller's
 * fixed-size buffer without an explicit bound check here.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Pass 1: collect every option the remote sent */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* EWS requires high-speed support on our side */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Echo unknown option types back in the response */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode reconciliation happens only on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* STATE2 devices insist on their configured mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Second disagreement on mode: give up */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
				   sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Cap the remote's PDU size at what the link MTU
			 * can carry after headers and FCS.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
				     L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE -
				     L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
				     L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE -
				     L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = __constant_cpu_to_le16(0);

	return ptr - data;
}
3149
/* Parse the remote's Configuration Response and build the follow-up
 * Configuration Request into data.
 *
 * For each option in the response we adopt the value the remote
 * offered (MTU, flush timeout, RFC, EWS, EFS) and echo it back in
 * our next request.  On success the negotiated ERTM/streaming
 * parameters are committed to the channel.
 *
 * Returns the length written to data, or -ECONNREFUSED when the
 * response is incompatible (mode change on a STATE2 device, EFS
 * service-type mismatch, basic mode forced onto a non-basic channel).
 *
 * NOTE(review): each option parsed from rsp is copied back out via
 * l2cap_add_conf_opt into the caller's fixed-size stack buffer with
 * no bound check - a response carrying many options could overrun
 * it; confirm against upstream hardening of this path.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Never accept an MTU below the spec minimum */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* STATE2 devices may not change mode mid-config */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					   sizeof(efs), (unsigned long) &efs);
			break;
		}
	}

	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control the ack window is
			 * bounded by the RFC tx window.
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = __constant_cpu_to_le16(0);

	return ptr - data;
}
3250
/* Build a minimal (option-free) Configuration Response into data,
 * returning its length (the response header only).
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(flags);

	/* ptr - data equals the header size: no options were added */
	return ptr - data;
}
3264
/* Send the deferred Connection Response (success) for a channel held
 * in BT_CONNECT2, then kick off configuration with our Config Request
 * (sent only once - guarded by CONF_REQ_SENT).
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident,
		       L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3285
/* Extract the final RFC (and extended window) parameters from a
 * successful Configuration Response and commit them to the channel.
 * Only meaningful for ERTM/streaming channels; basic mode returns
 * immediately.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* The ack window source depends on whether extended
		 * control (and thus EWS) is in use.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3336
3337 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3338 {
3339 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3340
3341 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3342 return 0;
3343
3344 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3345 cmd->ident == conn->info_ident) {
3346 cancel_delayed_work(&conn->info_timer);
3347
3348 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3349 conn->info_ident = 0;
3350
3351 l2cap_conn_start(conn);
3352 }
3353
3354 return 0;
3355 }
3356
/* Handle an inbound Connection Request.
 *
 * Looks up a listening channel for the requested PSM, verifies link
 * security, allocates a new child channel, and replies with success,
 * pending (authentication/authorization outstanding) or an error
 * result.  If the remote's features are still unknown, an Information
 * Request is also sent.  Always returns 0 (the response itself
 * carries the outcome).
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	/* Lock ordering: connection channel list, then parent socket */
	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	/* Our local CID becomes the remote's destination CID */
	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Wait for userspace authorization */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Features unknown yet: answer pending, query below */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
			       L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* Immediate success: start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3477
/* Handle an inbound Connection Response to our Connection Request.
 *
 * The channel is located by source CID when present, otherwise by the
 * command identifier.  On success the channel moves to BT_CONFIG and
 * our Configuration Request is sent; on a pending result we just note
 * it; any other result tears the channel down.
 *
 * Returns 0 on success or -EFAULT when no matching channel exists.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		/* No scid (e.g. pending result): match by ident */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send our Config Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Refused or failed: remove the channel */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3545
3546 static inline void set_default_fcs(struct l2cap_chan *chan)
3547 {
3548 /* FCS is enabled only in ERTM or streaming mode, if one or both
3549 * sides request it.
3550 */
3551 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3552 chan->fcs = L2CAP_FCS_NONE;
3553 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3554 chan->fcs = L2CAP_FCS_CRC16;
3555 }
3556
/* Handle an incoming L2CAP Configure Request.
 *
 * Option data may be split across several requests using the
 * continuation flag, so partial payloads are accumulated in
 * chan->conf_req/conf_len until the final segment arrives, then the
 * whole set is parsed and answered.  Returns 0 or a negative error
 * that makes the caller send a command reject.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* On success the channel is returned locked; every exit below
	 * goes through l2cap_chan_unlock().
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Configuration is only legal in BT_CONFIG/BT_CONNECT2; in any
	 * other state answer with an invalid-CID command reject.
	 */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	/* Wait until the other direction of the negotiation is also
	 * finished before the channel can become ready.
	 */
	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Send our own config request if we have not done so already. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
			test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(chan, rsp,
						L2CAP_CONF_SUCCESS, flags), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3664
3665 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3666 {
3667 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3668 u16 scid, flags, result;
3669 struct l2cap_chan *chan;
3670 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3671 int err = 0;
3672
3673 scid = __le16_to_cpu(rsp->scid);
3674 flags = __le16_to_cpu(rsp->flags);
3675 result = __le16_to_cpu(rsp->result);
3676
3677 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3678 result, len);
3679
3680 chan = l2cap_get_chan_by_scid(conn, scid);
3681 if (!chan)
3682 return 0;
3683
3684 switch (result) {
3685 case L2CAP_CONF_SUCCESS:
3686 l2cap_conf_rfc_get(chan, rsp->data, len);
3687 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3688 break;
3689
3690 case L2CAP_CONF_PENDING:
3691 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3692
3693 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3694 char buf[64];
3695
3696 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3697 buf, &result);
3698 if (len < 0) {
3699 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3700 goto done;
3701 }
3702
3703 /* check compatibility */
3704
3705 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3706 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3707
3708 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3709 l2cap_build_conf_rsp(chan, buf,
3710 L2CAP_CONF_SUCCESS, 0x0000), buf);
3711 }
3712 goto done;
3713
3714 case L2CAP_CONF_UNACCEPT:
3715 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3716 char req[64];
3717
3718 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3719 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3720 goto done;
3721 }
3722
3723 /* throw out any old stored conf requests */
3724 result = L2CAP_CONF_SUCCESS;
3725 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3726 req, &result);
3727 if (len < 0) {
3728 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3729 goto done;
3730 }
3731
3732 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3733 L2CAP_CONF_REQ, len, req);
3734 chan->num_conf_req++;
3735 if (result != L2CAP_CONF_SUCCESS)
3736 goto done;
3737 break;
3738 }
3739
3740 default:
3741 l2cap_chan_set_err(chan, ECONNRESET);
3742
3743 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3744 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3745 goto done;
3746 }
3747
3748 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3749 goto done;
3750
3751 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3752
3753 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3754 set_default_fcs(chan);
3755
3756 if (chan->mode == L2CAP_MODE_ERTM ||
3757 chan->mode == L2CAP_MODE_STREAMING)
3758 err = l2cap_ertm_init(chan);
3759
3760 if (err < 0)
3761 l2cap_send_disconn_req(chan->conn, chan, -err);
3762 else
3763 l2cap_chan_ready(chan);
3764 }
3765
3766 done:
3767 l2cap_chan_unlock(chan);
3768 return err;
3769 }
3770
/* Handle an incoming L2CAP Disconnect Request: acknowledge it, shut
 * the socket down, and remove the channel from the connection.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* Lock order: conn->chan_lock first, then the channel lock. */
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	/* Swap the CIDs: their dcid is our scid and vice versa. */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Take a temporary reference so the channel outlives
	 * l2cap_chan_del() until ops->close() has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3816
/* Handle an incoming L2CAP Disconnect Response: the peer confirmed our
 * disconnect request, so tear the channel down without an error code.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Lock order: conn->chan_lock first, then the channel lock. */
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold a temporary reference so the channel survives
	 * l2cap_chan_del() until ops->close() has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3850
3851 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3852 {
3853 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3854 u16 type;
3855
3856 type = __le16_to_cpu(req->type);
3857
3858 BT_DBG("type 0x%4.4x", type);
3859
3860 if (type == L2CAP_IT_FEAT_MASK) {
3861 u8 buf[8];
3862 u32 feat_mask = l2cap_feat_mask;
3863 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3864 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3865 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3866 if (!disable_ertm)
3867 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3868 | L2CAP_FEAT_FCS;
3869 if (enable_hs)
3870 feat_mask |= L2CAP_FEAT_EXT_FLOW
3871 | L2CAP_FEAT_EXT_WINDOW;
3872
3873 put_unaligned_le32(feat_mask, rsp->data);
3874 l2cap_send_cmd(conn, cmd->ident,
3875 L2CAP_INFO_RSP, sizeof(buf), buf);
3876 } else if (type == L2CAP_IT_FIXED_CHAN) {
3877 u8 buf[12];
3878 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3879
3880 if (enable_hs)
3881 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3882 else
3883 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3884
3885 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3886 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3887 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3888 l2cap_send_cmd(conn, cmd->ident,
3889 L2CAP_INFO_RSP, sizeof(buf), buf);
3890 } else {
3891 struct l2cap_info_rsp rsp;
3892 rsp.type = cpu_to_le16(type);
3893 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3894 l2cap_send_cmd(conn, cmd->ident,
3895 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3896 }
3897
3898 return 0;
3899 }
3900
/* Handle an incoming L2CAP Information Response.
 *
 * Caches the feature mask / fixed channel mask on the connection.  A
 * feature-mask answer advertising fixed channels chains a follow-up
 * fixed-channel query; otherwise the exchange is marked done and any
 * channels waiting on it are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused: treat the exchange as finished. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Ask for the fixed channel mask before
			 * declaring the info exchange complete.
			 */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
3958
3959 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3960 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3961 void *data)
3962 {
3963 struct l2cap_create_chan_req *req = data;
3964 struct l2cap_create_chan_rsp rsp;
3965 u16 psm, scid;
3966
3967 if (cmd_len != sizeof(*req))
3968 return -EPROTO;
3969
3970 if (!enable_hs)
3971 return -EINVAL;
3972
3973 psm = le16_to_cpu(req->psm);
3974 scid = le16_to_cpu(req->scid);
3975
3976 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
3977
3978 /* Placeholder: Always reject */
3979 rsp.dcid = 0;
3980 rsp.scid = cpu_to_le16(scid);
3981 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3982 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3983
3984 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3985 sizeof(rsp), &rsp);
3986
3987 return 0;
3988 }
3989
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	/* A Create Channel Response carries the same payload layout as
	 * a Connect Response, so reuse that handler wholesale.
	 */
	return l2cap_connect_rsp(conn, cmd, data);
}
3997
3998 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3999 u16 icid, u16 result)
4000 {
4001 struct l2cap_move_chan_rsp rsp;
4002
4003 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4004
4005 rsp.icid = cpu_to_le16(icid);
4006 rsp.result = cpu_to_le16(result);
4007
4008 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4009 }
4010
4011 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4012 struct l2cap_chan *chan,
4013 u16 icid, u16 result)
4014 {
4015 struct l2cap_move_chan_cfm cfm;
4016 u8 ident;
4017
4018 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4019
4020 ident = l2cap_get_ident(conn);
4021 if (chan)
4022 chan->ident = ident;
4023
4024 cfm.icid = cpu_to_le16(icid);
4025 cfm.result = cpu_to_le16(result);
4026
4027 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4028 }
4029
4030 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4031 u16 icid)
4032 {
4033 struct l2cap_move_chan_cfm_rsp rsp;
4034
4035 BT_DBG("icid 0x%4.4x", icid);
4036
4037 rsp.icid = cpu_to_le16(icid);
4038 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4039 }
4040
4041 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4042 struct l2cap_cmd_hdr *cmd,
4043 u16 cmd_len, void *data)
4044 {
4045 struct l2cap_move_chan_req *req = data;
4046 u16 icid = 0;
4047 u16 result = L2CAP_MR_NOT_ALLOWED;
4048
4049 if (cmd_len != sizeof(*req))
4050 return -EPROTO;
4051
4052 icid = le16_to_cpu(req->icid);
4053
4054 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4055
4056 if (!enable_hs)
4057 return -EINVAL;
4058
4059 /* Placeholder: Always refuse */
4060 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4061
4062 return 0;
4063 }
4064
4065 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4066 struct l2cap_cmd_hdr *cmd,
4067 u16 cmd_len, void *data)
4068 {
4069 struct l2cap_move_chan_rsp *rsp = data;
4070 u16 icid, result;
4071
4072 if (cmd_len != sizeof(*rsp))
4073 return -EPROTO;
4074
4075 icid = le16_to_cpu(rsp->icid);
4076 result = le16_to_cpu(rsp->result);
4077
4078 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4079
4080 /* Placeholder: Always unconfirmed */
4081 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4082
4083 return 0;
4084 }
4085
4086 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4087 struct l2cap_cmd_hdr *cmd,
4088 u16 cmd_len, void *data)
4089 {
4090 struct l2cap_move_chan_cfm *cfm = data;
4091 u16 icid, result;
4092
4093 if (cmd_len != sizeof(*cfm))
4094 return -EPROTO;
4095
4096 icid = le16_to_cpu(cfm->icid);
4097 result = le16_to_cpu(cfm->result);
4098
4099 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4100
4101 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4102
4103 return 0;
4104 }
4105
4106 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4107 struct l2cap_cmd_hdr *cmd,
4108 u16 cmd_len, void *data)
4109 {
4110 struct l2cap_move_chan_cfm_rsp *rsp = data;
4111 u16 icid;
4112
4113 if (cmd_len != sizeof(*rsp))
4114 return -EPROTO;
4115
4116 icid = le16_to_cpu(rsp->icid);
4117
4118 BT_DBG("icid 0x%4.4x", icid);
4119
4120 return 0;
4121 }
4122
/* Validate a set of LE connection parameters.
 *
 * Returns 0 when the combination is acceptable, -EINVAL otherwise.
 * Ranges follow the Bluetooth LE spec: connection interval min/max in
 * 1.25ms units (6..3200, min <= max), supervision timeout in 10ms
 * units (10..3200) and strictly larger than the maximum interval, and
 * slave latency capped both at 499 and at what the timeout permits.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
							u16 to_multiplier)
{
	u16 max_latency;

	/* Interval window: 6..3200 units, properly ordered. */
	if (min < 6 || max > 3200 || min > max)
		return -EINVAL;

	/* Supervision timeout window: 10..3200 units. */
	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* The timeout must exceed the maximum connection interval
	 * (max * 1.25ms < to_multiplier * 10ms).
	 */
	if (max >= to_multiplier * 8)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
4143
4144 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4145 struct l2cap_cmd_hdr *cmd, u8 *data)
4146 {
4147 struct hci_conn *hcon = conn->hcon;
4148 struct l2cap_conn_param_update_req *req;
4149 struct l2cap_conn_param_update_rsp rsp;
4150 u16 min, max, latency, to_multiplier, cmd_len;
4151 int err;
4152
4153 if (!(hcon->link_mode & HCI_LM_MASTER))
4154 return -EINVAL;
4155
4156 cmd_len = __le16_to_cpu(cmd->len);
4157 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4158 return -EPROTO;
4159
4160 req = (struct l2cap_conn_param_update_req *) data;
4161 min = __le16_to_cpu(req->min);
4162 max = __le16_to_cpu(req->max);
4163 latency = __le16_to_cpu(req->latency);
4164 to_multiplier = __le16_to_cpu(req->to_multiplier);
4165
4166 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4167 min, max, latency, to_multiplier);
4168
4169 memset(&rsp, 0, sizeof(rsp));
4170
4171 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4172 if (err)
4173 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4174 else
4175 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4176
4177 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4178 sizeof(rsp), &rsp);
4179
4180 if (!err)
4181 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
4182
4183 return 0;
4184 }
4185
4186 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4187 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4188 {
4189 int err = 0;
4190
4191 switch (cmd->code) {
4192 case L2CAP_COMMAND_REJ:
4193 l2cap_command_rej(conn, cmd, data);
4194 break;
4195
4196 case L2CAP_CONN_REQ:
4197 err = l2cap_connect_req(conn, cmd, data);
4198 break;
4199
4200 case L2CAP_CONN_RSP:
4201 err = l2cap_connect_rsp(conn, cmd, data);
4202 break;
4203
4204 case L2CAP_CONF_REQ:
4205 err = l2cap_config_req(conn, cmd, cmd_len, data);
4206 break;
4207
4208 case L2CAP_CONF_RSP:
4209 err = l2cap_config_rsp(conn, cmd, data);
4210 break;
4211
4212 case L2CAP_DISCONN_REQ:
4213 err = l2cap_disconnect_req(conn, cmd, data);
4214 break;
4215
4216 case L2CAP_DISCONN_RSP:
4217 err = l2cap_disconnect_rsp(conn, cmd, data);
4218 break;
4219
4220 case L2CAP_ECHO_REQ:
4221 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4222 break;
4223
4224 case L2CAP_ECHO_RSP:
4225 break;
4226
4227 case L2CAP_INFO_REQ:
4228 err = l2cap_information_req(conn, cmd, data);
4229 break;
4230
4231 case L2CAP_INFO_RSP:
4232 err = l2cap_information_rsp(conn, cmd, data);
4233 break;
4234
4235 case L2CAP_CREATE_CHAN_REQ:
4236 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4237 break;
4238
4239 case L2CAP_CREATE_CHAN_RSP:
4240 err = l2cap_create_channel_rsp(conn, cmd, data);
4241 break;
4242
4243 case L2CAP_MOVE_CHAN_REQ:
4244 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4245 break;
4246
4247 case L2CAP_MOVE_CHAN_RSP:
4248 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4249 break;
4250
4251 case L2CAP_MOVE_CHAN_CFM:
4252 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4253 break;
4254
4255 case L2CAP_MOVE_CHAN_CFM_RSP:
4256 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4257 break;
4258
4259 default:
4260 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4261 err = -EINVAL;
4262 break;
4263 }
4264
4265 return err;
4266 }
4267
4268 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4269 struct l2cap_cmd_hdr *cmd, u8 *data)
4270 {
4271 switch (cmd->code) {
4272 case L2CAP_COMMAND_REJ:
4273 return 0;
4274
4275 case L2CAP_CONN_PARAM_UPDATE_REQ:
4276 return l2cap_conn_param_update_req(conn, cmd, data);
4277
4278 case L2CAP_CONN_PARAM_UPDATE_RSP:
4279 return 0;
4280
4281 default:
4282 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4283 return -EINVAL;
4284 }
4285 }
4286
4287 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4288 struct sk_buff *skb)
4289 {
4290 u8 *data = skb->data;
4291 int len = skb->len;
4292 struct l2cap_cmd_hdr cmd;
4293 int err;
4294
4295 l2cap_raw_recv(conn, skb);
4296
4297 while (len >= L2CAP_CMD_HDR_SIZE) {
4298 u16 cmd_len;
4299 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4300 data += L2CAP_CMD_HDR_SIZE;
4301 len -= L2CAP_CMD_HDR_SIZE;
4302
4303 cmd_len = le16_to_cpu(cmd.len);
4304
4305 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
4306
4307 if (cmd_len > len || !cmd.ident) {
4308 BT_DBG("corrupted command");
4309 break;
4310 }
4311
4312 if (conn->hcon->type == LE_LINK)
4313 err = l2cap_le_sig_cmd(conn, &cmd, data);
4314 else
4315 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4316
4317 if (err) {
4318 struct l2cap_cmd_rej_unk rej;
4319
4320 BT_ERR("Wrong link type (%d)", err);
4321
4322 /* FIXME: Map err to a valid reason */
4323 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4324 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4325 }
4326
4327 data += cmd_len;
4328 len -= cmd_len;
4329 }
4330
4331 kfree_skb(skb);
4332 }
4333
4334 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4335 {
4336 u16 our_fcs, rcv_fcs;
4337 int hdr_size;
4338
4339 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4340 hdr_size = L2CAP_EXT_HDR_SIZE;
4341 else
4342 hdr_size = L2CAP_ENH_HDR_SIZE;
4343
4344 if (chan->fcs == L2CAP_FCS_CRC16) {
4345 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4346 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4347 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4348
4349 if (our_fcs != rcv_fcs)
4350 return -EBADMSG;
4351 }
4352 return 0;
4353 }
4354
4355 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4356 {
4357 struct l2cap_ctrl control;
4358
4359 BT_DBG("chan %p", chan);
4360
4361 memset(&control, 0, sizeof(control));
4362 control.sframe = 1;
4363 control.final = 1;
4364 control.reqseq = chan->buffer_seq;
4365 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4366
4367 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4368 control.super = L2CAP_SUPER_RNR;
4369 l2cap_send_sframe(chan, &control);
4370 }
4371
4372 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4373 chan->unacked_frames > 0)
4374 __set_retrans_timer(chan);
4375
4376 /* Send pending iframes */
4377 l2cap_ertm_send(chan);
4378
4379 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4380 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4381 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4382 * send it now.
4383 */
4384 control.super = L2CAP_SUPER_RR;
4385 l2cap_send_sframe(chan, &control);
4386 }
4387 }
4388
/* Link new_frag onto skb's frag_list, tracking the list tail through
 * *last_frag so repeated appends stay O(1), and update the aggregate
 * length accounting on the head skb.
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4407
/* Feed one received I-frame payload into SDU reassembly.
 *
 * Based on the SAR bits: unsegmented frames go straight up via
 * chan->ops->recv(); START opens a partial SDU (the 16-bit length
 * prefix is pulled off the front); CONTINUE/END append fragments, with
 * END delivering the completed SDU.  On success, ownership of skb
 * passes to the partial SDU or the recv callback (the local skb is
 * NULLed once consumed).  On any error both the frame and any partial
 * SDU are freed and the reassembly state is reset.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An unsegmented frame while an SDU is still open is a
		 * protocol error (err stays -EINVAL).
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		/* The announced SDU must fit the incoming MTU. */
		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment already covering the whole announced
		 * SDU is invalid (err stays -EINVAL).
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* Ownership moved to the partial SDU. */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Reaching or exceeding the announced length on a
		 * CONTINUE fragment is an error (err stays -EINVAL).
		 */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The completed SDU must match its announced length. */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* skb/chan->sdu are NULL wherever ownership was already
		 * handed off; kfree_skb(NULL) is a no-op.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4489
4490 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4491 {
4492 u8 event;
4493
4494 if (chan->mode != L2CAP_MODE_ERTM)
4495 return;
4496
4497 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4498 l2cap_tx(chan, NULL, NULL, event);
4499 }
4500
/* Drain in-sequence frames from the SREJ queue into reassembly.
 *
 * Frames queued out of order during an SREJ exchange are released to
 * l2cap_reassemble_sdu() one at a time, advancing buffer_seq, until a
 * missing txseq, a reassembly error, or local busy stops the drain.
 * Once the queue is empty the rx state returns to RECV and an ack is
 * sent.  Returns the last reassembly result.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
			chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		/* Advance buffer_seq before reassembly so the next
		 * iteration searches for the following frame.
		 */
		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
4534
/* Process a received SREJ (selective reject) s-frame: retransmit the
 * single requested I-frame, honoring poll/final bits per the ERTM
 * state machine.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A reqseq equal to next_tx_seq asks for a frame that was never
	 * sent - protocol violation, drop the connection.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
			control->reqseq);
		return;
	}

	/* Give up once a frame has been retransmitted max_tx times. */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* A poll requires our answer to carry the final bit. */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit when this final bit merely
			 * answers an SREJ already acted on for the same
			 * sequence number.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
4592
/* Process a received REJ s-frame: the peer rejects everything from
 * reqseq onward, so retransmit all unacked frames from that point.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* Rejecting a frame we never sent is a protocol violation. */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* Enforce the retransmission limit on the first rejected frame. */
	if (chan->max_tx && skb &&
			bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* A final bit answering an earlier REJ means the
		 * retransmission was already triggered.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
4629
/* Classify an incoming I-frame txseq against the ERTM receive state.
 *
 * Returns one of the L2CAP_TXSEQ_* values that drive the rx state
 * machine: EXPECTED, DUPLICATE, UNEXPECTED (a gap that needs SREJ),
 * the *_SREJ variants while in the SREJ_SENT state, or INVALID /
 * INVALID_IGNORE when txseq falls outside the transmit window.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
		chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
				chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		/* Head of the SREJ list is the frame we asked for next. */
		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
				chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
		__seq_offset(chan, chan->expected_tx_seq,
				chan->last_acked_seq)){
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
4716
/* ERTM receive-side handler for the normal RECV state (no SREJ
 * recovery in progress).
 *
 * Processes one incoming I-frame or S-frame event for @chan.  @skb is
 * either taken over (queued for SREJ recovery or consumed by SDU
 * reassembly — tracked via skb_in_use) or freed before returning.
 *
 * Returns 0 on success or a negative error from l2cap_reassemble_sdu().
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Ack info piggybacked on the I-frame still feeds
			 * the transmit side even if the payload is dropped.
			 */
			l2cap_pass_to_tx(chan, control);

			/* While locally busy the frame is discarded; the
			 * peer retransmits once busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			/* F-bit acknowledges a poll; unless a REJ is
			 * outstanding (CONN_REJ_ACT), retransmit unacked
			 * frames now.
			 */
			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Payload already delivered; only the piggybacked
			 * ack information is useful.
			 */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Final RR closes a poll/REJ exchange; retransmit
			 * only if no REJ action was pending.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Plain RR: remote busy (if any) has cleared, so
			 * restart the retransmission timer when frames are
			 * still unacked, then resume sending.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		/* Remote can't receive; stop retransmitting until an RR
		 * or final frame clears CONN_REMOTE_BUSY.
		 */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Any skb not handed off above must be freed here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
4851
/* ERTM receive-side handler for the SREJ_SENT state: one or more SREJ
 * frames are outstanding and out-of-order frames are being buffered on
 * srej_q until the gaps are filled.
 *
 * As in the RECV state, @skb is either queued (skb_in_use) or freed
 * before returning.  Returns 0 or a negative error from processing the
 * queued I-frames.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* This is the retransmission we asked for; drop
			 * its entry from the SREJ list and try to drain
			 * the buffered frames in order.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll with an F-bit frame carrying the
			 * tail of the SREJ list.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Acknowledge the RNR with a plain RR carrying the
			 * current buffer_seq.
			 */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Any skb not queued above is freed here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
4995
4996 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4997 {
4998 /* Make sure reqseq is for a packet that has been sent but not acked */
4999 u16 unacked;
5000
5001 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5002 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5003 }
5004
/* Top-level ERTM receive entry point: validate the frame's reqseq and
 * dispatch to the handler for the current receive state.
 *
 * An out-of-range reqseq (acking a frame never sent) is a protocol
 * violation and tears the channel down with ECONNRESET.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
	}

	return err;
}
5035
/* Streaming-mode receive: frames are never retransmitted, so any
 * sequence gap simply aborts the in-progress SDU and drops the frame.
 *
 * Always returns 0 (reassembly errors are not propagated in streaming
 * mode; the next in-sequence frame restarts reassembly).
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence: discard any partially reassembled SDU
		 * along with this frame.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize on the received txseq regardless of outcome */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
5073
/* Validate and dispatch one ERTM/streaming data frame on @chan.
 *
 * Unpacks the control field, verifies FCS and payload length, then
 * routes I-frames to l2cap_rx()/l2cap_stream_rx() and S-frames to
 * l2cap_rx() with the corresponding event.  Invalid frames are freed;
 * protocol violations disconnect the channel.  Always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Compute the pure payload length: an SDU-start frame carries a
	 * 2-byte SDU length, and CRC16 mode appends a 2-byte FCS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* Payload exceeding the negotiated MPS is a protocol error */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
	} else {
		/* Maps the 2-bit super field to the matching RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
5162
/* Deliver a data frame to the channel identified by @cid on @conn.
 *
 * Takes ownership of @skb in all paths: it is handed to the channel's
 * recv callback / ERTM receive path, or freed.  NOTE(review): the
 * channel appears to be returned locked by l2cap_get_chan_by_scid()
 * (and locked explicitly for the A2MP case) and is unlocked at "done"
 * — confirm against the helper's definition elsewhere in this file.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		/* An unknown CID of L2CAP_CID_A2MP may be the first frame
		 * of an incoming AMP manager channel: create it on demand.
		 */
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice.  L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() takes ownership on success (returns 0) */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() consumes the skb in every path */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
5221
5222 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5223 struct sk_buff *skb)
5224 {
5225 struct l2cap_chan *chan;
5226
5227 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5228 if (!chan)
5229 goto drop;
5230
5231 BT_DBG("chan %p, len %d", chan, skb->len);
5232
5233 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5234 goto drop;
5235
5236 if (chan->imtu < skb->len)
5237 goto drop;
5238
5239 if (!chan->ops->recv(chan, skb))
5240 return;
5241
5242 drop:
5243 kfree_skb(skb);
5244 }
5245
5246 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5247 struct sk_buff *skb)
5248 {
5249 struct l2cap_chan *chan;
5250
5251 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5252 if (!chan)
5253 goto drop;
5254
5255 BT_DBG("chan %p, len %d", chan, skb->len);
5256
5257 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5258 goto drop;
5259
5260 if (chan->imtu < skb->len)
5261 goto drop;
5262
5263 if (!chan->ops->recv(chan, skb))
5264 return;
5265
5266 drop:
5267 kfree_skb(skb);
5268 }
5269
/* Parse the Basic L2CAP header of a complete frame and dispatch by CID.
 *
 * Consumes @skb in every path.  The caller (l2cap_recv_acldata)
 * guarantees at least L2CAP_HDR_SIZE bytes are present, so reading
 * @lh before the length check is safe; skb_pull() does not free the
 * header bytes, so @lh remains valid after the pull.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry the PSM in-band */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		/* A bad SMP frame tears down the whole connection */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
5313
5314 /* ---- L2CAP interface with lower layer (HCI) ---- */
5315
/* HCI callback: an incoming ACL connection from @bdaddr is pending on
 * @hdev.  Scan all listening channels and report the accepted link
 * modes — an exact source-address match (lm1) takes precedence over
 * wildcard (BDADDR_ANY) listeners (lm2).
 *
 * Returns a bitmask of HCI_LM_ACCEPT / HCI_LM_MASTER, or 0 to reject.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
5346
5347 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5348 {
5349 struct l2cap_conn *conn;
5350
5351 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5352
5353 if (!status) {
5354 conn = l2cap_conn_add(hcon, status);
5355 if (conn)
5356 l2cap_conn_ready(conn);
5357 } else
5358 l2cap_conn_del(hcon, bt_to_errno(status));
5359
5360 }
5361
5362 int l2cap_disconn_ind(struct hci_conn *hcon)
5363 {
5364 struct l2cap_conn *conn = hcon->l2cap_data;
5365
5366 BT_DBG("hcon %p", hcon);
5367
5368 if (!conn)
5369 return HCI_ERROR_REMOTE_USER_TERM;
5370 return conn->disc_reason;
5371 }
5372
/* HCI callback: @hcon has been disconnected; tear down all L2CAP state
 * with the HCI reason translated to an errno.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
5379
5380 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5381 {
5382 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5383 return;
5384
5385 if (encrypt == 0x00) {
5386 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5387 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5388 } else if (chan->sec_level == BT_SECURITY_HIGH)
5389 l2cap_chan_close(chan, ECONNREFUSED);
5390 } else {
5391 if (chan->sec_level == BT_SECURITY_MEDIUM)
5392 __clear_chan_timer(chan);
5393 }
5394 }
5395
/* HCI callback: authentication/encryption for @hcon finished with
 * @status; @encrypt reports the new encryption state.
 *
 * Walks every channel on the connection (under chan_lock, taking each
 * channel lock in turn) and advances its state machine: LE channels
 * become ready, pending BR/EDR connects are sent or refused, and
 * deferred incoming connections are answered.  Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	/* On LE links a successful encryption triggers SMP key
	 * distribution and ends the security timer either way.
	 */
	if (hcon->type == LE_LINK) {
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channels have no security state to update */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Channels still waiting on a connect response are
		 * handled when that response arrives.
		 */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			/* Security upgrade succeeded on an established
			 * channel: unblock the socket.
			 */
			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security done before the connect request: send
			 * it now, or arm the disconnect timer on failure.
			 */
			if (!status) {
				l2cap_send_conn_req(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connection that was waiting for
			 * security: answer it (possibly deferring to
			 * userspace via DEFER_SETUP).
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On acceptance, kick off configuration if a
			 * config request hasn't been sent yet.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
5513
/* HCI callback: receive one ACL fragment for @hcon.
 *
 * Reassembles L2CAP frames that span multiple ACL packets: a start
 * fragment (!ACL_CONT) allocates conn->rx_skb sized from the L2CAP
 * header, continuation fragments are appended, and a complete frame is
 * handed to l2cap_recv_frame().  Malformed sequences reset the
 * reassembly state and mark the connection unreliable (ECOMM).
 *
 * Note the "drop" label is also the normal exit for fragment paths:
 * the incoming @skb is always freed after its data has been copied
 * out; only a complete single-skb frame is passed on directly.
 * Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		int len;

		/* A start fragment while reassembly is in progress means
		 * the previous frame was truncated: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
5605
5606 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5607 {
5608 struct l2cap_chan *c;
5609
5610 read_lock(&chan_list_lock);
5611
5612 list_for_each_entry(c, &chan_list, global_l) {
5613 struct sock *sk = c->sk;
5614
5615 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5616 batostr(&bt_sk(sk)->src),
5617 batostr(&bt_sk(sk)->dst),
5618 c->state, __le16_to_cpu(c->psm),
5619 c->scid, c->dcid, c->imtu, c->omtu,
5620 c->sec_level, c->mode);
5621 }
5622
5623 read_unlock(&chan_list_lock);
5624
5625 return 0;
5626 }
5627
/* debugfs open callback: bind the single-shot seq_file show routine */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5632
/* Read-only debugfs file; iteration/seek handled by the seq_file core */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
5639
5640 static struct dentry *l2cap_debugfs;
5641
5642 int __init l2cap_init(void)
5643 {
5644 int err;
5645
5646 err = l2cap_init_sockets();
5647 if (err < 0)
5648 return err;
5649
5650 if (bt_debugfs) {
5651 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5652 bt_debugfs, NULL, &l2cap_debugfs_fops);
5653 if (!l2cap_debugfs)
5654 BT_ERR("Failed to create L2CAP debug file");
5655 }
5656
5657 return 0;
5658 }
5659
/* Module exit: remove the debugfs entry and unregister the socket
 * layer (reverse order of l2cap_init).
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5665
5666 module_param(disable_ertm, bool, 0644);
5667 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");