Bluetooth: Create function to return the ERTM header size
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41
42 bool disable_ertm;
43
44 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
45 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
46
47 static LIST_HEAD(chan_list);
48 static DEFINE_RWLOCK(chan_list_lock);
49
50 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
52 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
53 void *data);
54 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
57
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
60
61 /* ---- L2CAP channels ---- */
62
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
64 {
65 struct l2cap_chan *c;
66
67 list_for_each_entry(c, &conn->chan_l, list) {
68 if (c->dcid == cid)
69 return c;
70 }
71 return NULL;
72 }
73
74 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
75 {
76 struct l2cap_chan *c;
77
78 list_for_each_entry(c, &conn->chan_l, list) {
79 if (c->scid == cid)
80 return c;
81 }
82 return NULL;
83 }
84
85 /* Find channel with given SCID.
86 * Returns locked channel. */
87 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
88 {
89 struct l2cap_chan *c;
90
91 mutex_lock(&conn->chan_lock);
92 c = __l2cap_get_chan_by_scid(conn, cid);
93 if (c)
94 l2cap_chan_lock(c);
95 mutex_unlock(&conn->chan_lock);
96
97 return c;
98 }
99
100 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
101 {
102 struct l2cap_chan *c;
103
104 list_for_each_entry(c, &conn->chan_l, list) {
105 if (c->ident == ident)
106 return c;
107 }
108 return NULL;
109 }
110
111 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
112 {
113 struct l2cap_chan *c;
114
115 list_for_each_entry(c, &chan_list, global_l) {
116 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
117 return c;
118 }
119 return NULL;
120 }
121
/* Bind @chan to PSM @psm on source address @src. A zero @psm means
 * "pick one": the first free odd value in 0x1001-0x10ff is used.
 * Returns 0 on success, -EADDRINUSE if @psm is already bound on @src,
 * or -EINVAL if the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	/* An explicit PSM must not already be in use on this address */
	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		/* Dynamic PSMs are odd, hence the step of two */
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
154
155 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
156 {
157 write_lock(&chan_list_lock);
158
159 chan->scid = scid;
160
161 write_unlock(&chan_list_lock);
162
163 return 0;
164 }
165
166 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
167 {
168 u16 cid = L2CAP_CID_DYN_START;
169
170 for (; cid < L2CAP_CID_DYN_END; cid++) {
171 if (!__l2cap_get_chan_by_scid(conn, cid))
172 return cid;
173 }
174
175 return 0;
176 }
177
/* Move the channel to @state and notify the channel owner via the
 * state_change callback. Caller must hold the socket lock.
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
186
/* Locked wrapper around __l2cap_state_change(). */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
195
196 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
197 {
198 struct sock *sk = chan->sk;
199
200 sk->sk_err = err;
201 }
202
/* Locked wrapper around __l2cap_chan_set_err(). */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
211
/* Arm the ERTM retransmission timer. The timer is deliberately not
 * started while the monitor timer is pending, and only if a retransmit
 * timeout has actually been configured for this channel.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
220
/* Arm the ERTM monitor timer (if a monitor timeout is configured),
 * cancelling any pending retransmission timer first.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
229
/* Linearly search skb queue @head for the frame whose ERTM control
 * field carries transmit sequence number @seq. Returns NULL if the
 * sequence number is not queued.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
242
243 /* ---- L2CAP sequence number lists ---- */
244
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
251 * allocs or frees.
252 */
253
254 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
255 {
256 size_t alloc_size, i;
257
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
261 */
262 alloc_size = roundup_pow_of_two(size);
263
264 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
265 if (!seq_list->list)
266 return -ENOMEM;
267
268 seq_list->mask = alloc_size - 1;
269 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
271 for (i = 0; i < alloc_size; i++)
272 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
273
274 return 0;
275 }
276
/* Release the backing array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
281
/* Test whether @seq is on the list: an array slot that is not
 * L2CAP_SEQ_LIST_CLEAR means the corresponding sequence number is a
 * member. O(1).
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
288
/* Remove @seq from the list. Removing the head is O(1); removing an
 * interior element walks the singly-linked chain to find the
 * predecessor. Returns @seq on success or L2CAP_SEQ_LIST_CLEAR if the
 * list is empty or @seq is not a member.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* The head slot pointed at the tail sentinel: list is now empty */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
322
/* Remove and return the head of the list in constant time.
 * Returns L2CAP_SEQ_LIST_CLEAR when the list is empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
328
/* Empty the list, resetting every array slot and the head/tail
 * markers. A no-op if the list is already empty.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
342
/* Append @seq to the tail of the list in constant time. Duplicate
 * appends are silently ignored (the slot is already occupied).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	/* Empty list: new element becomes the head; otherwise link it
	 * behind the current tail.
	 */
	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
360
/* Delayed-work handler for the channel timer: close the channel with a
 * reason derived from its current state. Locks are taken in the
 * canonical order conn->chan_lock then the channel lock, and the timer's
 * channel reference (taken when the timer was armed) is dropped at the
 * end.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
390
391 struct l2cap_chan *l2cap_chan_create(void)
392 {
393 struct l2cap_chan *chan;
394
395 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
396 if (!chan)
397 return NULL;
398
399 mutex_init(&chan->lock);
400
401 write_lock(&chan_list_lock);
402 list_add(&chan->global_l, &chan_list);
403 write_unlock(&chan_list_lock);
404
405 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
406
407 chan->state = BT_OPEN;
408
409 atomic_set(&chan->refcnt, 1);
410
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
413
414 BT_DBG("chan %p", chan);
415
416 return chan;
417 }
418
/* Unlink the channel from the global list and drop the initial
 * reference taken in l2cap_chan_create(); the channel is freed once
 * the last reference is gone.
 */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	l2cap_chan_put(chan);
}
427
428 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
429 {
430 chan->fcs = L2CAP_FCS_CRC16;
431 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
432 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
433 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
434 chan->sec_level = BT_SECURITY_LOW;
435
436 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
437 }
438
/* Attach @chan to @conn: assign source/destination CIDs and a default
 * MTU according to the channel type, set local QoS defaults, take a
 * reference and link the channel on the connection's channel list.
 * Caller must hold conn->chan_lock.
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default local service parameters (best-effort, no flush) */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;

	/* Reference held for as long as the channel sits on conn->chan_l */
	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
494
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
501
/* Detach @chan from its connection and tear down per-mode state.
 * Stops the channel timer, removes the channel from the connection
 * list (dropping the reference taken in __l2cap_chan_add()), notifies
 * the owner via teardown, and — unless configuration never completed —
 * purges ERTM/streaming queues and timers.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* A2MP channels do not hold their own hci_conn reference */
		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
			hci_conn_put(conn->hcon);
	}

	if (chan->ops->teardown)
		chan->ops->teardown(chan, err);

	/* Nothing below was ever initialized if config never completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
551
/* Close @chan with disconnect reason @reason, following the L2CAP state
 * machine: connected ACL channels send a disconnect request, pending
 * incoming connections (BT_CONNECT2) are rejected with a connect
 * response, everything else is torn down directly.
 * Caller must hold the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan,
	       state_to_string(chan->state), sk);

	switch (chan->state) {
	case BT_LISTEN:
		if (chan->ops->teardown)
			chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* ACL connection-oriented channels disconnect cleanly;
		 * everything else is removed immediately.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Reject the pending incoming connection */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		if (chan->ops->teardown)
			chan->ops->teardown(chan, 0);
		break;
	}
}
610
611 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
612 {
613 if (chan->chan_type == L2CAP_CHAN_RAW) {
614 switch (chan->sec_level) {
615 case BT_SECURITY_HIGH:
616 return HCI_AT_DEDICATED_BONDING_MITM;
617 case BT_SECURITY_MEDIUM:
618 return HCI_AT_DEDICATED_BONDING;
619 default:
620 return HCI_AT_NO_BONDING;
621 }
622 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
623 if (chan->sec_level == BT_SECURITY_LOW)
624 chan->sec_level = BT_SECURITY_SDP;
625
626 if (chan->sec_level == BT_SECURITY_HIGH)
627 return HCI_AT_NO_BONDING_MITM;
628 else
629 return HCI_AT_NO_BONDING;
630 } else {
631 switch (chan->sec_level) {
632 case BT_SECURITY_HIGH:
633 return HCI_AT_GENERAL_BONDING_MITM;
634 case BT_SECURITY_MEDIUM:
635 return HCI_AT_GENERAL_BONDING;
636 default:
637 return HCI_AT_NO_BONDING;
638 }
639 }
640 }
641
642 /* Service level security */
643 int l2cap_chan_check_security(struct l2cap_chan *chan)
644 {
645 struct l2cap_conn *conn = chan->conn;
646 __u8 auth_type;
647
648 auth_type = l2cap_get_auth_type(chan);
649
650 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
651 }
652
/* Allocate the next signalling command identifier for @conn.
 * Wraps from 128 back to 1, so 0 is never returned.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
674
/* Build and transmit an L2CAP signalling command on @conn. Silently
 * drops the command if the skb cannot be built. Signalling traffic is
 * sent non-flushable (when the controller supports it) and at maximum
 * priority.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
695
/* Hand a fully-built L2CAP data frame to the HCI layer for @chan,
 * choosing flushable vs non-flushable ACL flags from the channel flags
 * and controller capability.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
713
714 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
715 {
716 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
717 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
718
719 if (enh & L2CAP_CTRL_FRAME_TYPE) {
720 /* S-Frame */
721 control->sframe = 1;
722 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
723 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
724
725 control->sar = 0;
726 control->txseq = 0;
727 } else {
728 /* I-Frame */
729 control->sframe = 0;
730 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
731 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
732
733 control->poll = 0;
734 control->super = 0;
735 }
736 }
737
738 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
739 {
740 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
741 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
742
743 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
744 /* S-Frame */
745 control->sframe = 1;
746 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
747 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
748
749 control->sar = 0;
750 control->txseq = 0;
751 } else {
752 /* I-Frame */
753 control->sframe = 0;
754 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
755 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
756
757 control->poll = 0;
758 control->super = 0;
759 }
760 }
761
762 static inline void __unpack_control(struct l2cap_chan *chan,
763 struct sk_buff *skb)
764 {
765 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
766 __unpack_extended_control(get_unaligned_le32(skb->data),
767 &bt_cb(skb)->control);
768 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
769 } else {
770 __unpack_enhanced_control(get_unaligned_le16(skb->data),
771 &bt_cb(skb)->control);
772 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
773 }
774 }
775
776 static u32 __pack_extended_control(struct l2cap_ctrl *control)
777 {
778 u32 packed;
779
780 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
781 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
782
783 if (control->sframe) {
784 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
785 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
786 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
787 } else {
788 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
789 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
790 }
791
792 return packed;
793 }
794
795 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
796 {
797 u16 packed;
798
799 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
800 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
801
802 if (control->sframe) {
803 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
804 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
805 packed |= L2CAP_CTRL_FRAME_TYPE;
806 } else {
807 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
808 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
809 }
810
811 return packed;
812 }
813
814 static inline void __pack_control(struct l2cap_chan *chan,
815 struct l2cap_ctrl *control,
816 struct sk_buff *skb)
817 {
818 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
819 put_unaligned_le32(__pack_extended_control(control),
820 skb->data + L2CAP_HDR_SIZE);
821 } else {
822 put_unaligned_le16(__pack_enhanced_control(control),
823 skb->data + L2CAP_HDR_SIZE);
824 }
825 }
826
827 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
828 {
829 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
830 return L2CAP_EXT_HDR_SIZE;
831 else
832 return L2CAP_ENH_HDR_SIZE;
833 }
834
/* Build an ERTM supervisory frame carrying the pre-packed @control
 * field: basic header, control field in the negotiated width, and an
 * optional CRC16 FCS computed over everything before it. Returns an
 * ERR_PTR on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic header: payload length excludes the header itself */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers the header and control field written so far */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
867
/* Send the supervisory frame described by @control, updating the
 * related per-channel ERTM state: a pending F-bit is consumed by a
 * non-poll frame, RR/RNR track the remote-busy indication, and any
 * non-SREJ frame acknowledges up to reqseq and cancels the ack timer.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A queued F-bit is carried by the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Anything but SREJ acknowledges frames up to reqseq */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
905
906 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
907 {
908 struct l2cap_ctrl control;
909
910 BT_DBG("chan %p, poll %d", chan, poll);
911
912 memset(&control, 0, sizeof(control));
913 control.sframe = 1;
914 control.poll = poll;
915
916 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
917 control.super = L2CAP_SUPER_RNR;
918 else
919 control.super = L2CAP_SUPER_RR;
920
921 control.reqseq = chan->buffer_seq;
922 l2cap_send_sframe(chan, &control);
923 }
924
/* True when no connect request is currently outstanding for @chan. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
929
/* Send an L2CAP connect request for @chan, allocating a fresh command
 * ident and marking the connect as pending.
 */
static void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
944
/* Mark @chan fully connected: clear configuration state, stop the
 * channel timer, and notify the owner.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
955
/* Kick off channel establishment on @chan. LE links are ready
 * immediately. On BR/EDR, a connect request is sent once the remote
 * feature mask is known and security is satisfied; otherwise an
 * information request is issued first to learn the features.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight; wait for the response */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan))
			l2cap_send_conn_req(chan);
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
			       L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
985
986 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
987 {
988 u32 local_feat_mask = l2cap_feat_mask;
989 if (!disable_ertm)
990 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
991
992 switch (mode) {
993 case L2CAP_MODE_ERTM:
994 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
995 case L2CAP_MODE_STREAMING:
996 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
997 default:
998 return 0x00;
999 }
1000 }
1001
/* Send a disconnect request for @chan and move it to BT_DISCONN with
 * @err as the pending socket error. ERTM timers are stopped first.
 * A2MP fixed channels carry no disconnect signalling: only the state
 * changes.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		__l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
		       L2CAP_DISCONN_REQ, sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1031
/* ---- L2CAP connections ---- */

/* Drive connection-oriented channels on @conn forward once the link is
 * usable: outgoing channels in BT_CONNECT send their connect request
 * (or are closed if their mode is unsupported), and incoming channels
 * in BT_CONNECT2 are answered — success, authorization-pending (defer
 * setup), or authentication-pending — followed by the first config
 * request where appropriate.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Wait for security; skip if a request is in flight */
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* A state-2 device cannot fall back to basic mode */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_send_conn_req(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Owner must authorize: report pending
					 * and wake the listening socket.
					 */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Start configuration only on a fresh success */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1115
/* Find socket with cid and source/destination bdaddr.
 * Returns closest match, locked.
 *
 * "Closest" means: an exact src/dst match wins immediately; otherwise
 * the last candidate where the non-matching side is a wildcard
 * (BDADDR_ANY) is returned. @state of 0 matches any channel state.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1158
/* Accept an incoming LE connection: find a listening channel on the LE
 * data CID, spawn a child channel from it, attach the child to the
 * connection, and mark it ready. Silently returns when no listener
 * matches or the child cannot be created.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	sk = chan->sk;

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	l2cap_chan_add(conn, chan);

	l2cap_chan_ready(chan);

clean:
	release_sock(parent);
}
1196
/* Called once the underlying HCI link is fully established.
 *
 * Kicks every channel on the connection: LE channels go through SMP
 * security first, connection-less channels are marked connected
 * immediately, and connection-oriented channels in BT_CONNECT start
 * the L2CAP connect procedure.  A2MP fixed channels are skipped.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: hand it to a listening socket */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: initiate pairing at the pending level */
	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (conn->hcon->type == LE_LINK) {
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1240
1241 /* Notify sockets that we cannot guaranty reliability anymore */
1242 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1243 {
1244 struct l2cap_chan *chan;
1245
1246 BT_DBG("conn %p", conn);
1247
1248 mutex_lock(&conn->chan_lock);
1249
1250 list_for_each_entry(chan, &conn->chan_l, list) {
1251 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1252 __l2cap_chan_set_err(chan, err);
1253 }
1254
1255 mutex_unlock(&conn->chan_lock);
1256 }
1257
1258 static void l2cap_info_timeout(struct work_struct *work)
1259 {
1260 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1261 info_timer.work);
1262
1263 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1264 conn->info_ident = 0;
1265
1266 l2cap_conn_start(conn);
1267 }
1268
/* Tear down the L2CAP side of an HCI connection.
 *
 * Kills every channel on the connection (notifying owners through
 * ops->close()), releases the HCI channel, cancels pending info/SMP
 * timers and finally frees the l2cap_conn.  @err is propagated to the
 * channels as the disconnect reason.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Free any pending rx skb (kfree_skb tolerates NULL) */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a ref so the channel outlives l2cap_chan_del()
		 * until ops->close() has run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		/* close() is called without the channel lock held */
		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* info_timer is only armed after the feature-mask request */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1311
/* SMP security procedure timed out: tear down the whole connection. */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	l2cap_conn_del(conn->hcon, ETIMEDOUT);
}
1319
1320 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1321 {
1322 struct l2cap_conn *conn = hcon->l2cap_data;
1323 struct hci_chan *hchan;
1324
1325 if (conn || status)
1326 return conn;
1327
1328 hchan = hci_chan_create(hcon);
1329 if (!hchan)
1330 return NULL;
1331
1332 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1333 if (!conn) {
1334 hci_chan_del(hchan);
1335 return NULL;
1336 }
1337
1338 hcon->l2cap_data = conn;
1339 conn->hcon = hcon;
1340 conn->hchan = hchan;
1341
1342 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1343
1344 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1345 conn->mtu = hcon->hdev->le_mtu;
1346 else
1347 conn->mtu = hcon->hdev->acl_mtu;
1348
1349 conn->src = &hcon->hdev->bdaddr;
1350 conn->dst = &hcon->dst;
1351
1352 conn->feat_mask = 0;
1353
1354 spin_lock_init(&conn->lock);
1355 mutex_init(&conn->chan_lock);
1356
1357 INIT_LIST_HEAD(&conn->chan_l);
1358
1359 if (hcon->type == LE_LINK)
1360 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1361 else
1362 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1363
1364 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1365
1366 return conn;
1367 }
1368
1369 /* ---- Socket interface ---- */
1370
1371 /* Find socket with psm and source / destination bdaddr.
1372 * Returns closest match.
1373 */
1374 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1375 bdaddr_t *src,
1376 bdaddr_t *dst)
1377 {
1378 struct l2cap_chan *c, *c1 = NULL;
1379
1380 read_lock(&chan_list_lock);
1381
1382 list_for_each_entry(c, &chan_list, global_l) {
1383 struct sock *sk = c->sk;
1384
1385 if (state && c->state != state)
1386 continue;
1387
1388 if (c->psm == psm) {
1389 int src_match, dst_match;
1390 int src_any, dst_any;
1391
1392 /* Exact match. */
1393 src_match = !bacmp(&bt_sk(sk)->src, src);
1394 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1395 if (src_match && dst_match) {
1396 read_unlock(&chan_list_lock);
1397 return c;
1398 }
1399
1400 /* Closest match */
1401 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1402 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1403 if ((src_match && dst_any) || (src_any && dst_match) ||
1404 (src_any && dst_any))
1405 c1 = c;
1406 }
1407 }
1408
1409 read_unlock(&chan_list_lock);
1410
1411 return c1;
1412 }
1413
/* Initiate an outgoing L2CAP connection on @chan.
 *
 * Validates the PSM/CID and channel mode, creates (or reuses) the
 * underlying HCI connection -- LE when the destination CID is the LE
 * data CID, ACL otherwise -- attaches the channel to it and, if the
 * link is already up, starts the L2CAP connect procedure.
 *
 * Returns 0 on success (or if a connect is already in progress) and a
 * negative errno otherwise.  Called with neither the channel nor the
 * hdev locked; both are taken here.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(chan->psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need a PSM or a fixed CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM/streaming only when not administratively disabled */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* An LE link already carrying a channel cannot take
		 * another one.
		 */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* NOTE(review): the channel lock is dropped around
	 * l2cap_chan_add(), presumably to respect conn->chan_lock
	 * ordering -- confirm against l2cap_chan_add().
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1552
/* Wait (interruptibly) until all outstanding ERTM I-frames have been
 * acked, or the channel loses its connection.
 *
 * Called with the socket lock held; the lock is dropped around each
 * bounded sleep.  Returns 0 on success, or a negative errno if a
 * signal arrived or a socket error was raised.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* Re-arm the per-iteration timeout if it expired */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep without holding the socket lock */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1584
1585 static void l2cap_monitor_timeout(struct work_struct *work)
1586 {
1587 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1588 monitor_timer.work);
1589
1590 BT_DBG("chan %p", chan);
1591
1592 l2cap_chan_lock(chan);
1593
1594 if (!chan->conn) {
1595 l2cap_chan_unlock(chan);
1596 l2cap_chan_put(chan);
1597 return;
1598 }
1599
1600 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1601
1602 l2cap_chan_unlock(chan);
1603 l2cap_chan_put(chan);
1604 }
1605
1606 static void l2cap_retrans_timeout(struct work_struct *work)
1607 {
1608 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1609 retrans_timer.work);
1610
1611 BT_DBG("chan %p", chan);
1612
1613 l2cap_chan_lock(chan);
1614
1615 if (!chan->conn) {
1616 l2cap_chan_unlock(chan);
1617 l2cap_chan_put(chan);
1618 return;
1619 }
1620
1621 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1622 l2cap_chan_unlock(chan);
1623 l2cap_chan_put(chan);
1624 }
1625
1626 static void l2cap_streaming_send(struct l2cap_chan *chan,
1627 struct sk_buff_head *skbs)
1628 {
1629 struct sk_buff *skb;
1630 struct l2cap_ctrl *control;
1631
1632 BT_DBG("chan %p, skbs %p", chan, skbs);
1633
1634 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1635
1636 while (!skb_queue_empty(&chan->tx_q)) {
1637
1638 skb = skb_dequeue(&chan->tx_q);
1639
1640 bt_cb(skb)->control.retries = 1;
1641 control = &bt_cb(skb)->control;
1642
1643 control->reqseq = 0;
1644 control->txseq = chan->next_tx_seq;
1645
1646 __pack_control(chan, control, skb);
1647
1648 if (chan->fcs == L2CAP_FCS_CRC16) {
1649 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1650 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1651 }
1652
1653 l2cap_do_send(chan, skb);
1654
1655 BT_DBG("Sent txseq %d", (int)control->txseq);
1656
1657 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1658 chan->frames_sent++;
1659 }
1660 }
1661
/* Transmit queued I-frames in ERTM mode.
 *
 * Sends frames from tx_send_head while the remote tx window has room
 * and the transmitter is in the XMIT state.  Each frame is cloned
 * before transmission so the original stays on tx_q for possible
 * retransmission.  Returns the number of frames sent, 0 when sending
 * is not currently possible, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Remote receiver signalled busy (RNR): hold off */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggy-back the F-bit if one is owed to the peer */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame acknowledges everything received so far */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head to the next unsent frame */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %d", (int)control->txseq);
	}

	BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
	       (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1728
1729 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1730 {
1731 struct l2cap_ctrl control;
1732 struct sk_buff *skb;
1733 struct sk_buff *tx_skb;
1734 u16 seq;
1735
1736 BT_DBG("chan %p", chan);
1737
1738 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1739 return;
1740
1741 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1742 seq = l2cap_seq_list_pop(&chan->retrans_list);
1743
1744 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1745 if (!skb) {
1746 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1747 seq);
1748 continue;
1749 }
1750
1751 bt_cb(skb)->control.retries++;
1752 control = bt_cb(skb)->control;
1753
1754 if (chan->max_tx != 0 &&
1755 bt_cb(skb)->control.retries > chan->max_tx) {
1756 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1757 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1758 l2cap_seq_list_clear(&chan->retrans_list);
1759 break;
1760 }
1761
1762 control.reqseq = chan->buffer_seq;
1763 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1764 control.final = 1;
1765 else
1766 control.final = 0;
1767
1768 if (skb_cloned(skb)) {
1769 /* Cloned sk_buffs are read-only, so we need a
1770 * writeable copy
1771 */
1772 tx_skb = skb_copy(skb, GFP_ATOMIC);
1773 } else {
1774 tx_skb = skb_clone(skb, GFP_ATOMIC);
1775 }
1776
1777 if (!tx_skb) {
1778 l2cap_seq_list_clear(&chan->retrans_list);
1779 break;
1780 }
1781
1782 /* Update skb contents */
1783 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1784 put_unaligned_le32(__pack_extended_control(&control),
1785 tx_skb->data + L2CAP_HDR_SIZE);
1786 } else {
1787 put_unaligned_le16(__pack_enhanced_control(&control),
1788 tx_skb->data + L2CAP_HDR_SIZE);
1789 }
1790
1791 if (chan->fcs == L2CAP_FCS_CRC16) {
1792 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1793 put_unaligned_le16(fcs, skb_put(tx_skb,
1794 L2CAP_FCS_SIZE));
1795 }
1796
1797 l2cap_do_send(chan, tx_skb);
1798
1799 BT_DBG("Resent txseq %d", control.txseq);
1800
1801 chan->last_acked_seq = chan->buffer_seq;
1802 }
1803 }
1804
/* Queue a single frame (control->reqseq) for retransmission and kick
 * the resend machinery.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
1813
/* Retransmit all sent-but-unacked I-frames, starting at
 * control->reqseq.
 *
 * Rebuilds the retransmission list from tx_q and kicks
 * l2cap_ertm_resend().  A set poll bit in @control means an F-bit is
 * owed on the first retransmitted frame.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Start over with a fresh retransmission list */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* First walk: skip frames the peer has already acked */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Second walk: queue every frame up to (but excluding)
		 * the first never-sent frame for retransmission.
		 */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
1847
/* Acknowledge received I-frames.
 *
 * Sends an RNR if the local side is busy.  Otherwise tries to
 * piggy-back the ack on outgoing I-frames; if frames remain unacked,
 * an explicit RR is sent only once the backlog reaches 3/4 of the tx
 * window, otherwise the ack timer is (re)armed to ack later.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Local busy: tell the peer to stop sending */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the tx window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->tx_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
1897
/* Copy user data from @msg into @skb, spilling whatever does not fit
 * into a frag_list of continuation skbs sized by the HCI MTU.
 *
 * @count is the number of bytes going into @skb itself, @len the total
 * remaining.  Returns the number of bytes consumed or a negative errno
 * (-EFAULT on a bad user copy, or the alloc_skb error).  On failure
 * the caller frees @skb, which also frees any fragments already
 * chained to it.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Account the fragment on the head skb */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
1942
1943 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1944 struct msghdr *msg, size_t len,
1945 u32 priority)
1946 {
1947 struct l2cap_conn *conn = chan->conn;
1948 struct sk_buff *skb;
1949 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1950 struct l2cap_hdr *lh;
1951
1952 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1953
1954 count = min_t(unsigned int, (conn->mtu - hlen), len);
1955
1956 skb = chan->ops->alloc_skb(chan, count + hlen,
1957 msg->msg_flags & MSG_DONTWAIT);
1958 if (IS_ERR(skb))
1959 return skb;
1960
1961 skb->priority = priority;
1962
1963 /* Create L2CAP header */
1964 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1965 lh->cid = cpu_to_le16(chan->dcid);
1966 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1967 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1968
1969 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1970 if (unlikely(err < 0)) {
1971 kfree_skb(skb);
1972 return ERR_PTR(err);
1973 }
1974 return skb;
1975 }
1976
1977 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1978 struct msghdr *msg, size_t len,
1979 u32 priority)
1980 {
1981 struct l2cap_conn *conn = chan->conn;
1982 struct sk_buff *skb;
1983 int err, count;
1984 struct l2cap_hdr *lh;
1985
1986 BT_DBG("chan %p len %d", chan, (int)len);
1987
1988 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1989
1990 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1991 msg->msg_flags & MSG_DONTWAIT);
1992 if (IS_ERR(skb))
1993 return skb;
1994
1995 skb->priority = priority;
1996
1997 /* Create L2CAP header */
1998 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1999 lh->cid = cpu_to_le16(chan->dcid);
2000 lh->len = cpu_to_le16(len);
2001
2002 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2003 if (unlikely(err < 0)) {
2004 kfree_skb(skb);
2005 return ERR_PTR(err);
2006 }
2007 return skb;
2008 }
2009
/* Allocate and build one ERTM/streaming I-frame PDU.
 *
 * Reserves room for the L2CAP header and control field (via
 * __ertm_hdr_size()), an optional SDU-length field (@sdulen != 0 only
 * for the first segment of a segmented SDU) and, when enabled, the
 * FCS.  The control field is zeroed here and filled in at transmit
 * time.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header; the length covers everything after it,
	 * including the reserved FCS bytes.
	 */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2063
/* Segment an outgoing SDU into I-frame PDUs queued on @seg_queue.
 *
 * The PDU payload size is bounded by the HCI MTU (so each PDU fits in
 * one HCI fragment), the maximum BR/EDR payload, the ERTM header/FCS
 * overhead and the remote MPS.  The first segment of a segmented SDU
 * additionally carries the total SDU length.  Returns 0 on success or
 * a negative errno; on failure already-built segments are purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no segmentation */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the START segment carries the SDU length;
			 * later segments get that room back for payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2132
/* Entry point for sending data on an L2CAP channel.
 *
 * Connection-less channels get a single connless PDU.  Basic mode
 * sends one PDU after an MTU check.  ERTM/streaming modes segment the
 * SDU first (which may block on allocation) and then hand the
 * segments to the ERTM state machine or the streaming sender.
 * Returns the number of bytes accepted or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2212
/* Send SREJ S-frames for every missing txseq from expected_tx_seq up
 * to (but not including) @txseq.
 *
 * Sequence numbers already buffered out-of-order in srej_q are
 * skipped; each requested seq is also recorded on srej_list.  Finally
 * expected_tx_seq is advanced past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %d", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2235
2236 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2237 {
2238 struct l2cap_ctrl control;
2239
2240 BT_DBG("chan %p", chan);
2241
2242 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2243 return;
2244
2245 memset(&control, 0, sizeof(control));
2246 control.sframe = 1;
2247 control.super = L2CAP_SUPER_SREJ;
2248 control.reqseq = chan->srej_list.tail;
2249 l2cap_send_sframe(chan, &control);
2250 }
2251
/* Re-send an SREJ for every sequence number still on srej_list,
 * except @txseq (which has just arrived).
 *
 * Each popped seq is re-appended after sending, so the list contents
 * survive; capturing the initial head limits the walk to one pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %d", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2277
/* Process an incoming acknowledgment (reqseq).
 *
 * Frees every tx-queued frame with a txseq before @reqseq and
 * advances expected_ack_seq.  Stops the retransmission timer once
 * nothing is left unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %d", chan, reqseq);

	/* Nothing new is being acked */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %d, unacked_frames %d",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
}
2309
/* Abandon the SREJ_SENT receive state: rewind expected_tx_seq to the
 * last in-order frame, drop the SREJ bookkeeping and any buffered
 * out-of-order frames, and return to plain RECV.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2319
/* ERTM transmit-side state machine, XMIT state.
 *
 * In XMIT, frames may be sent freely.  Poll-type events (explicit
 * poll, retransmission timeout) send an RR/RNR with P=1, arm the
 * monitor timer and move to WAIT_F to await the matching F-bit.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Queue new segments and transmit as the window allows */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Sends an RNR because CONN_LOCAL_BUSY is now set */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* An RNR went out while busy: poll the peer with
			 * RR/P=1 and wait for the F-bit in WAIT_F.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer expired: poll the peer */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2391
/* ERTM transmit-side state machine, WAIT_F state.
 *
 * A poll (P=1) has been sent; no new I-frames go out until a frame
 * with the F-bit set arrives.  The monitor timer re-polls until the
 * retry limit is reached, at which point the link is disconnected.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Sends an RNR because CONN_LOCAL_BUSY is now set */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* Re-poll with RR/P=1 and restart the monitor */
			struct l2cap_ctrl local_control;
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);

		/* Fall through */

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			/* The awaited F-bit arrived: back to XMIT */
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		/* No F-bit yet: re-poll until max_tx is exhausted
		 * (max_tx == 0 means retry forever).
		 */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2469
2470 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2471 struct sk_buff_head *skbs, u8 event)
2472 {
2473 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2474 chan, control, skbs, event, chan->tx_state);
2475
2476 switch (chan->tx_state) {
2477 case L2CAP_TX_STATE_XMIT:
2478 l2cap_tx_state_xmit(chan, control, skbs, event);
2479 break;
2480 case L2CAP_TX_STATE_WAIT_F:
2481 l2cap_tx_state_wait_f(chan, control, skbs, event);
2482 break;
2483 default:
2484 /* Ignore event */
2485 break;
2486 }
2487 }
2488
/* Feed the reqseq and F-bit of a received frame into the TX state
 * machine so acknowledged frames can be released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2495
/* Feed only the F-bit of a received frame into the TX state machine
 * (used where the reqseq must not be processed again).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2502
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* Clone so each raw channel gets its own copy; skip a
		 * channel on allocation failure rather than aborting.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* recv() takes ownership on success; free on failure. */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2531
/* ---- L2CAP signalling commands ---- */

/* Allocate and fill an skb carrying a single signalling command.
 *
 * The first fragment holds the L2CAP header (signalling CID chosen by
 * link type) plus the command header; payload beyond the ACL MTU is
 * chained as plain continuation fragments on frag_list.  Returns NULL
 * on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
	       conn, code, ident, dlen);

	/* Total wire length, and the size of the first fragment. */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Payload room left in the first fragment. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	/* Bytes of payload still to be copied. */
	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head and every chained fragment. */
	kfree_skb(skb);
	return NULL;
}
2595
/* Pull one configuration option out of *ptr and advance the cursor.
 *
 * Option values of 1, 2 and 4 bytes are returned by value in *val
 * (little-endian decoded); any other length returns a pointer to the
 * raw option payload instead.  Returns the number of bytes consumed.
 *
 * NOTE(review): opt->len comes from the peer and is not validated
 * against the bytes remaining in the buffer; callers only bound their
 * loop with "len >= L2CAP_CONF_OPT_SIZE" — verify this cannot walk
 * past the received data.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Unusual length: hand back a pointer to the raw bytes. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2628
/* Append one configuration option at *ptr and advance the cursor.
 *
 * Lengths of 1, 2 and 4 store @val as a little-endian integer; any
 * other length treats @val as a pointer and copies @len raw bytes.
 *
 * NOTE(review): no output-buffer size is passed in, so the caller must
 * guarantee room for L2CAP_CONF_OPT_SIZE + len bytes at *ptr.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2658
/* Append an Extended Flow Specification option built from the
 * channel's local QoS parameters.  ERTM uses the locally negotiated
 * service type and default latency/flush values; streaming mode always
 * advertises best-effort with no latency/flush guarantees.  Other
 * modes carry no EFS option, so nothing is written.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
2689
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged, send an RR (or RNR while busy) to ack
 * them.
 *
 * NOTE(review): the trailing l2cap_chan_put() presumably pairs with a
 * reference taken when the ack timer was armed — confirm against
 * __set_ack_timer().
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Frames received since the last ack we sent. */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
2709
/* Reset sequence-number and SDU-reassembly state for an ERTM or
 * streaming channel.  For ERTM proper, also initialize the RX/TX
 * states, the retransmission/monitor/ack timers, the SREJ queue and
 * the SREJ and retransmission sequence lists.
 *
 * Returns 0 on success (always, for non-ERTM modes) or a negative
 * errno if a sequence list cannot be allocated; on failure of the
 * second list the first is freed again so no allocation leaks.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming mode needs only the state reset above. */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Unwind the first list so nothing leaks. */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
2749
2750 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2751 {
2752 switch (mode) {
2753 case L2CAP_MODE_STREAMING:
2754 case L2CAP_MODE_ERTM:
2755 if (l2cap_mode_supported(mode, remote_feat_mask))
2756 return mode;
2757 /* fall through */
2758 default:
2759 return L2CAP_MODE_BASIC;
2760 }
2761 }
2762
2763 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2764 {
2765 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2766 }
2767
2768 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2769 {
2770 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2771 }
2772
2773 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2774 {
2775 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2776 __l2cap_ews_supported(chan)) {
2777 /* use extended control field */
2778 set_bit(FLAG_EXT_CTRL, &chan->flags);
2779 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2780 } else {
2781 chan->tx_win = min_t(u16, chan->tx_win,
2782 L2CAP_DEFAULT_TX_WINDOW);
2783 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2784 }
2785 }
2786
/* Build an outgoing Configure Request for @chan into @data.
 *
 * On the first request, pick the channel mode (possibly falling back
 * via l2cap_select_mode()); then emit MTU, RFC, and, depending on
 * mode and features, EFS, FCS and EWS options.  Returns the number of
 * bytes written.
 *
 * NOTE(review): the output buffer's size is not passed in; callers
 * supply 64-128 byte buffers — verify every option combination fits.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode is only (re)selected on the very first config exchange. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* State-2 devices keep their requested mode as-is. */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* MTU is only sent when it differs from the default. */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* A basic-mode RFC option is only needed when the remote
		 * supports the retransmission/streaming modes at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;
		/* Timeouts are dictated by the responder, not requested. */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* PDU size leaves room for the largest possible ERTM
		 * overhead: extended header, SDU length and FCS.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		/* Full (possibly extended) window goes in the EWS option. */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		/* Streaming has no window, retransmission or timers. */
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
2908
/* Parse the accumulated Configure Request (chan->conf_req) and build
 * the Configure Response into @data.
 *
 * First pass collects the peer's options (MTU, flush timeout, RFC,
 * FCS, EFS, EWS); then the local mode is reconciled against the
 * peer's RFC mode, and the response options are emitted.  Returns the
 * number of bytes written, or -ECONNREFUSED when the modes/EFS cannot
 * be reconciled.
 *
 * NOTE(review): as with l2cap_build_conf_req(), the response buffer's
 * size is not passed in — callers supply a 64-byte stack buffer;
 * verify the emitted options always fit.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Pass 1: walk the peer's options. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window needs high-speed support. */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Unknown hints are ignored; unknown non-hints
			 * are echoed back with CONF_UNKNOWN.
			 */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode reconciliation happens only early in the exchange. */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State-2 device: the peer must accept our mode. */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Second disagreement on mode: give up. */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
				   sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* Service types must match unless one side is
			 * no-traffic.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* EWS (if received) supersedes the RFC window. */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's PDU size to what fits in our
			 * ACL MTU after worst-case ERTM overhead.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
				     L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE -
				     L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
				     L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE -
				     L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3128
/* Parse a Configure Response from the peer and build the follow-up
 * Configure Request into @data, adopting the values the peer proposed
 * where acceptable.  Returns the number of bytes written, or
 * -ECONNREFUSED if a mode or EFS service-type conflict is fatal.
 *
 * NOTE(review): like the other config builders, the output buffer
 * size is not passed in — callers use a 128-byte buffer.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Enforce the minimum MTU and mark unacceptable. */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A state-2 device cannot accept a mode change. */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->tx_win = min_t(u16, val,
					     L2CAP_DEFAULT_EXT_WINDOW);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service types must be compatible. */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					   sizeof(efs), (unsigned long) &efs);
			break;
		}
	}

	/* Basic mode cannot be upgraded by the peer's response. */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3227
3228 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3229 {
3230 struct l2cap_conf_rsp *rsp = data;
3231 void *ptr = rsp->data;
3232
3233 BT_DBG("chan %p", chan);
3234
3235 rsp->scid = cpu_to_le16(chan->dcid);
3236 rsp->result = cpu_to_le16(result);
3237 rsp->flags = cpu_to_le16(flags);
3238
3239 return ptr - data;
3240 }
3241
/* Send the deferred success response for an incoming connection that
 * was held back (e.g. waiting for authorization), then start the
 * configuration phase if a Configure Request has not yet been sent.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident,
		       L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Only one Configure Request kicks off the exchange. */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3262
/* Extract the RFC option from a final Configure Response and adopt
 * its timeouts/PDU size.  Channels in basic mode are unaffected; if
 * the peer omitted the RFC option, sane defaults are used instead.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc;

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			goto done;
		}
	}

	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC option.
	 */
	rfc.mode = chan->mode;
	rfc.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	rfc.max_pdu_size = cpu_to_le16(chan->imtu);

	BT_ERR("Expected RFC option was not found, using defaults");

done:
	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3306
/* Handle an incoming Command Reject.  A "not understood" reject of
 * our pending Information Request is treated as the end of the
 * feature-mask exchange: stop the info timer, mark the exchange done
 * and resume bringing up pending channels.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
	    cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3326
/* Handle an incoming Connection Request.
 *
 * Looks up a listening channel for the requested PSM, checks link
 * security, creates the new child channel, and replies with success,
 * pending (authorization/authentication) or an error.  May also kick
 * off the feature-mask Information Request and, on immediate success,
 * the first Configure Request.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	/* Lock order: conn->chan_lock, then the parent socket. */
	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	/* Our freshly allocated source CID is the peer's dcid. */
	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Wait for user-space authorization. */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet: answer pending and ask. */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
			       L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3447
/* Handle an incoming Connection Response.
 *
 * The channel is found by our scid when the peer echoed it, otherwise
 * by the command identifier.  Success moves the channel to BT_CONFIG
 * and sends the first Configure Request; pending just flags the
 * channel; anything else tears the channel down.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only the first success triggers a Configure Request. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3515
3516 static inline void set_default_fcs(struct l2cap_chan *chan)
3517 {
3518 /* FCS is enabled only in ERTM or streaming mode, if one or both
3519 * sides request it.
3520 */
3521 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3522 chan->fcs = L2CAP_FCS_NONE;
3523 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3524 chan->fcs = L2CAP_FCS_CRC16;
3525 }
3526
/* Handle an incoming Configure Request.
 *
 * Options may arrive split over several requests (continuation flag);
 * fragments are accumulated in chan->conf_req until complete, then
 * parsed and answered.  When both directions are fully configured the
 * channel is moved to the connected state (with ERTM/streaming state
 * initialized first).
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Configuration is only legal in CONFIG or CONNECT2 state. */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
						    L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
						    L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finish channel setup. */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
						    L2CAP_CONF_SUCCESS, flags), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3634
/* Handle an incoming L2CAP Configure Response.
 *
 * SUCCESS stores the negotiated RFC options; PENDING may trigger our
 * own final response; UNACCEPT re-issues an adjusted Configure Request
 * up to L2CAP_CONF_MAX_CONF_RSP times, after which (and for any other
 * result) the channel is torn down.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
			result, len);

	/* Returns the channel locked; released at done: */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		/* If we answered PENDING earlier, we can now send our
		 * final (SUCCESS) Configure Response.
		 */
		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* check compatibility */

			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
						l2cap_build_conf_rsp(chan, buf,
							L2CAP_CONF_SUCCESS, 0x0000), buf);
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fallthrough: retry limit exceeded, give up on the channel */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	/* More option fragments are coming; wait for the final one */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured; same finalization as in
		 * l2cap_config_req().
		 */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
3740
/* Handle an incoming Disconnect Request: acknowledge it, mark the
 * owning socket as shut down and tear the channel down.
 *
 * Returns 0 even when the DCID is unknown (nothing to do then).
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's DCID is our SCID */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Hold a reference so the channel survives l2cap_chan_del() long
	 * enough to call ops->close() after dropping the channel lock.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3786
/* Handle an incoming Disconnect Response: the peer has acknowledged
 * our Disconnect Request, so finish tearing the channel down.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold a reference so the channel outlives l2cap_chan_del() until
	 * ops->close() has run (called without the channel lock held).
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3820
3821 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3822 {
3823 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3824 u16 type;
3825
3826 type = __le16_to_cpu(req->type);
3827
3828 BT_DBG("type 0x%4.4x", type);
3829
3830 if (type == L2CAP_IT_FEAT_MASK) {
3831 u8 buf[8];
3832 u32 feat_mask = l2cap_feat_mask;
3833 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3834 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3835 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3836 if (!disable_ertm)
3837 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3838 | L2CAP_FEAT_FCS;
3839 if (enable_hs)
3840 feat_mask |= L2CAP_FEAT_EXT_FLOW
3841 | L2CAP_FEAT_EXT_WINDOW;
3842
3843 put_unaligned_le32(feat_mask, rsp->data);
3844 l2cap_send_cmd(conn, cmd->ident,
3845 L2CAP_INFO_RSP, sizeof(buf), buf);
3846 } else if (type == L2CAP_IT_FIXED_CHAN) {
3847 u8 buf[12];
3848 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3849
3850 if (enable_hs)
3851 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3852 else
3853 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3854
3855 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3856 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3857 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3858 l2cap_send_cmd(conn, cmd->ident,
3859 L2CAP_INFO_RSP, sizeof(buf), buf);
3860 } else {
3861 struct l2cap_info_rsp rsp;
3862 rsp.type = cpu_to_le16(type);
3863 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3864 l2cap_send_cmd(conn, cmd->ident,
3865 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3866 }
3867
3868 return 0;
3869 }
3870
/* Handle an incoming Information Response during connection setup.
 *
 * After receiving the feature mask we optionally query the fixed
 * channel map; once the exchange is complete (or failed), pending
 * channels are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* Peer could not answer; proceed without feature information */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed channel query */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
3928
3929 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3930 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3931 void *data)
3932 {
3933 struct l2cap_create_chan_req *req = data;
3934 struct l2cap_create_chan_rsp rsp;
3935 u16 psm, scid;
3936
3937 if (cmd_len != sizeof(*req))
3938 return -EPROTO;
3939
3940 if (!enable_hs)
3941 return -EINVAL;
3942
3943 psm = le16_to_cpu(req->psm);
3944 scid = le16_to_cpu(req->scid);
3945
3946 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3947
3948 /* Placeholder: Always reject */
3949 rsp.dcid = 0;
3950 rsp.scid = cpu_to_le16(scid);
3951 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3952 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3953
3954 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3955 sizeof(rsp), &rsp);
3956
3957 return 0;
3958 }
3959
/* Handle an AMP Create Channel Response.
 *
 * The response payload matches a Connect Response, so the regular
 * connect response handler is reused.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
3967
3968 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3969 u16 icid, u16 result)
3970 {
3971 struct l2cap_move_chan_rsp rsp;
3972
3973 BT_DBG("icid %d, result %d", icid, result);
3974
3975 rsp.icid = cpu_to_le16(icid);
3976 rsp.result = cpu_to_le16(result);
3977
3978 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3979 }
3980
3981 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3982 struct l2cap_chan *chan, u16 icid, u16 result)
3983 {
3984 struct l2cap_move_chan_cfm cfm;
3985 u8 ident;
3986
3987 BT_DBG("icid %d, result %d", icid, result);
3988
3989 ident = l2cap_get_ident(conn);
3990 if (chan)
3991 chan->ident = ident;
3992
3993 cfm.icid = cpu_to_le16(icid);
3994 cfm.result = cpu_to_le16(result);
3995
3996 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3997 }
3998
3999 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4000 u16 icid)
4001 {
4002 struct l2cap_move_chan_cfm_rsp rsp;
4003
4004 BT_DBG("icid %d", icid);
4005
4006 rsp.icid = cpu_to_le16(icid);
4007 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4008 }
4009
4010 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4011 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4012 {
4013 struct l2cap_move_chan_req *req = data;
4014 u16 icid = 0;
4015 u16 result = L2CAP_MR_NOT_ALLOWED;
4016
4017 if (cmd_len != sizeof(*req))
4018 return -EPROTO;
4019
4020 icid = le16_to_cpu(req->icid);
4021
4022 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
4023
4024 if (!enable_hs)
4025 return -EINVAL;
4026
4027 /* Placeholder: Always refuse */
4028 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4029
4030 return 0;
4031 }
4032
4033 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4034 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4035 {
4036 struct l2cap_move_chan_rsp *rsp = data;
4037 u16 icid, result;
4038
4039 if (cmd_len != sizeof(*rsp))
4040 return -EPROTO;
4041
4042 icid = le16_to_cpu(rsp->icid);
4043 result = le16_to_cpu(rsp->result);
4044
4045 BT_DBG("icid %d, result %d", icid, result);
4046
4047 /* Placeholder: Always unconfirmed */
4048 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4049
4050 return 0;
4051 }
4052
4053 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4054 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4055 {
4056 struct l2cap_move_chan_cfm *cfm = data;
4057 u16 icid, result;
4058
4059 if (cmd_len != sizeof(*cfm))
4060 return -EPROTO;
4061
4062 icid = le16_to_cpu(cfm->icid);
4063 result = le16_to_cpu(cfm->result);
4064
4065 BT_DBG("icid %d, result %d", icid, result);
4066
4067 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4068
4069 return 0;
4070 }
4071
4072 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4073 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4074 {
4075 struct l2cap_move_chan_cfm_rsp *rsp = data;
4076 u16 icid;
4077
4078 if (cmd_len != sizeof(*rsp))
4079 return -EPROTO;
4080
4081 icid = le16_to_cpu(rsp->icid);
4082
4083 BT_DBG("icid %d", icid);
4084
4085 return 0;
4086 }
4087
4088 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4089 u16 to_multiplier)
4090 {
4091 u16 max_latency;
4092
4093 if (min > max || min < 6 || max > 3200)
4094 return -EINVAL;
4095
4096 if (to_multiplier < 10 || to_multiplier > 3200)
4097 return -EINVAL;
4098
4099 if (max >= to_multiplier * 8)
4100 return -EINVAL;
4101
4102 max_latency = (to_multiplier * 8 / max) - 1;
4103 if (latency > 499 || latency > max_latency)
4104 return -EINVAL;
4105
4106 return 0;
4107 }
4108
4109 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4110 struct l2cap_cmd_hdr *cmd, u8 *data)
4111 {
4112 struct hci_conn *hcon = conn->hcon;
4113 struct l2cap_conn_param_update_req *req;
4114 struct l2cap_conn_param_update_rsp rsp;
4115 u16 min, max, latency, to_multiplier, cmd_len;
4116 int err;
4117
4118 if (!(hcon->link_mode & HCI_LM_MASTER))
4119 return -EINVAL;
4120
4121 cmd_len = __le16_to_cpu(cmd->len);
4122 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4123 return -EPROTO;
4124
4125 req = (struct l2cap_conn_param_update_req *) data;
4126 min = __le16_to_cpu(req->min);
4127 max = __le16_to_cpu(req->max);
4128 latency = __le16_to_cpu(req->latency);
4129 to_multiplier = __le16_to_cpu(req->to_multiplier);
4130
4131 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4132 min, max, latency, to_multiplier);
4133
4134 memset(&rsp, 0, sizeof(rsp));
4135
4136 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4137 if (err)
4138 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4139 else
4140 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4141
4142 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4143 sizeof(rsp), &rsp);
4144
4145 if (!err)
4146 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
4147
4148 return 0;
4149 }
4150
4151 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4152 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4153 {
4154 int err = 0;
4155
4156 switch (cmd->code) {
4157 case L2CAP_COMMAND_REJ:
4158 l2cap_command_rej(conn, cmd, data);
4159 break;
4160
4161 case L2CAP_CONN_REQ:
4162 err = l2cap_connect_req(conn, cmd, data);
4163 break;
4164
4165 case L2CAP_CONN_RSP:
4166 err = l2cap_connect_rsp(conn, cmd, data);
4167 break;
4168
4169 case L2CAP_CONF_REQ:
4170 err = l2cap_config_req(conn, cmd, cmd_len, data);
4171 break;
4172
4173 case L2CAP_CONF_RSP:
4174 err = l2cap_config_rsp(conn, cmd, data);
4175 break;
4176
4177 case L2CAP_DISCONN_REQ:
4178 err = l2cap_disconnect_req(conn, cmd, data);
4179 break;
4180
4181 case L2CAP_DISCONN_RSP:
4182 err = l2cap_disconnect_rsp(conn, cmd, data);
4183 break;
4184
4185 case L2CAP_ECHO_REQ:
4186 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4187 break;
4188
4189 case L2CAP_ECHO_RSP:
4190 break;
4191
4192 case L2CAP_INFO_REQ:
4193 err = l2cap_information_req(conn, cmd, data);
4194 break;
4195
4196 case L2CAP_INFO_RSP:
4197 err = l2cap_information_rsp(conn, cmd, data);
4198 break;
4199
4200 case L2CAP_CREATE_CHAN_REQ:
4201 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4202 break;
4203
4204 case L2CAP_CREATE_CHAN_RSP:
4205 err = l2cap_create_channel_rsp(conn, cmd, data);
4206 break;
4207
4208 case L2CAP_MOVE_CHAN_REQ:
4209 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4210 break;
4211
4212 case L2CAP_MOVE_CHAN_RSP:
4213 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4214 break;
4215
4216 case L2CAP_MOVE_CHAN_CFM:
4217 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4218 break;
4219
4220 case L2CAP_MOVE_CHAN_CFM_RSP:
4221 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4222 break;
4223
4224 default:
4225 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4226 err = -EINVAL;
4227 break;
4228 }
4229
4230 return err;
4231 }
4232
4233 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4234 struct l2cap_cmd_hdr *cmd, u8 *data)
4235 {
4236 switch (cmd->code) {
4237 case L2CAP_COMMAND_REJ:
4238 return 0;
4239
4240 case L2CAP_CONN_PARAM_UPDATE_REQ:
4241 return l2cap_conn_param_update_req(conn, cmd, data);
4242
4243 case L2CAP_CONN_PARAM_UPDATE_RSP:
4244 return 0;
4245
4246 default:
4247 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4248 return -EINVAL;
4249 }
4250 }
4251
/* Process an skb received on the L2CAP signaling channel.
 *
 * A signaling PDU may carry several commands back to back; each is
 * parsed and dispatched to the BR/EDR or LE handler depending on the
 * link type. Failing commands are answered with a command reject.
 * Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A command that claims more payload than remains, or a
		 * zero identifier, means the PDU is corrupt; stop parsing.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): err may also come from a handler
			 * failure, not just a wrong link type — the message
			 * below is misleading in that case.
			 */
			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next command in the same PDU */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
4298
4299 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4300 {
4301 u16 our_fcs, rcv_fcs;
4302 int hdr_size;
4303
4304 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4305 hdr_size = L2CAP_EXT_HDR_SIZE;
4306 else
4307 hdr_size = L2CAP_ENH_HDR_SIZE;
4308
4309 if (chan->fcs == L2CAP_FCS_CRC16) {
4310 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4311 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4312 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4313
4314 if (our_fcs != rcv_fcs)
4315 return -EBADMSG;
4316 }
4317 return 0;
4318 }
4319
/* Answer a poll (P=1) from the peer: a final (F=1) response must go
 * out, either piggybacked on an I-frame/RNR or as an explicit RR.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* When locally busy, tell the peer to stop sending via RNR */
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Peer is no longer busy: restart the retransmission timer if
	 * frames still await acknowledgement.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
			chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
4353
/* Chain a continuation fragment onto the SDU being reassembled and
 * update the head skb's length accounting to include it.
 *
 * *last_frag initially points at the head skb itself, and is advanced
 * to the newest fragment on each call.
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	/* NOTE(review): on the first fragment *last_frag is the head skb,
	 * so this writes skb->next as well as frag_list above — appears
	 * harmless since the head is not on any queue here, but confirm.
	 */
	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4372
/* Reassemble an SDU from ERTM/streaming I-frames according to the
 * SAR bits in the control field.
 *
 * On success the skb (or the completed SDU) is handed to ops->recv,
 * which takes ownership. Returns 0 on success; on error the frame and
 * any partially assembled SDU are freed and a negative errno is
 * returned (-EINVAL for SAR sequence violations, -EMSGSIZE when the
 * SDU exceeds the receive MTU, or the receive callback's error).
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A whole SDU in one frame; invalid mid-reassembly */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* First fragment; invalid if reassembly already started */
		if (chan->sdu)
			break;

		/* The start fragment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must not already contain the full SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb now owned by the reassembly state */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* Middle fragment; invalid without a start fragment */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Overshooting the announced SDU length is an error */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* Final fragment; invalid without a start fragment */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Total length must match exactly what was announced */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the offending frame (if still owned here) and
		 * any partially assembled SDU.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4454
4455 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4456 {
4457 u8 event;
4458
4459 if (chan->mode != L2CAP_MODE_ERTM)
4460 return;
4461
4462 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4463 l2cap_tx(chan, NULL, NULL, event);
4464 }
4465
/* Drain in-sequence frames from the SREJ queue after missing frames
 * have been retransmitted, returning to the normal receive state once
 * the queue is empty.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
				chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found: the next expected frame is still missing */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	/* All stored frames delivered: leave the SREJ_SENT state */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
4499
/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame, enforcing the max_tx retry limit and tracking poll/final
 * bits so duplicate SREJs after a poll are not retransmitted twice.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq asks for a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* SREJ with P=1: ack progress, respond with F=1 and
		 * remember the reqseq so a later duplicate (with F=1)
		 * is not retransmitted again.
		 */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this is the same
			 * SREJ already answered during the poll exchange.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
4557
/* Handle a received REJ S-frame: retransmit all unacknowledged
 * I-frames starting at reqseq, enforcing the max_tx retry limit and
 * avoiding double retransmission after a poll/final exchange.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq rejects a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* If REJ_ACT is set, this REJ was already answered during
		 * the poll exchange; don't retransmit a second time.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
4594
/* Classify the TxSeq of a received I-frame relative to the receive
 * window: expected, duplicate, unexpected (gap → SREJ), or invalid.
 * Additional SREJ-specific classifications apply while in the
 * SREJ_SENT state. Returns one of the L2CAP_TXSEQ_* codes.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
								chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		/* Head of the SREJ list is the frame we asked for first */
		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq before expected_tx_seq (modulo window) is a re-send */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
		__seq_offset(chan, chan->expected_tx_seq,
			     chan->last_acked_seq)){
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
4681
/* ERTM receive state machine: normal RECV state.
 *
 * Handles incoming I-frames (delivering in-sequence data, entering the
 * SREJ_SENT state on gaps) and S-frames (RR/RNR/REJ/SREJ). The skb is
 * consumed here unless it was queued or handed to reassembly.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = 0;	/* set once skb ownership moves elsewhere */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Ack any frames this I-frame acknowledges */
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				/* F=1 answers our earlier P=1; if no REJ
				 * was outstanding, retransmit everything
				 * unacknowledged.
				 */
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only process the ack info */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Final RR answers our poll; retransmit unless a
			 * REJ exchange already handled it.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Plain RR: peer recovered from busy; resume
			 * transmission and restart the retrans timer if
			 * frames still await acknowledgement.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		/* Peer is busy: stop (re)transmitting until it recovers */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Free the frame unless ownership was transferred above */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
4816
/* ERTM receive-side handler for the SREJ_SENT state, i.e. while one or
 * more Selective Reject requests are outstanding and out-of-order
 * I-frames are being buffered on chan->srej_q.
 *
 * Takes ownership of @skb: it is either queued for later reassembly
 * (tracked via skb_in_use) or freed before returning.
 *
 * Returns 0 on success or a negative error propagated from
 * l2cap_rx_queued_iframes().
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* This is the retransmission we asked for next;
			 * retire its entry from the SREJ list, queue the
			 * frame, and try to flush in-order frames up to
			 * the reassembler.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			/* Unrecoverable sequence error: tear the channel down. */
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* F=1 answers our earlier poll; retransmit unless a
			 * REJ-triggered retransmission is already in flight.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		/* Peer is busy: note it, and answer a poll with the SREJ
		 * for the oldest missing frame, or a plain RR otherwise.
		 */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Frames not queued above are consumed here. */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
4960
4961 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4962 {
4963 /* Make sure reqseq is for a packet that has been sent but not acked */
4964 u16 unacked;
4965
4966 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4967 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
4968 }
4969
4970 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4971 struct sk_buff *skb, u8 event)
4972 {
4973 int err = 0;
4974
4975 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4976 control, skb, event, chan->rx_state);
4977
4978 if (__valid_reqseq(chan, control->reqseq)) {
4979 switch (chan->rx_state) {
4980 case L2CAP_RX_STATE_RECV:
4981 err = l2cap_rx_state_recv(chan, control, skb, event);
4982 break;
4983 case L2CAP_RX_STATE_SREJ_SENT:
4984 err = l2cap_rx_state_srej_sent(chan, control, skb,
4985 event);
4986 break;
4987 default:
4988 /* shut it down */
4989 break;
4990 }
4991 } else {
4992 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
4993 control->reqseq, chan->next_tx_seq,
4994 chan->expected_ack_seq);
4995 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4996 }
4997
4998 return err;
4999 }
5000
5001 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5002 struct sk_buff *skb)
5003 {
5004 int err = 0;
5005
5006 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5007 chan->rx_state);
5008
5009 if (l2cap_classify_txseq(chan, control->txseq) ==
5010 L2CAP_TXSEQ_EXPECTED) {
5011 l2cap_pass_to_tx(chan, control);
5012
5013 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5014 __next_seq(chan, chan->buffer_seq));
5015
5016 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5017
5018 l2cap_reassemble_sdu(chan, skb, control);
5019 } else {
5020 if (chan->sdu) {
5021 kfree_skb(chan->sdu);
5022 chan->sdu = NULL;
5023 }
5024 chan->sdu_last_frag = NULL;
5025 chan->sdu_len = 0;
5026
5027 if (skb) {
5028 BT_DBG("Freeing %p", skb);
5029 kfree_skb(skb);
5030 }
5031 }
5032
5033 chan->last_acked_seq = control->txseq;
5034 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5035
5036 return err;
5037 }
5038
/* Entry point for ERTM/streaming payloads on a connected channel.
 * Unpacks the control field, validates FCS and payload length, then
 * dispatches I-frames into the rx state machine (or streaming rx) and
 * S-frames as the corresponding RR/REJ/RNR/SREJ events.
 *
 * Always consumes @skb (directly, or via the handlers it calls).
 * Always returns 0; internal errors trigger a disconnect request.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Information payload length excludes the SDU-length field of a
	 * START fragment and the trailing FCS, when present.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* Payload larger than the negotiated MPS is a protocol violation. */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
	} else {
		/* Indexed by the 2-bit S field of the control word. */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no information payload. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
5127
/* Route an incoming data frame to the channel identified by @cid.
 * Frames for unknown CIDs are dropped, except L2CAP_CID_A2MP which may
 * create an AMP manager channel on demand.
 *
 * NOTE(review): the A2MP path locks the channel explicitly, and the
 * function unlocks on exit in all cases — presumably
 * l2cap_get_chan_by_scid() returns with the channel already locked;
 * confirm against its definition.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() returning 0 means it took ownership of skb. */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() always consumes the skb. */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
5186
5187 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5188 struct sk_buff *skb)
5189 {
5190 struct l2cap_chan *chan;
5191
5192 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5193 if (!chan)
5194 goto drop;
5195
5196 BT_DBG("chan %p, len %d", chan, skb->len);
5197
5198 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5199 goto drop;
5200
5201 if (chan->imtu < skb->len)
5202 goto drop;
5203
5204 if (!chan->ops->recv(chan, skb))
5205 return;
5206
5207 drop:
5208 kfree_skb(skb);
5209 }
5210
5211 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5212 struct sk_buff *skb)
5213 {
5214 struct l2cap_chan *chan;
5215
5216 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5217 if (!chan)
5218 goto drop;
5219
5220 BT_DBG("chan %p, len %d", chan, skb->len);
5221
5222 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5223 goto drop;
5224
5225 if (chan->imtu < skb->len)
5226 goto drop;
5227
5228 if (!chan->ops->recv(chan, skb))
5229 return;
5230
5231 drop:
5232 kfree_skb(skb);
5233 }
5234
5235 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5236 {
5237 struct l2cap_hdr *lh = (void *) skb->data;
5238 u16 cid, len;
5239 __le16 psm;
5240
5241 skb_pull(skb, L2CAP_HDR_SIZE);
5242 cid = __le16_to_cpu(lh->cid);
5243 len = __le16_to_cpu(lh->len);
5244
5245 if (len != skb->len) {
5246 kfree_skb(skb);
5247 return;
5248 }
5249
5250 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5251
5252 switch (cid) {
5253 case L2CAP_CID_LE_SIGNALING:
5254 case L2CAP_CID_SIGNALING:
5255 l2cap_sig_channel(conn, skb);
5256 break;
5257
5258 case L2CAP_CID_CONN_LESS:
5259 psm = get_unaligned((__le16 *) skb->data);
5260 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5261 l2cap_conless_channel(conn, psm, skb);
5262 break;
5263
5264 case L2CAP_CID_LE_DATA:
5265 l2cap_att_channel(conn, cid, skb);
5266 break;
5267
5268 case L2CAP_CID_SMP:
5269 if (smp_sig_channel(conn, skb))
5270 l2cap_conn_del(conn->hcon, EACCES);
5271 break;
5272
5273 default:
5274 l2cap_data_channel(conn, cid, skb);
5275 break;
5276 }
5277 }
5278
5279 /* ---- L2CAP interface with lower layer (HCI) ---- */
5280
5281 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5282 {
5283 int exact = 0, lm1 = 0, lm2 = 0;
5284 struct l2cap_chan *c;
5285
5286 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5287
5288 /* Find listening sockets and check their link_mode */
5289 read_lock(&chan_list_lock);
5290 list_for_each_entry(c, &chan_list, global_l) {
5291 struct sock *sk = c->sk;
5292
5293 if (c->state != BT_LISTEN)
5294 continue;
5295
5296 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5297 lm1 |= HCI_LM_ACCEPT;
5298 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5299 lm1 |= HCI_LM_MASTER;
5300 exact++;
5301 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5302 lm2 |= HCI_LM_ACCEPT;
5303 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5304 lm2 |= HCI_LM_MASTER;
5305 }
5306 }
5307 read_unlock(&chan_list_lock);
5308
5309 return exact ? lm1 : lm2;
5310 }
5311
5312 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5313 {
5314 struct l2cap_conn *conn;
5315
5316 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5317
5318 if (!status) {
5319 conn = l2cap_conn_add(hcon, status);
5320 if (conn)
5321 l2cap_conn_ready(conn);
5322 } else
5323 l2cap_conn_del(hcon, bt_to_errno(status));
5324
5325 return 0;
5326 }
5327
5328 int l2cap_disconn_ind(struct hci_conn *hcon)
5329 {
5330 struct l2cap_conn *conn = hcon->l2cap_data;
5331
5332 BT_DBG("hcon %p", hcon);
5333
5334 if (!conn)
5335 return HCI_ERROR_REMOTE_USER_TERM;
5336 return conn->disc_reason;
5337 }
5338
/* HCI callback: the ACL link went down; tear down the whole L2CAP
 * connection, converting the HCI reason to an errno for the channels.
 */
int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
	return 0;
}
5346
5347 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5348 {
5349 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5350 return;
5351
5352 if (encrypt == 0x00) {
5353 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5354 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5355 } else if (chan->sec_level == BT_SECURITY_HIGH)
5356 l2cap_chan_close(chan, ECONNREFUSED);
5357 } else {
5358 if (chan->sec_level == BT_SECURITY_MEDIUM)
5359 __clear_chan_timer(chan);
5360 }
5361 }
5362
/* HCI callback: authentication/encryption state changed on @hcon.
 * Walks every channel on the connection and advances its state machine
 * accordingly: LE data channels become ready, connecting channels send
 * their connect request, and CONNECT2 channels answer the peer's
 * pending connection request with success or a security block.
 *
 * Runs with conn->chan_lock held across the walk and each channel's
 * lock held while it is processed.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	if (hcon->type == LE_LINK) {
		/* Encryption is up on an LE link: start SMP key
		 * distribution and stop the pairing security timer. */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan->scid %d", chan->scid);

		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Channel is still waiting on its own security request;
		 * nothing more to do for it here. */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status) {
				l2cap_send_conn_req(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must authorize first:
					 * report "pending" and wake the
					 * listening parent socket. */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On acceptance, kick off configuration right away
			 * if we have not already sent a config request. */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
5474
/* HCI callback: reassemble incoming ACL fragments into complete L2CAP
 * frames.  A start fragment (no ACL_CONT) carries the basic L2CAP
 * header, from which the total frame length is learned; continuation
 * fragments are appended to conn->rx_skb until rx_len reaches zero,
 * at which point the frame is handed to l2cap_recv_frame().
 *
 * Consumes @skb on every path (complete single-fragment frames are
 * passed through; everything else is copied and freed).  Returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		int len;

		/* A start fragment while reassembly is in progress means
		 * the previous frame was truncated: discard it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* Bytes still expected before the frame is complete. */
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
5566
/* debugfs seq_file show: print one line per registered L2CAP channel
 * (addresses, state, PSM, CIDs, MTUs, security level, mode).
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   batostr(&bt_sk(sk)->src),
			   batostr(&bt_sk(sk)->dst),
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}
5588
/* debugfs open: bind the seq_file single-shot show callback. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5593
/* File operations for the read-only "l2cap" debugfs entry. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
5600
/* Dentry of the "l2cap" debugfs file; created in l2cap_init(),
 * removed in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
5602
5603 int __init l2cap_init(void)
5604 {
5605 int err;
5606
5607 err = l2cap_init_sockets();
5608 if (err < 0)
5609 return err;
5610
5611 if (bt_debugfs) {
5612 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5613 bt_debugfs, NULL, &l2cap_debugfs_fops);
5614 if (!l2cap_debugfs)
5615 BT_ERR("Failed to create L2CAP debug file");
5616 }
5617
5618 return 0;
5619 }
5620
/* Subsystem teardown: remove the debugfs entry and unregister the
 * L2CAP socket layer. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5626
/* Module parameter (rw in sysfs): set to true to disable enhanced
 * retransmission mode support. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
/* (removed non-code extraction residue: git web-viewer page footer) */