Bluetooth: A2MP: Manage incoming connections
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41
/* Module parameter: set to disable Enhanced Retransmission Mode support. */
bool disable_ertm;

/* Local feature mask advertised in Information Responses; fixed-channel
 * map with the L2CAP signalling channel always present. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };

/* Global list of all registered channels, protected by chan_list_lock. */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);
50 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
52 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
53 void *data);
54 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
57
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
60
61 /* ---- L2CAP channels ---- */
62
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
64 {
65 struct l2cap_chan *c;
66
67 list_for_each_entry(c, &conn->chan_l, list) {
68 if (c->dcid == cid)
69 return c;
70 }
71 return NULL;
72 }
73
74 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
75 {
76 struct l2cap_chan *c;
77
78 list_for_each_entry(c, &conn->chan_l, list) {
79 if (c->scid == cid)
80 return c;
81 }
82 return NULL;
83 }
84
85 /* Find channel with given SCID.
86 * Returns locked channel. */
87 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
88 {
89 struct l2cap_chan *c;
90
91 mutex_lock(&conn->chan_lock);
92 c = __l2cap_get_chan_by_scid(conn, cid);
93 if (c)
94 l2cap_chan_lock(c);
95 mutex_unlock(&conn->chan_lock);
96
97 return c;
98 }
99
100 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
101 {
102 struct l2cap_chan *c;
103
104 list_for_each_entry(c, &conn->chan_l, list) {
105 if (c->ident == ident)
106 return c;
107 }
108 return NULL;
109 }
110
111 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
112 {
113 struct l2cap_chan *c;
114
115 list_for_each_entry(c, &chan_list, global_l) {
116 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
117 return c;
118 }
119 return NULL;
120 }
121
122 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
123 {
124 int err;
125
126 write_lock(&chan_list_lock);
127
128 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
129 err = -EADDRINUSE;
130 goto done;
131 }
132
133 if (psm) {
134 chan->psm = psm;
135 chan->sport = psm;
136 err = 0;
137 } else {
138 u16 p;
139
140 err = -EINVAL;
141 for (p = 0x1001; p < 0x1100; p += 2)
142 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
143 chan->psm = cpu_to_le16(p);
144 chan->sport = cpu_to_le16(p);
145 err = 0;
146 break;
147 }
148 }
149
150 done:
151 write_unlock(&chan_list_lock);
152 return err;
153 }
154
/* Assign a fixed source CID to the channel.
 * Always returns 0; the write lock serializes against global lookups. */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
165
166 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
167 {
168 u16 cid = L2CAP_CID_DYN_START;
169
170 for (; cid < L2CAP_CID_DYN_END; cid++) {
171 if (!__l2cap_get_chan_by_scid(conn, cid))
172 return cid;
173 }
174
175 return 0;
176 }
177
/* Set the channel state and notify the owner via ops->state_change().
 * Lock-free variant; callers normally go through l2cap_state_change(),
 * which takes the socket lock first. */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
						state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
186
/* Socket-locked wrapper around __l2cap_state_change(). */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
195
/* Record an error code on the channel's socket. Lock-free variant for
 * callers that already hold the socket lock (or a suitable context). */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
202
/* Socket-locked wrapper around __l2cap_chan_set_err(). */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
211
/* Arm the ERTM retransmission timer.
 * Deliberately a no-op while the monitor timer is pending (the monitor
 * timer supersedes retransmission) or when no retransmission timeout
 * has been negotiated. */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
			chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
220
/* Arm the ERTM monitor timer, cancelling any pending retransmission
 * timer first; only armed when a monitor timeout has been negotiated. */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
229
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
231 u16 seq)
232 {
233 struct sk_buff *skb;
234
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
237 return skb;
238 }
239
240 return NULL;
241 }
242
243 /* ---- L2CAP sequence number lists ---- */
244
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
251 * allocs or frees.
252 */
253
254 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
255 {
256 size_t alloc_size, i;
257
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
261 */
262 alloc_size = roundup_pow_of_two(size);
263
264 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
265 if (!seq_list->list)
266 return -ENOMEM;
267
268 seq_list->mask = alloc_size - 1;
269 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
271 for (i = 0; i < alloc_size; i++)
272 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
273
274 return 0;
275 }
276
/* Release the storage allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
281
/* Test whether @seq is currently a member of the list. A slot holding
 * L2CAP_SEQ_LIST_CLEAR means the sequence number is not enqueued. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
								u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
288
/* Remove @seq from the list and return it.
 *
 * The list is a singly-linked chain stored inside the array itself:
 * each slot holds the next sequence number in order, with the final
 * element marked L2CAP_SEQ_LIST_TAIL. Removing the head is O(1);
 * removing an interior element walks the chain. Returns
 * L2CAP_SEQ_LIST_CLEAR if the list is empty or @seq is not present.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removed the only element: reset to the empty state. */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
322
/* Pop and return the head of the list (L2CAP_SEQ_LIST_CLEAR if empty). */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
328
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
330 {
331 u16 i;
332
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
334 return;
335
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
338
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 }
342
/* Append @seq to the tail of the list in constant time.
 * Duplicate appends are silently ignored (membership is checked via
 * the slot contents). */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
360
/* Delayed work handler for the channel timer (chan_timer).
 *
 * Closes the channel with an error derived from its current state,
 * then drops the reference the timer held. Lock order: conn->chan_lock
 * is taken before the channel lock; ops->close() is invoked after the
 * channel lock is released but still under conn->chan_lock.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
					chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
390
/* Allocate and initialize a new channel, add it to the global channel
 * list, and return it with an initial reference. Returns NULL on
 * allocation failure. */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	/* Initial reference, dropped in l2cap_chan_destroy(). */
	atomic_set(&chan->refcnt, 1);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
418
/* Unlink the channel from the global list and drop the reference
 * taken in l2cap_chan_create(). */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	l2cap_chan_put(chan);
}
427
/* Reset a channel's negotiable parameters to their protocol defaults
 * (FCS, ERTM window/retry limits, security level, force-active flag). */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
438
/* Attach a channel to a connection, assigning CIDs and MTUs based on
 * the channel type. Caller must hold conn->chan_lock (see
 * l2cap_chan_add()). Takes a channel reference that is released by
 * l2cap_chan_del(). */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			__le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until a more specific one is known. */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		/* Fixed A2MP channel: both sides use the A2MP CID. */
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort QoS parameters. */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
494
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
501
502 void l2cap_chan_del(struct l2cap_chan *chan, int err)
503 {
504 struct l2cap_conn *conn = chan->conn;
505
506 __clear_chan_timer(chan);
507
508 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
509
510 if (conn) {
511 /* Delete from channel list */
512 list_del(&chan->list);
513
514 l2cap_chan_put(chan);
515
516 chan->conn = NULL;
517 hci_conn_put(conn->hcon);
518 }
519
520 if (chan->ops->teardown)
521 chan->ops->teardown(chan, err);
522
523 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
524 return;
525
526 switch(chan->mode) {
527 case L2CAP_MODE_BASIC:
528 break;
529
530 case L2CAP_MODE_ERTM:
531 __clear_retrans_timer(chan);
532 __clear_monitor_timer(chan);
533 __clear_ack_timer(chan);
534
535 skb_queue_purge(&chan->srej_q);
536
537 l2cap_seq_list_free(&chan->srej_list);
538 l2cap_seq_list_free(&chan->retrans_list);
539
540 /* fall through */
541
542 case L2CAP_MODE_STREAMING:
543 skb_queue_purge(&chan->tx_q);
544 break;
545 }
546
547 return;
548 }
549
/* Close a channel, choosing the shutdown path appropriate to its
 * current state. Caller is expected to hold the channel lock.
 *
 * Connected/configuring ACL channels get an orderly Disconnection
 * Request; a channel still in BT_CONNECT2 first answers the pending
 * Connection Request with a rejection before being deleted. */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan,
					state_to_string(chan->state), sk);

	switch (chan->state) {
	case BT_LISTEN:
		if (chan->ops->teardown)
			chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Orderly disconnect over ACL: arm the timer and send a
		 * Disconnection Request; otherwise just drop the channel. */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* A Connection Request is pending: reject it first. */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		if (chan->ops->teardown)
			chan->ops->teardown(chan, 0);
		break;
	}
}
608
609 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
610 {
611 if (chan->chan_type == L2CAP_CHAN_RAW) {
612 switch (chan->sec_level) {
613 case BT_SECURITY_HIGH:
614 return HCI_AT_DEDICATED_BONDING_MITM;
615 case BT_SECURITY_MEDIUM:
616 return HCI_AT_DEDICATED_BONDING;
617 default:
618 return HCI_AT_NO_BONDING;
619 }
620 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
621 if (chan->sec_level == BT_SECURITY_LOW)
622 chan->sec_level = BT_SECURITY_SDP;
623
624 if (chan->sec_level == BT_SECURITY_HIGH)
625 return HCI_AT_NO_BONDING_MITM;
626 else
627 return HCI_AT_NO_BONDING;
628 } else {
629 switch (chan->sec_level) {
630 case BT_SECURITY_HIGH:
631 return HCI_AT_GENERAL_BONDING_MITM;
632 case BT_SECURITY_MEDIUM:
633 return HCI_AT_GENERAL_BONDING;
634 default:
635 return HCI_AT_NO_BONDING;
636 }
637 }
638 }
639
/* Service level security */
/* Check (and if needed initiate) link security for this channel.
 * Returns the result of hci_conn_security() for the mapped auth type. */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
650
/* Allocate the next signalling command identifier for this connection.
 * Wraps within 1-128 under conn->lock; 0 is never returned. */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
672
673 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
674 {
675 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
676 u8 flags;
677
678 BT_DBG("code 0x%2.2x", code);
679
680 if (!skb)
681 return;
682
683 if (lmp_no_flush_capable(conn->hcon->hdev))
684 flags = ACL_START_NO_FLUSH;
685 else
686 flags = ACL_START;
687
688 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
689 skb->priority = HCI_PRIO_MAX;
690
691 hci_send_acl(conn->hchan, skb, flags);
692 }
693
694 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
695 {
696 struct hci_conn *hcon = chan->conn->hcon;
697 u16 flags;
698
699 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
700 skb->priority);
701
702 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
703 lmp_no_flush_capable(hcon->hdev))
704 flags = ACL_START_NO_FLUSH;
705 else
706 flags = ACL_START;
707
708 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
709 hci_send_acl(chan->conn->hchan, skb, flags);
710 }
711
/* Decode a 16-bit enhanced control field into @control.
 * Fields that do not apply to the frame type are zeroed. */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
735
/* Decode a 32-bit extended control field into @control.
 * Fields that do not apply to the frame type are zeroed. */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
759
/* Parse the control field from the head of @skb into bt_cb(skb),
 * consuming 2 or 4 bytes depending on the channel's extended-control
 * flag. */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
773
774 static u32 __pack_extended_control(struct l2cap_ctrl *control)
775 {
776 u32 packed;
777
778 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
779 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
780
781 if (control->sframe) {
782 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
783 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
784 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
785 } else {
786 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
787 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
788 }
789
790 return packed;
791 }
792
/* Encode @control into a 16-bit enhanced control field. */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		/* S-frame: poll bit, supervisory code, frame-type bit */
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		/* I-frame: SAR and TxSeq */
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
811
/* Write @control into @skb directly after the L2CAP basic header,
 * using the 32-bit or 16-bit encoding per the channel's flags. The
 * skb must already contain room at that offset. */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
824
/* Build a complete S-frame PDU for @chan carrying the given packed
 * control field, appending a CRC16 FCS when negotiated.
 * Returns the skb or ERR_PTR(-ENOMEM). */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
								u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers the header and control field written so far. */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
862
/* Send an ERTM supervisory frame described by @control.
 *
 * Updates connection state as a side effect: sets the F-bit if one is
 * owed (unless polling), tracks RNR-sent state, and for non-SREJ
 * frames records the acked sequence and stops the ack timer. */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit is carried on the next non-poll S-frame. */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
900
901 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
902 {
903 struct l2cap_ctrl control;
904
905 BT_DBG("chan %p, poll %d", chan, poll);
906
907 memset(&control, 0, sizeof(control));
908 control.sframe = 1;
909 control.poll = poll;
910
911 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
912 control.super = L2CAP_SUPER_RNR;
913 else
914 control.super = L2CAP_SUPER_RR;
915
916 control.reqseq = chan->buffer_seq;
917 l2cap_send_sframe(chan, &control);
918 }
919
/* True when no Connection Request is currently outstanding. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
924
/* Send an L2CAP Connection Request for this channel, recording the
 * new signalling ident and marking the connect as pending. */
static void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
939
/* Mark the channel fully connected: clear all configuration flags,
 * stop the channel timer and notify the owner via ops->ready(). */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	if (chan->ops->ready)
		chan->ops->ready(chan);
}
951
/* Kick off connection establishment for a channel.
 *
 * LE links are ready immediately. On BR/EDR a Connection Request is
 * only sent once the remote feature mask is known and security checks
 * pass; otherwise an Information Request is issued first and the
 * connect resumes from the info response path. */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight: wait for it. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
				__l2cap_no_conn_pending(chan))
			l2cap_send_conn_req(chan);
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
981
982 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
983 {
984 u32 local_feat_mask = l2cap_feat_mask;
985 if (!disable_ertm)
986 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
987
988 switch (mode) {
989 case L2CAP_MODE_ERTM:
990 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
991 case L2CAP_MODE_STREAMING:
992 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
993 default:
994 return 0x00;
995 }
996 }
997
/* Send a Disconnection Request for the channel and move it to
 * BT_DISCONN with the given socket error.
 *
 * ERTM timers are stopped first. Fixed A2MP channels have no
 * signalling disconnect: only their state is changed. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		__l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1027
1028 /* ---- L2CAP connections ---- */
/* Advance every connection-oriented channel on @conn once the feature
 * exchange has completed.
 *
 * BT_CONNECT channels get a Connection Request (or are closed when
 * their mode is unsupported on a state-2 device); BT_CONNECT2 channels
 * get their pending Connection Response — success, authorization
 * pending (deferred setup), or authentication pending — and, on
 * success, a first Configure Request. */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Unsupported mode on a device that cannot fall
			 * back: close rather than connect. */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_send_conn_req(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Wait for userspace authorization;
					 * wake the listening parent. */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1111
/* Find a registered channel with the given CID and source/destination
 * addresses. An exact address match wins; otherwise the closest
 * wildcard (BDADDR_ANY) match is returned.
 *
 * NOTE(review): despite older comments, the returned channel is NOT
 * locked by this function — callers must take any lock they need.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1154
/* Accept an incoming LE connection: find a listening channel on the
 * LE data CID, spawn a child channel from it, attach the child to the
 * connection and mark it ready. Silently returns when no listener
 * matches or the child cannot be created. */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	sk = chan->sk;

	/* Child keeps a reference on the underlying HCI connection. */
	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	l2cap_chan_add(conn, chan);

	l2cap_chan_ready(chan);

clean:
	release_sock(parent);
}
1192
/* Called when the underlying HCI connection is up: accept incoming LE
 * links, kick off SMP on outgoing LE links, and walk all channels on
 * the connection to move each one forward.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: hand off to the LE accept path */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: start pairing at the pending security level */
	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* The A2MP fixed channel is managed by the A2MP layer,
		 * not by the normal L2CAP connect/config flow.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (conn->hcon->type == LE_LINK) {
			/* LE channels become ready once security is
			 * satisfied.
			 */
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels need no L2CAP
			 * signalling; mark them connected right away.
			 */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1236
/* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* Only channels that demanded reliable delivery get the error */
	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			__l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1253
/* Information request timed out: give up waiting for the remote
 * feature mask and start the pending channels anyway.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	/* Mark the feature exchange as done so it is not retried */
	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1264
/* Tear down an L2CAP connection: kill every channel on it, cancel
 * pending timers/work, and free the connection structure.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it survives l2cap_chan_del()
		 * removing it from the list; released after close().
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		/* close() is called without the channel lock held */
		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* Only one of info_timer/security_timer was ever initialized,
	 * depending on the link type (see l2cap_conn_add()).
	 */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1307
/* SMP security procedure timed out: drop the whole LE connection */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	l2cap_conn_del(conn->hcon, ETIMEDOUT);
}
1315
/* Create (or return the existing) L2CAP connection for an HCI link.
 * Returns NULL on allocation failure, or the existing conn if one is
 * already attached (also when status != 0, in which case conn is NULL).
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up, or the HCI connection failed */
	if (conn || status)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn) {
		/* Undo the hci_chan allocation on failure */
		hci_chan_del(hchan);
		return NULL;
	}

	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links may advertise their own MTU; fall back to the ACL MTU */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* info_timer and security_timer share storage conceptually:
	 * only the one matching the link type is ever initialized.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1364
1365 /* ---- Socket interface ---- */
1366
1367 /* Find socket with psm and source / destination bdaddr.
1368 * Returns closest match.
1369 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		/* state == 0 matches channels in any state */
		if (state && c->state != state)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match: a wildcard (BDADDR_ANY) on
			 * either side is remembered as a fallback.
			 */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1409
/* Initiate an outgoing L2CAP connection on a channel.
 *
 * Validates the PSM/CID and channel mode, creates (or reuses) the HCI
 * link to @dst, attaches the channel to the resulting L2CAP connection
 * and starts the connect procedure.  Returns 0 on success (including
 * "already connecting") or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(chan->psm));

	/* Pick the local adapter that routes to dst */
	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a fixed CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* ERTM/Streaming only when not disabled by module param */
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; everything else is ACL */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		/* Drop the reference taken by hci_connect() */
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* Only one channel is allowed on an LE link */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* l2cap_chan_add() takes conn->chan_lock, which must be
	 * acquired before the channel lock, so drop and re-take it.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	/* The HCI link may already be up (e.g. reused connection) */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1548
/* Block (interruptibly) until all transmitted ERTM frames have been
 * acked by the remote, the channel loses its connection, a signal
 * arrives, or a socket error occurs.  Called with the socket locked;
 * the lock is dropped around each sleep.  Returns 0 or negative errno.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* schedule_timeout() may have consumed the interval;
		 * restart it for the next sleep.
		 */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so acks can be
		 * processed.
		 */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1580
1581 static void l2cap_monitor_timeout(struct work_struct *work)
1582 {
1583 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1584 monitor_timer.work);
1585
1586 BT_DBG("chan %p", chan);
1587
1588 l2cap_chan_lock(chan);
1589
1590 if (!chan->conn) {
1591 l2cap_chan_unlock(chan);
1592 l2cap_chan_put(chan);
1593 return;
1594 }
1595
1596 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1597
1598 l2cap_chan_unlock(chan);
1599 l2cap_chan_put(chan);
1600 }
1601
1602 static void l2cap_retrans_timeout(struct work_struct *work)
1603 {
1604 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1605 retrans_timer.work);
1606
1607 BT_DBG("chan %p", chan);
1608
1609 l2cap_chan_lock(chan);
1610
1611 if (!chan->conn) {
1612 l2cap_chan_unlock(chan);
1613 l2cap_chan_put(chan);
1614 return;
1615 }
1616
1617 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1618 l2cap_chan_unlock(chan);
1619 l2cap_chan_put(chan);
1620 }
1621
/* Transmit queued SDU fragments as Streaming mode I-frames.  Frames
 * are numbered and sent once; no retransmission state is kept.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode does not ack, so reqseq is always 0 */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		/* Append FCS over header + payload when negotiated */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1657
/* Transmit as many queued I-frames as the remote TX window allows.
 * Each frame is cloned before sending so the original stays in tx_q
 * for possible retransmission.  Returns the number of frames sent or
 * a negative errno.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Remote signalled receiver-not-ready: hold transmission */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggy-back a pending F-bit on the outgoing frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acks everything up to buffer_seq */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send pointer; NULL means queue drained */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %d", (int)control->txseq);
	}

	BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
	       (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1724
/* Retransmit every frame whose sequence number is on retrans_list.
 * Enforces the max_tx retry limit (0 means unlimited) and disconnects
 * the channel when it is exceeded.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	/* Remote signalled receiver-not-ready: hold retransmission */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx == 0 means retry forever */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack and F-bit for the retransmission */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			tx_skb = skb_clone(skb, GFP_ATOMIC);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Recompute the FCS over the rewritten control field */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
1800
/* Retransmit the single frame named by control->reqseq (SREJ path) */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
1809
/* Retransmit all unacked frames starting at control->reqseq (REJ or
 * poll-response path).  Rebuilds retrans_list from the tx queue.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll demands a Final bit on our next response frame */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* First, find the frame with reqseq (or stop at the
		 * first never-sent frame).
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Then queue everything already sent from there on for
		 * retransmission.
		 */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
1843
/* Acknowledge received I-frames: send RNR when locally busy, piggyback
 * acks on pending I-frames when possible, send an explicit RR once the
 * unacked window is 3/4 full, otherwise just (re)arm the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the remote to stop sending */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the tx window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->tx_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Anything still unacked will be covered when the ack
		 * timer fires.
		 */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
1893
/* Copy user data from msg into skb, allocating continuation fragments
 * (linked via frag_list) for data beyond the first @count bytes.
 * Returns the number of bytes consumed or a negative errno; on error
 * the caller is expected to free @skb (which frees the frag chain).
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	/* First chunk goes into the head skb (after the L2CAP header) */
	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		/* Each fragment is bounded by the HCI MTU */
		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so a later failure still leaves
		 * the fragment reachable for freeing via the head skb.
		 */
		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the head skb's accounting in sync */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
1938
/* Build a connectionless (G-frame) PDU: L2CAP header + PSM + payload.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);

	/* Head skb holds at most one HCI MTU; the rest goes into
	 * continuation fragments.
	 */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Length covers the PSM field plus the payload */
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1972
1973 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1974 struct msghdr *msg, size_t len,
1975 u32 priority)
1976 {
1977 struct l2cap_conn *conn = chan->conn;
1978 struct sk_buff *skb;
1979 int err, count;
1980 struct l2cap_hdr *lh;
1981
1982 BT_DBG("chan %p len %d", chan, (int)len);
1983
1984 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1985
1986 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1987 msg->msg_flags & MSG_DONTWAIT);
1988 if (IS_ERR(skb))
1989 return skb;
1990
1991 skb->priority = priority;
1992
1993 /* Create L2CAP header */
1994 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1995 lh->cid = cpu_to_le16(chan->dcid);
1996 lh->len = cpu_to_le16(len);
1997
1998 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1999 if (unlikely(err < 0)) {
2000 kfree_skb(skb);
2001 return ERR_PTR(err);
2002 }
2003 return skb;
2004 }
2005
/* Build an ERTM/Streaming I-frame PDU: L2CAP header, zeroed control
 * field (filled in at send time), optional SDU length (first segment
 * only) and payload; space for the FCS is accounted in the length.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on the negotiated control field format */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	/* sdulen != 0 marks the SAR start segment, which carries the
	 * total SDU length.
	 */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2062
/* Segment an SDU from user space into a queue of I-frame PDUs, tagging
 * each with the appropriate SAR value (UNSEGMENTED, or START/CONTINUE/
 * END).  On failure the queue is purged and a negative errno returned.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		pdu_len -= L2CAP_EXT_HDR_SIZE;
	else
		pdu_len -= L2CAP_ENH_HDR_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SAR header needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* Start segment carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the start segment has the SDU length;
			 * reclaim that space for subsequent segments.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2134
2135 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2136 u32 priority)
2137 {
2138 struct sk_buff *skb;
2139 int err;
2140 struct sk_buff_head seg_queue;
2141
2142 /* Connectionless channel */
2143 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2144 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2145 if (IS_ERR(skb))
2146 return PTR_ERR(skb);
2147
2148 l2cap_do_send(chan, skb);
2149 return len;
2150 }
2151
2152 switch (chan->mode) {
2153 case L2CAP_MODE_BASIC:
2154 /* Check outgoing MTU */
2155 if (len > chan->omtu)
2156 return -EMSGSIZE;
2157
2158 /* Create a basic PDU */
2159 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2160 if (IS_ERR(skb))
2161 return PTR_ERR(skb);
2162
2163 l2cap_do_send(chan, skb);
2164 err = len;
2165 break;
2166
2167 case L2CAP_MODE_ERTM:
2168 case L2CAP_MODE_STREAMING:
2169 /* Check outgoing MTU */
2170 if (len > chan->omtu) {
2171 err = -EMSGSIZE;
2172 break;
2173 }
2174
2175 __skb_queue_head_init(&seg_queue);
2176
2177 /* Do segmentation before calling in to the state machine,
2178 * since it's possible to block while waiting for memory
2179 * allocation.
2180 */
2181 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2182
2183 /* The channel could have been closed while segmenting,
2184 * check that it is still connected.
2185 */
2186 if (chan->state != BT_CONNECTED) {
2187 __skb_queue_purge(&seg_queue);
2188 err = -ENOTCONN;
2189 }
2190
2191 if (err)
2192 break;
2193
2194 if (chan->mode == L2CAP_MODE_ERTM)
2195 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2196 else
2197 l2cap_streaming_send(chan, &seg_queue);
2198
2199 err = len;
2200
2201 /* If the skbs were not queued for sending, they'll still be in
2202 * seg_queue and need to be purged.
2203 */
2204 __skb_queue_purge(&seg_queue);
2205 break;
2206
2207 default:
2208 BT_DBG("bad state %1.1x", chan->mode);
2209 err = -EBADFD;
2210 }
2211
2212 return err;
2213 }
2214
/* Send an SREJ S-frame for every missing sequence number between the
 * expected txseq and the one actually received, recording each in
 * srej_list, then advance expected_tx_seq past the received frame.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %d", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Skip frames already buffered out of order */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2237
/* Re-send an SREJ for the most recently requested missing frame */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	/* Nothing outstanding to request */
	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}
2253
/* Re-send SREJs for all outstanding missing frames except txseq,
 * cycling each entry back onto srej_list.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %d", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Re-append so the entry stays pending */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2279
/* Process an incoming ack (reqseq): free every transmitted frame up to
 * but not including reqseq, and stop the retransmission timer once
 * nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %d", chan, reqseq);

	/* Nothing outstanding, or this ack is a duplicate */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %d, unacked_frames %d",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
}
2311
/* Abort the SREJ_SENT receive state: discard all out-of-order buffered
 * frames and pending SREJ requests, and fall back to plain RECV.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	/* Resume reception from the last in-order point */
	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2321
/* ERTM TX state machine, XMIT state: normal transmission is allowed;
 * poll-type events (explicit poll, retransmission timeout) move the
 * channel to WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Point the send cursor at the new data if the queue
		 * was drained, then transmit what the window allows.
		 */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* l2cap_send_ack() sends RNR while locally busy */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the remote we were busy; poll it
			 * with RR(P=1) and wait for the F-bit reply.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the remote and wait for
		 * the F-bit before retransmitting.
		 */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2393
2394 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2395 struct l2cap_ctrl *control,
2396 struct sk_buff_head *skbs, u8 event)
2397 {
2398 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2399 event);
2400
2401 switch (event) {
2402 case L2CAP_EV_DATA_REQUEST:
2403 if (chan->tx_send_head == NULL)
2404 chan->tx_send_head = skb_peek(skbs);
2405 /* Queue data, but don't send. */
2406 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2407 break;
2408 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2409 BT_DBG("Enter LOCAL_BUSY");
2410 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2411
2412 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2413 /* The SREJ_SENT state must be aborted if we are to
2414 * enter the LOCAL_BUSY state.
2415 */
2416 l2cap_abort_rx_srej_sent(chan);
2417 }
2418
2419 l2cap_send_ack(chan);
2420
2421 break;
2422 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2423 BT_DBG("Exit LOCAL_BUSY");
2424 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2425
2426 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2427 struct l2cap_ctrl local_control;
2428 memset(&local_control, 0, sizeof(local_control));
2429 local_control.sframe = 1;
2430 local_control.super = L2CAP_SUPER_RR;
2431 local_control.poll = 1;
2432 local_control.reqseq = chan->buffer_seq;
2433 l2cap_send_sframe(chan, &local_control);
2434
2435 chan->retry_count = 1;
2436 __set_monitor_timer(chan);
2437 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2438 }
2439 break;
2440 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2441 l2cap_process_reqseq(chan, control->reqseq);
2442
2443 /* Fall through */
2444
2445 case L2CAP_EV_RECV_FBIT:
2446 if (control && control->final) {
2447 __clear_monitor_timer(chan);
2448 if (chan->unacked_frames > 0)
2449 __set_retrans_timer(chan);
2450 chan->retry_count = 0;
2451 chan->tx_state = L2CAP_TX_STATE_XMIT;
2452 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2453 }
2454 break;
2455 case L2CAP_EV_EXPLICIT_POLL:
2456 /* Ignore */
2457 break;
2458 case L2CAP_EV_MONITOR_TO:
2459 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2460 l2cap_send_rr_or_rnr(chan, 1);
2461 __set_monitor_timer(chan);
2462 chan->retry_count++;
2463 } else {
2464 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2465 }
2466 break;
2467 default:
2468 break;
2469 }
2470 }
2471
2472 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2473 struct sk_buff_head *skbs, u8 event)
2474 {
2475 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2476 chan, control, skbs, event, chan->tx_state);
2477
2478 switch (chan->tx_state) {
2479 case L2CAP_TX_STATE_XMIT:
2480 l2cap_tx_state_xmit(chan, control, skbs, event);
2481 break;
2482 case L2CAP_TX_STATE_WAIT_F:
2483 l2cap_tx_state_wait_f(chan, control, skbs, event);
2484 break;
2485 default:
2486 /* Ignore event */
2487 break;
2488 }
2489 }
2490
2491 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2492 struct l2cap_ctrl *control)
2493 {
2494 BT_DBG("chan %p, control %p", chan, control);
2495 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2496 }
2497
2498 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2499 struct l2cap_ctrl *control)
2500 {
2501 BT_DBG("chan %p, control %p", chan, control);
2502 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2503 }
2504
2505 /* Copy frame to all raw sockets on that connection */
2506 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2507 {
2508 struct sk_buff *nskb;
2509 struct l2cap_chan *chan;
2510
2511 BT_DBG("conn %p", conn);
2512
2513 mutex_lock(&conn->chan_lock);
2514
2515 list_for_each_entry(chan, &conn->chan_l, list) {
2516 struct sock *sk = chan->sk;
2517 if (chan->chan_type != L2CAP_CHAN_RAW)
2518 continue;
2519
2520 /* Don't send frame to the socket it came from */
2521 if (skb->sk == sk)
2522 continue;
2523 nskb = skb_clone(skb, GFP_ATOMIC);
2524 if (!nskb)
2525 continue;
2526
2527 if (chan->ops->recv(chan, nskb))
2528 kfree_skb(nskb);
2529 }
2530
2531 mutex_unlock(&conn->chan_lock);
2532 }
2533
2534 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill an L2CAP signalling command PDU.
 *
 * Builds an skb carrying the L2CAP header (directed to the LE or BR/EDR
 * signalling CID depending on the link type), the command header
 * (code/ident/len) and up to @dlen bytes of @data.  If the PDU exceeds
 * the connection MTU, the remainder is carried in continuation
 * fragments chained on skb_shinfo(skb)->frag_list; fragments carry no
 * L2CAP header.
 *
 * Returns the skb on success, or NULL on allocation failure (any
 * partially built skb chain is freed).
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
						u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
	       conn, code, ident, dlen);

	/* Total PDU size; the first skb holds at most one MTU of it */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Headers consumed part of the first skb's budget */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb together with every fragment built so far */
	kfree_skb(skb);
	return NULL;
}
2597
/* Extract one configuration option from the buffer at *ptr and advance
 * *ptr past it.
 *
 * The option type and payload length are stored in *type and *olen.
 * Payloads of 1, 2 or 4 bytes are read (unaligned-safe) as integers
 * into *val; any other length leaves *val as a pointer to the raw
 * payload bytes instead.
 *
 * Returns the total number of bytes consumed (header + payload).
 *
 * NOTE(review): opt->len comes from the remote peer and is not checked
 * here against the remaining buffer length -- callers appear to rely on
 * their own `len >= L2CAP_CONF_OPT_SIZE` loop condition; verify every
 * call site bounds the payload as well.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to the data */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2630
2631 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2632 {
2633 struct l2cap_conf_opt *opt = *ptr;
2634
2635 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2636
2637 opt->type = type;
2638 opt->len = len;
2639
2640 switch (len) {
2641 case 1:
2642 *((u8 *) opt->val) = val;
2643 break;
2644
2645 case 2:
2646 put_unaligned_le16(val, opt->val);
2647 break;
2648
2649 case 4:
2650 put_unaligned_le32(val, opt->val);
2651 break;
2652
2653 default:
2654 memcpy(opt->val, (void *) val, len);
2655 break;
2656 }
2657
2658 *ptr += L2CAP_CONF_OPT_SIZE + len;
2659 }
2660
2661 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2662 {
2663 struct l2cap_conf_efs efs;
2664
2665 switch (chan->mode) {
2666 case L2CAP_MODE_ERTM:
2667 efs.id = chan->local_id;
2668 efs.stype = chan->local_stype;
2669 efs.msdu = cpu_to_le16(chan->local_msdu);
2670 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2671 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2672 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2673 break;
2674
2675 case L2CAP_MODE_STREAMING:
2676 efs.id = 1;
2677 efs.stype = L2CAP_SERV_BESTEFFORT;
2678 efs.msdu = cpu_to_le16(chan->local_msdu);
2679 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2680 efs.acc_lat = 0;
2681 efs.flush_to = 0;
2682 break;
2683
2684 default:
2685 return;
2686 }
2687
2688 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2689 (unsigned long) &efs);
2690 }
2691
2692 static void l2cap_ack_timeout(struct work_struct *work)
2693 {
2694 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2695 ack_timer.work);
2696 u16 frames_to_ack;
2697
2698 BT_DBG("chan %p", chan);
2699
2700 l2cap_chan_lock(chan);
2701
2702 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2703 chan->last_acked_seq);
2704
2705 if (frames_to_ack)
2706 l2cap_send_rr_or_rnr(chan, 0);
2707
2708 l2cap_chan_unlock(chan);
2709 l2cap_chan_put(chan);
2710 }
2711
2712 int l2cap_ertm_init(struct l2cap_chan *chan)
2713 {
2714 int err;
2715
2716 chan->next_tx_seq = 0;
2717 chan->expected_tx_seq = 0;
2718 chan->expected_ack_seq = 0;
2719 chan->unacked_frames = 0;
2720 chan->buffer_seq = 0;
2721 chan->frames_sent = 0;
2722 chan->last_acked_seq = 0;
2723 chan->sdu = NULL;
2724 chan->sdu_last_frag = NULL;
2725 chan->sdu_len = 0;
2726
2727 skb_queue_head_init(&chan->tx_q);
2728
2729 if (chan->mode != L2CAP_MODE_ERTM)
2730 return 0;
2731
2732 chan->rx_state = L2CAP_RX_STATE_RECV;
2733 chan->tx_state = L2CAP_TX_STATE_XMIT;
2734
2735 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2736 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2737 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2738
2739 skb_queue_head_init(&chan->srej_q);
2740
2741 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2742 if (err < 0)
2743 return err;
2744
2745 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2746 if (err < 0)
2747 l2cap_seq_list_free(&chan->srej_list);
2748
2749 return err;
2750 }
2751
2752 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2753 {
2754 switch (mode) {
2755 case L2CAP_MODE_STREAMING:
2756 case L2CAP_MODE_ERTM:
2757 if (l2cap_mode_supported(mode, remote_feat_mask))
2758 return mode;
2759 /* fall through */
2760 default:
2761 return L2CAP_MODE_BASIC;
2762 }
2763 }
2764
2765 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2766 {
2767 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2768 }
2769
2770 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2771 {
2772 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2773 }
2774
2775 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2776 {
2777 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2778 __l2cap_ews_supported(chan)) {
2779 /* use extended control field */
2780 set_bit(FLAG_EXT_CTRL, &chan->flags);
2781 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2782 } else {
2783 chan->tx_win = min_t(u16, chan->tx_win,
2784 L2CAP_DEFAULT_TX_WINDOW);
2785 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2786 }
2787 }
2788
/* Build an L2CAP Configure Request payload for @chan into @data.
 *
 * On the very first exchange the channel mode may be downgraded based
 * on the remote feature mask.  The options emitted depend on the
 * resulting mode: MTU (when not the default), RFC, and for ERTM /
 * streaming optionally EFS, FCS and extended window size.
 *
 * Returns the number of bytes written to @data.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode (re)selection happens only before any request/response
	 * has been exchanged.
	 */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A state-2 device insists on its configured mode */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Send an explicit basic-mode RFC only when the remote
		 * supports ERTM or streaming at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* Cap the PDU size so a full frame (extended header, SDU
		 * length field and FCS included) fits the ACL MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
2910
/* Parse the accumulated Configure Request (chan->conf_req/conf_len)
 * and build a Configure Response into @data.
 *
 * First pass: walk the remote's options, recording MTU, flush timeout,
 * RFC, FCS, EFS and extended-window values and echoing unknown
 * non-hint option types back with result UNKNOWN.  Then, on the first
 * exchange only, resolve the channel mode, and finally emit the output
 * options (MTU, optionally EFS and RFC) with the final result code.
 *
 * Returns the number of bytes written, or -ECONNREFUSED when the
 * configuration cannot be accepted at all.
 *
 * NOTE(review): if the remote sets an EFS option whose length is not
 * sizeof(efs), `efs` stays uninitialized but may still be read below
 * once FLAG_EFS_ENABLE is set -- verify the peer cannot reach that
 * path with a short option.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be silently ignored when unknown */
		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS is accepted but not acted upon */
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window needs high-speed support */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Echo the unknown option type in the response */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode is only negotiated during the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* A state-2 device refuses any other mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Give up after one failed renegotiation attempt */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
				   sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* Service types must be compatible unless one
			 * side is NO TRAFFIC.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* EWS, when received, supersedes the RFC window */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote PDU size to what fits our MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
				     L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE -
				     L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
				     L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE -
				     L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3130
/* Parse the remote's Configure Response (@rsp, @len bytes) and build a
 * follow-up Configure Request into @data.
 *
 * Each recognized option the remote proposed is adopted into channel
 * state (MTU floor-checked, window clamped to the extended maximum,
 * service type checked for compatibility) and echoed in the new
 * request.  On SUCCESS or PENDING results the mode-specific timeouts
 * and PDU size are committed to the channel.
 *
 * Returns the number of bytes written to @data, or -ECONNREFUSED when
 * the response is incompatible with our configuration.  *result may be
 * downgraded to UNACCEPT when the proposed MTU is below the minimum.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A state-2 device cannot change mode here */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->tx_win = min_t(u16, val,
					     L2CAP_DEFAULT_EXT_WINDOW);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service types must match unless one side is
			 * NO TRAFFIC.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					   sizeof(efs), (unsigned long) &efs);
			break;
		}
	}

	/* Basic mode cannot be renegotiated away by the remote */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);

			/* NOTE(review): efs is read here even when no EFS
			 * option (or a short one) arrived in this response
			 * -- confirm FLAG_EFS_ENABLE implies a valid EFS
			 * was received.
			 */
			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3229
3230 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3231 {
3232 struct l2cap_conf_rsp *rsp = data;
3233 void *ptr = rsp->data;
3234
3235 BT_DBG("chan %p", chan);
3236
3237 rsp->scid = cpu_to_le16(chan->dcid);
3238 rsp->result = cpu_to_le16(result);
3239 rsp->flags = cpu_to_le16(flags);
3240
3241 return ptr - data;
3242 }
3243
3244 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3245 {
3246 struct l2cap_conn_rsp rsp;
3247 struct l2cap_conn *conn = chan->conn;
3248 u8 buf[128];
3249
3250 rsp.scid = cpu_to_le16(chan->dcid);
3251 rsp.dcid = cpu_to_le16(chan->scid);
3252 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3253 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3254 l2cap_send_cmd(conn, chan->ident,
3255 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3256
3257 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3258 return;
3259
3260 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3261 l2cap_build_conf_req(chan, buf), buf);
3262 chan->num_conf_req++;
3263 }
3264
/* Extract the RFC option from a Configure Response and commit its
 * timeouts/PDU size to the channel state.  Only meaningful for ERTM
 * and streaming modes; other modes return immediately.  If the remote
 * failed to include an RFC option, sane defaults are used instead.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc;

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Scan the options for the first RFC entry */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			goto done;
		}
	}

	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC option.
	 */
	rfc.mode = chan->mode;
	rfc.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	rfc.max_pdu_size = cpu_to_le16(chan->imtu);

	BT_ERR("Expected RFC option was not found, using defaults");

done:
	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3308
3309 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3310 {
3311 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3312
3313 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3314 return 0;
3315
3316 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3317 cmd->ident == conn->info_ident) {
3318 cancel_delayed_work(&conn->info_timer);
3319
3320 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3321 conn->info_ident = 0;
3322
3323 l2cap_conn_start(conn);
3324 }
3325
3326 return 0;
3327 }
3328
/* Handle an incoming Connection Request.
 *
 * Looks up a listening channel for the requested PSM, verifies link
 * security (except for SDP), rejects duplicate source CIDs, and creates
 * a child channel.  Depending on security/authorization state the
 * response carries SUCCESS or PEND; when our feature-mask info exchange
 * has not completed yet, an Information Request is also sent.  On
 * immediate success the initial Configure Request is sent as well.
 *
 * Always replies with a Connection Response and returns 0.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	/* Lock order: connection channel list first, then parent socket */
	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm = psm;
	/* Remote's source CID becomes our destination CID */
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	/* Our allocated source CID is reported back as the dcid */
	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace must authorize; keep pending */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Authentication still in progress */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature-mask exchange not done yet; defer */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3449
/* Handle an incoming Connection Response to a request we sent.
 *
 * The channel is found via the returned source CID, or via the command
 * ident when scid is zero.  SUCCESS moves the channel to BT_CONFIG and
 * sends the first Configure Request; PEND just flags the channel as
 * connect-pending; any other result tears the channel down with
 * ECONNREFUSED.
 *
 * Returns 0 on success or -EFAULT when no matching channel exists.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		/* No scid yet (e.g. pending response): match by ident */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the initial config request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3517
3518 static inline void set_default_fcs(struct l2cap_chan *chan)
3519 {
3520 /* FCS is enabled only in ERTM or streaming mode, if one or both
3521 * sides request it.
3522 */
3523 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3524 chan->fcs = L2CAP_FCS_NONE;
3525 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3526 chan->fcs = L2CAP_FCS_CRC16;
3527 }
3528
/* Handle an incoming Configure Request.
 *
 * The request payload is accumulated in chan->conf_req across
 * continuation fragments.  Once complete, it is parsed and a response
 * is sent; when both directions have finished configuring, ERTM state
 * is initialized and the channel is marked ready.  Requests for a
 * channel in the wrong state are answered with a Command Reject, and
 * oversized config data is rejected with CONF_REJECT.
 *
 * Returns 0 or a negative errno (-ENOENT when the CID is unknown, or
 * the error from parsing/ERTM init).
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finish channel setup */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
			test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(chan, rsp,
						L2CAP_CONF_SUCCESS, flags), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3636
/* Handle an incoming L2CAP Configure Response.
 *
 * Acts on the remote side's verdict about the options we proposed:
 * success completes our half of the negotiation, pending defers it,
 * unaccepted triggers a re-negotiation attempt (bounded by
 * L2CAP_CONF_MAX_CONF_RSP), and anything else aborts the channel.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);	/* option bytes only */
	int err = 0;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
			result, len);

	/* NOTE(review): presumably returns the channel locked (see
	 * l2cap_chan_unlock at "done") - confirm against the helper.
	 */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		/* If we also answered PENDING earlier, resolve both
		 * pending states now and finish our output side.
		 */
		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* check compatibility */

			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
						l2cap_build_conf_rsp(chan, buf,
							L2CAP_CONF_SUCCESS, 0x0000), buf);
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through - retry budget exhausted, give up */

	default:
		/* Fatal result (or too many rounds): flag the error,
		 * arm the disconnect-reject timer, and tear down.
		 */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	/* More option fragments to come - wait for them */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
3742
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Acknowledges the request and removes the channel.  Lock order is
 * conn->chan_lock -> channel lock -> socket lock; the extra hold
 * reference keeps the channel alive across l2cap_chan_del() so that
 * ops->close() and the final put are safe.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* Their dcid is our scid - look the channel up by it */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	/* Echo the CIDs back, swapped into the peer's perspective */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Hold so the channel survives del; close runs after the
	 * channel lock is dropped.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3788
/* Handle an incoming L2CAP Disconnection Response.
 *
 * The peer has confirmed our disconnect request, so remove the
 * channel (error code 0 - this is a clean shutdown).  Same hold /
 * del / unlock / close / put sequence as l2cap_disconnect_req().
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Keep a reference across del so close/put below are safe */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3822
/* Handle an incoming L2CAP Information Request.
 *
 * Answers feature-mask and fixed-channel queries inline; any other
 * info type gets a "not supported" response.  Always returns 0.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte rsp header + 4-byte feature mask = 8 bytes */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise optional features based on module config */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
						| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte rsp header + 8-byte fixed channel map = 12 */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* Keep the global fixed-channel map in sync with the
		 * current high-speed (A2MP) setting before copying it.
		 */
		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3872
/* Handle an incoming L2CAP Information Response.
 *
 * Completes the info-gathering phase of connection setup: after the
 * feature mask arrives we optionally chain a fixed-channel query,
 * and once everything is known (or the query failed) pending
 * channels are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	/* A response arrived in time - stop the info timeout */
	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Query failed: mark done and proceed without the info */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels - ask which ones */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
3930
/* Handle an incoming Create Channel Request (AMP channel creation).
 *
 * Channel creation over an AMP controller is not implemented yet,
 * so every well-formed request is refused with "no resources".
 * Returns -EPROTO on a malformed PDU and -EINVAL when high-speed
 * support is disabled.
 */
static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	u16 psm, scid;

	/* Validate the PDU length before touching any field */
	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!enable_hs)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);

	/* Placeholder: Always reject */
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
3961
/* Handle an incoming Create Channel Response.
 *
 * The response layout matches a Connect Response, so it is processed
 * by the same handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
3969
3970 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3971 u16 icid, u16 result)
3972 {
3973 struct l2cap_move_chan_rsp rsp;
3974
3975 BT_DBG("icid %d, result %d", icid, result);
3976
3977 rsp.icid = cpu_to_le16(icid);
3978 rsp.result = cpu_to_le16(result);
3979
3980 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3981 }
3982
3983 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3984 struct l2cap_chan *chan, u16 icid, u16 result)
3985 {
3986 struct l2cap_move_chan_cfm cfm;
3987 u8 ident;
3988
3989 BT_DBG("icid %d, result %d", icid, result);
3990
3991 ident = l2cap_get_ident(conn);
3992 if (chan)
3993 chan->ident = ident;
3994
3995 cfm.icid = cpu_to_le16(icid);
3996 cfm.result = cpu_to_le16(result);
3997
3998 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3999 }
4000
4001 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4002 u16 icid)
4003 {
4004 struct l2cap_move_chan_cfm_rsp rsp;
4005
4006 BT_DBG("icid %d", icid);
4007
4008 rsp.icid = cpu_to_le16(icid);
4009 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4010 }
4011
4012 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4013 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4014 {
4015 struct l2cap_move_chan_req *req = data;
4016 u16 icid = 0;
4017 u16 result = L2CAP_MR_NOT_ALLOWED;
4018
4019 if (cmd_len != sizeof(*req))
4020 return -EPROTO;
4021
4022 icid = le16_to_cpu(req->icid);
4023
4024 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
4025
4026 if (!enable_hs)
4027 return -EINVAL;
4028
4029 /* Placeholder: Always refuse */
4030 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4031
4032 return 0;
4033 }
4034
4035 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4036 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4037 {
4038 struct l2cap_move_chan_rsp *rsp = data;
4039 u16 icid, result;
4040
4041 if (cmd_len != sizeof(*rsp))
4042 return -EPROTO;
4043
4044 icid = le16_to_cpu(rsp->icid);
4045 result = le16_to_cpu(rsp->result);
4046
4047 BT_DBG("icid %d, result %d", icid, result);
4048
4049 /* Placeholder: Always unconfirmed */
4050 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4051
4052 return 0;
4053 }
4054
4055 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4056 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4057 {
4058 struct l2cap_move_chan_cfm *cfm = data;
4059 u16 icid, result;
4060
4061 if (cmd_len != sizeof(*cfm))
4062 return -EPROTO;
4063
4064 icid = le16_to_cpu(cfm->icid);
4065 result = le16_to_cpu(cfm->result);
4066
4067 BT_DBG("icid %d, result %d", icid, result);
4068
4069 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4070
4071 return 0;
4072 }
4073
/* Handle an incoming Move Channel Confirm Response.
 *
 * Nothing to do yet beyond validating and logging it - the move
 * state machine is not implemented.  Returns -EPROTO on a malformed
 * PDU, 0 otherwise.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid %d", icid);

	return 0;
}
4089
/* Validate a proposed set of LE connection parameters.
 *
 * min/max are the connection interval bounds, latency the slave
 * latency, and to_multiplier the supervision timeout (10 ms units).
 * Returns 0 when the combination is acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
						u16 to_multiplier)
{
	u16 lat_limit;

	/* Interval bounds must be ordered and inside the legal range */
	if (min < 6 || max > 3200 || min > max)
		return -EINVAL;

	/* Supervision timeout must be within its legal range */
	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* The timeout must exceed the effective connection interval */
	if (max >= to_multiplier * 8)
		return -EINVAL;

	/* Latency is bounded both absolutely and by the timeout/interval
	 * ratio.
	 */
	lat_limit = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > lat_limit)
		return -EINVAL;

	return 0;
}
4110
/* Handle an LE Connection Parameter Update Request.
 *
 * Only the master side may act on this PDU.  The proposed parameters
 * are validated, a response (accepted/rejected) is always sent, and
 * on acceptance the controller is asked to apply the new values.
 * Returns -EINVAL when not master, -EPROTO on a malformed PDU.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Slaves request updates; only the master processes them */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	/* Validate the PDU length before parsing any field */
	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Only push accepted parameters down to the controller */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
4152
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Echo requests are answered inline (the payload is reflected back);
 * echo responses need no action.  Unknown opcodes return -EINVAL so
 * the caller can send a command reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Reflect the request payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_create_channel_rsp(conn, cmd, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4234
4235 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4236 struct l2cap_cmd_hdr *cmd, u8 *data)
4237 {
4238 switch (cmd->code) {
4239 case L2CAP_COMMAND_REJ:
4240 return 0;
4241
4242 case L2CAP_CONN_PARAM_UPDATE_REQ:
4243 return l2cap_conn_param_update_req(conn, cmd, data);
4244
4245 case L2CAP_CONN_PARAM_UPDATE_RSP:
4246 return 0;
4247
4248 default:
4249 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4250 return -EINVAL;
4251 }
4252 }
4253
/* Process the L2CAP signaling channel payload of one ACL frame.
 *
 * A frame may carry several back-to-back commands; each is parsed,
 * validated (declared length must fit the remaining data, ident 0 is
 * reserved and rejected), and dispatched to the LE or BR/EDR handler
 * depending on the link type.  A handler error is answered with a
 * command reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy of the signaling traffic first */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Truncated command or reserved ident 0: stop parsing */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload to the next one */
		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
4300
/* Verify and strip the frame check sequence of a received frame.
 *
 * When the channel uses CRC16, the two FCS bytes are trimmed off the
 * tail and the received value is read from just past the new end
 * (the bytes are still present in the linear buffer).  The expected
 * CRC covers the L2CAP header (located hdr_size bytes before
 * skb->data) plus the remaining payload.
 *
 * Returns 0 on match (or when no FCS is in use), -EBADMSG otherwise.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* Extended control fields make the header longer */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
4321
/* Respond to a poll by sending a frame with the F-bit set.
 *
 * Prefers to carry the F-bit on an RNR (when locally busy) or on a
 * pending I-frame; if neither went out, falls back to an explicit RR
 * s-frame so the poll is always answered.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Locally busy: the F-bit rides on an RNR */
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just cleared its busy state; restart retransmission
	 * timing if frames are still outstanding.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
			chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
4355
/* Append an skb fragment to a frame under reassembly.
 *
 * Links new_frag onto skb's frag_list (starting the list if this is
 * the first fragment), advances *last_frag for O(1) future appends,
 * and keeps skb's length accounting consistent.
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	/* NOTE(review): presumably *last_frag points at skb itself on
	 * the first append (caller-maintained invariant) - confirm.
	 */
	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4374
/* Reassemble an SDU from segmented I-frames.
 *
 * Dispatches on the SAR bits: unsegmented frames are delivered
 * directly; START/CONTINUE/END frames are accumulated on chan->sdu
 * until the declared SDU length is reached, then delivered.
 *
 * skb ownership: once an skb is queued or delivered, the local
 * pointer is set to NULL so the error path below frees only frames
 * that were not consumed.  Any protocol violation (wrong SAR order,
 * oversized or mis-sized SDU) leaves err set and discards the
 * partial SDU.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First fragment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A START frame must not already contain the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;	/* consumed - don't free below */
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Reaching the full length on CONTINUE is an error */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The END frame must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Discard the offending frame and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4456
4457 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4458 {
4459 u8 event;
4460
4461 if (chan->mode != L2CAP_MODE_ERTM)
4462 return;
4463
4464 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4465 l2cap_tx(chan, NULL, NULL, event);
4466 }
4467
/* Drain in-sequence frames from the SREJ queue.
 *
 * After a missing frame arrives, previously-queued out-of-order
 * frames may now be deliverable; feed them to the reassembler in
 * sequence until a gap (or local busy) stops progress.  When the
 * queue empties the channel returns to the normal receive state and
 * acknowledges the peer.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
				chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found - the next expected frame is still missing */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
4501
/* Handle a received SREJ (selective reject) s-frame.
 *
 * The peer is asking for retransmission of the single frame with
 * sequence number control->reqseq.  An SREJ for a frame we never
 * sent, or one whose retry budget is exhausted, aborts the channel.
 * Poll and final bits steer the F-bit handshake and the SREJ-ACT
 * bookkeeping used to suppress duplicate retransmissions.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq references an unsent frame */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx of 0 means unlimited retries */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* Poll bit set: we must answer with the F-bit set */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this final answers an
			 * SREJ we already acted on for the same seq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
4559
/* Handle a received REJ (reject) s-frame.
 *
 * The peer requests retransmission of everything from
 * control->reqseq onward.  A REJ for an unsent frame, or one whose
 * first frame has exhausted its retry budget, aborts the channel.
 * The REJ-ACT flag suppresses a duplicate retransmit-all when the
 * matching final arrives later.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq references an unsent frame */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this final wasn't already
		 * accounted for by a prior REJ action.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
4596
/* Classify the sequence number of a received I-frame.
 *
 * Compares txseq against the expected sequence, the last
 * acknowledged sequence, and (in SREJ_SENT state) the outstanding
 * SREJ bookkeeping, and returns one of the L2CAP_TXSEQ_* verdicts
 * that drive the receive state machine: expected, duplicate,
 * unexpected (gap - triggers SREJ), invalid (disconnect), or
 * invalid-ignore (safe to drop silently).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
				chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
			chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* Earlier than expected but already acknowledged territory:
	 * the frame is a retransmission of something we have.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
		__seq_offset(chan, chan->expected_tx_seq,
			chan->last_acked_seq)){
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
4683
/* ERTM receive state machine - normal RECV state handler.
 *
 * Processes one receive event (I-frame or RR/RNR/REJ/SREJ s-frame).
 * Expected I-frames are delivered to reassembly; a sequence gap
 * queues the frame and sends SREJs, moving to SREJ_SENT state.
 * skb_in_use tracks whether the skb's ownership was handed off;
 * otherwise it is freed at the end.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = 0;	/* set once the skb is queued/consumed */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* While locally busy the frame is dropped; the
			 * peer will retransmit when busy clears.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already have it; still feed acks to the tx side */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Retransmit only if this final wasn't already
			 * consumed by an earlier REJ action.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Plain RR: remote busy cleared - restart the
			 * retransmission timer if frames are pending.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		/* Remote is busy: stop retransmitting until it clears */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
4818
/* ERTM receive state machine handler for L2CAP_RX_STATE_SREJ_SENT.
 *
 * This state is active after a gap in the incoming I-frame sequence was
 * detected and SREJ frames were sent to request retransmission of the
 * missing frames.  Out-of-order I-frames are parked on chan->srej_q
 * until the gaps are filled; skb_in_use tracks whether @skb was queued
 * so the common cleanup at the bottom does not free it.
 *
 * Returns 0 on success or a negative error (from reassembling queued
 * I-frames); the caller disconnects the channel on error.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* A requested retransmission arrived; it is no
			 * longer pending, so drop it from the SREJ list.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			/* Flush any now-in-sequence frames from srej_q
			 * through reassembly.
			 */
			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			/* F-bit set: the peer answered our poll */
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			/* Peer polls us; answer with the tail of the
			 * outstanding SREJ list, F-bit set.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		/* Peer signals receiver-not-ready */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Acknowledge the RNR with a plain RR */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Frames that were not queued for later reassembly are consumed */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
4962
4963 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4964 {
4965 /* Make sure reqseq is for a packet that has been sent but not acked */
4966 u16 unacked;
4967
4968 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4969 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
4970 }
4971
4972 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4973 struct sk_buff *skb, u8 event)
4974 {
4975 int err = 0;
4976
4977 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4978 control, skb, event, chan->rx_state);
4979
4980 if (__valid_reqseq(chan, control->reqseq)) {
4981 switch (chan->rx_state) {
4982 case L2CAP_RX_STATE_RECV:
4983 err = l2cap_rx_state_recv(chan, control, skb, event);
4984 break;
4985 case L2CAP_RX_STATE_SREJ_SENT:
4986 err = l2cap_rx_state_srej_sent(chan, control, skb,
4987 event);
4988 break;
4989 default:
4990 /* shut it down */
4991 break;
4992 }
4993 } else {
4994 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
4995 control->reqseq, chan->next_tx_seq,
4996 chan->expected_ack_seq);
4997 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4998 }
4999
5000 return err;
5001 }
5002
/* Receive path for L2CAP_MODE_STREAMING.
 *
 * Streaming mode has no retransmission: the expected in-sequence frame
 * is reassembled, anything else means frames were lost, so any partial
 * SDU under reassembly is discarded along with the new frame.  The
 * sequence state always advances past the received txseq.
 *
 * Always returns 0 (err is never set).
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Sequence gap: drop any partially reassembled SDU */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize to the frame just received */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
5040
5041 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5042 {
5043 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5044 u16 len;
5045 u8 event;
5046
5047 __unpack_control(chan, skb);
5048
5049 len = skb->len;
5050
5051 /*
5052 * We can just drop the corrupted I-frame here.
5053 * Receiver will miss it and start proper recovery
5054 * procedures and ask for retransmission.
5055 */
5056 if (l2cap_check_fcs(chan, skb))
5057 goto drop;
5058
5059 if (!control->sframe && control->sar == L2CAP_SAR_START)
5060 len -= L2CAP_SDULEN_SIZE;
5061
5062 if (chan->fcs == L2CAP_FCS_CRC16)
5063 len -= L2CAP_FCS_SIZE;
5064
5065 if (len > chan->mps) {
5066 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5067 goto drop;
5068 }
5069
5070 if (!control->sframe) {
5071 int err;
5072
5073 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5074 control->sar, control->reqseq, control->final,
5075 control->txseq);
5076
5077 /* Validate F-bit - F=0 always valid, F=1 only
5078 * valid in TX WAIT_F
5079 */
5080 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5081 goto drop;
5082
5083 if (chan->mode != L2CAP_MODE_STREAMING) {
5084 event = L2CAP_EV_RECV_IFRAME;
5085 err = l2cap_rx(chan, control, skb, event);
5086 } else {
5087 err = l2cap_stream_rx(chan, control, skb);
5088 }
5089
5090 if (err)
5091 l2cap_send_disconn_req(chan->conn, chan,
5092 ECONNRESET);
5093 } else {
5094 const u8 rx_func_to_event[4] = {
5095 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5096 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5097 };
5098
5099 /* Only I-frames are expected in streaming mode */
5100 if (chan->mode == L2CAP_MODE_STREAMING)
5101 goto drop;
5102
5103 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5104 control->reqseq, control->final, control->poll,
5105 control->super);
5106
5107 if (len != 0) {
5108 BT_ERR("%d", len);
5109 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5110 goto drop;
5111 }
5112
5113 /* Validate F and P bits */
5114 if (control->final && (control->poll ||
5115 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5116 goto drop;
5117
5118 event = rx_func_to_event[control->super];
5119 if (l2cap_rx(chan, control, skb, event))
5120 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5121 }
5122
5123 return 0;
5124
5125 drop:
5126 kfree_skb(skb);
5127 return 0;
5128 }
5129
/* Deliver an incoming data frame to the channel identified by @cid.
 *
 * If no channel exists but the CID is the A2MP fixed channel, an A2MP
 * channel is created on demand; otherwise the frame is dropped.
 *
 * Locking: assumes l2cap_get_chan_by_scid() returns the channel locked
 * (the A2MP branch locks explicitly) — unlocked at "done".  TODO confirm
 * against the helper's definition, which is outside this view.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			/* Incoming A2MP: create the channel on demand */
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return 0;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return 0;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() returning 0 means the skb was consumed */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() takes ownership of the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);

	return 0;
}
5189
5190 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
5191 {
5192 struct l2cap_chan *chan;
5193
5194 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5195 if (!chan)
5196 goto drop;
5197
5198 BT_DBG("chan %p, len %d", chan, skb->len);
5199
5200 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5201 goto drop;
5202
5203 if (chan->imtu < skb->len)
5204 goto drop;
5205
5206 if (!chan->ops->recv(chan, skb))
5207 return 0;
5208
5209 drop:
5210 kfree_skb(skb);
5211
5212 return 0;
5213 }
5214
5215 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5216 struct sk_buff *skb)
5217 {
5218 struct l2cap_chan *chan;
5219
5220 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5221 if (!chan)
5222 goto drop;
5223
5224 BT_DBG("chan %p, len %d", chan, skb->len);
5225
5226 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5227 goto drop;
5228
5229 if (chan->imtu < skb->len)
5230 goto drop;
5231
5232 if (!chan->ops->recv(chan, skb))
5233 return 0;
5234
5235 drop:
5236 kfree_skb(skb);
5237
5238 return 0;
5239 }
5240
/* Parse the Basic L2CAP header of a complete frame and dispatch the
 * payload by channel ID (signaling, connectionless, ATT, SMP, or a
 * dynamic data channel).  Consumes @skb on every path.
 *
 * Assumes skb->len >= L2CAP_HDR_SIZE — the caller
 * (l2cap_recv_acldata) enforces this for start fragments; TODO confirm
 * no other caller exists.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the remaining payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry a PSM before the payload */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		/* A failed SMP exchange tears down the whole connection */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
5284
5285 /* ---- L2CAP interface with lower layer (HCI) ---- */
5286
5287 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5288 {
5289 int exact = 0, lm1 = 0, lm2 = 0;
5290 struct l2cap_chan *c;
5291
5292 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5293
5294 /* Find listening sockets and check their link_mode */
5295 read_lock(&chan_list_lock);
5296 list_for_each_entry(c, &chan_list, global_l) {
5297 struct sock *sk = c->sk;
5298
5299 if (c->state != BT_LISTEN)
5300 continue;
5301
5302 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5303 lm1 |= HCI_LM_ACCEPT;
5304 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5305 lm1 |= HCI_LM_MASTER;
5306 exact++;
5307 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5308 lm2 |= HCI_LM_ACCEPT;
5309 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5310 lm2 |= HCI_LM_MASTER;
5311 }
5312 }
5313 read_unlock(&chan_list_lock);
5314
5315 return exact ? lm1 : lm2;
5316 }
5317
5318 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5319 {
5320 struct l2cap_conn *conn;
5321
5322 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5323
5324 if (!status) {
5325 conn = l2cap_conn_add(hcon, status);
5326 if (conn)
5327 l2cap_conn_ready(conn);
5328 } else
5329 l2cap_conn_del(hcon, bt_to_errno(status));
5330
5331 return 0;
5332 }
5333
5334 int l2cap_disconn_ind(struct hci_conn *hcon)
5335 {
5336 struct l2cap_conn *conn = hcon->l2cap_data;
5337
5338 BT_DBG("hcon %p", hcon);
5339
5340 if (!conn)
5341 return HCI_ERROR_REMOTE_USER_TERM;
5342 return conn->disc_reason;
5343 }
5344
/* HCI callback: the ACL link was disconnected; tear down the L2CAP
 * connection, translating the HCI reason to an errno.
 */
int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
	return 0;
}
5352
5353 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5354 {
5355 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5356 return;
5357
5358 if (encrypt == 0x00) {
5359 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5360 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5361 } else if (chan->sec_level == BT_SECURITY_HIGH)
5362 l2cap_chan_close(chan, ECONNREFUSED);
5363 } else {
5364 if (chan->sec_level == BT_SECURITY_MEDIUM)
5365 __clear_chan_timer(chan);
5366 }
5367 }
5368
/* HCI callback: authentication/encryption state of the link changed.
 *
 * Walks every channel on the connection and advances its state machine:
 * LE data channels become ready, channels waiting on security either
 * proceed with the connect/configure handshake or are shut down, and
 * already-connected channels are re-validated via
 * l2cap_check_encryption().
 *
 * @status:  0 on success, otherwise an HCI error code.
 * @encrypt: non-zero if the link is now encrypted.
 *
 * Locking: takes conn->chan_lock, then each channel lock in turn; the
 * BT_CONNECT2 branch additionally takes the socket lock.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	if (hcon->type == LE_LINK) {
		/* Encrypted LE link: kick off SMP key distribution and
		 * stop the pending security timeout.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan->scid %d", chan->scid);

		if (chan->scid == L2CAP_CID_LE_DATA) {
			/* LE channels become ready as soon as the link
			 * is encrypted; no L2CAP connect handshake.
			 */
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Channels still waiting for a pending connect are
		 * handled when that completes.
		 */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			/* Security upgrade done: resume the socket */
			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security done (or failed) before the connect
			 * request went out.
			 */
			if (!status) {
				l2cap_send_conn_req(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect was waiting on security;
			 * answer it now.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must still authorize */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Accepted: start configuration immediately */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
5480
/* HCI callback: an ACL data packet arrived.
 *
 * L2CAP frames may be fragmented across multiple ACL packets.  A start
 * packet (no ACL_CONT flag) carries the Basic L2CAP header, from which
 * the total frame length is known; continuation packets are appended to
 * conn->rx_skb until conn->rx_len reaches zero, then the complete frame
 * is handed to l2cap_recv_frame().  Malformed fragment sequences mark
 * the connection unreliable and drop the data.
 *
 * Consumes @skb on every path.  Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start frame while reassembly is in progress
		 * means the previous frame was truncated.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
5572
5573 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5574 {
5575 struct l2cap_chan *c;
5576
5577 read_lock(&chan_list_lock);
5578
5579 list_for_each_entry(c, &chan_list, global_l) {
5580 struct sock *sk = c->sk;
5581
5582 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5583 batostr(&bt_sk(sk)->src),
5584 batostr(&bt_sk(sk)->dst),
5585 c->state, __le16_to_cpu(c->psm),
5586 c->scid, c->dcid, c->imtu, c->omtu,
5587 c->sec_level, c->mode);
5588 }
5589
5590 read_unlock(&chan_list_lock);
5591
5592 return 0;
5593 }
5594
/* debugfs open callback: wire the file up to the seq_file show routine */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5599
/* File operations for the read-only "l2cap" debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file; created in l2cap_init(), removed in exit */
static struct dentry *l2cap_debugfs;
5608
/* Initialize the L2CAP layer: register the socket interface and create
 * the debugfs entry.  Debugfs failure is logged but not fatal.
 */
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	if (bt_debugfs) {
		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
						bt_debugfs, NULL, &l2cap_debugfs_fops);
		if (!l2cap_debugfs)
			BT_ERR("Failed to create L2CAP debug file");
	}

	return 0;
}
5626
/* Tear down the L2CAP layer: remove debugfs entry, unregister sockets */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5632
/* Allow ERTM to be disabled at module load time (interoperability aid) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
This page took 0.204761 seconds and 5 git commands to generate.