Bluetooth: Flag ACL frames as complete for AMP controllers
net/bluetooth/l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41
42 bool disable_ertm;
43
44 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
45 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
46
47 static LIST_HEAD(chan_list);
48 static DEFINE_RWLOCK(chan_list_lock);
49
50 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
52 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
53 void *data);
54 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
57
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
60
61 /* ---- L2CAP channels ---- */
62
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 u16 cid)
65 {
66 struct l2cap_chan *c;
67
68 list_for_each_entry(c, &conn->chan_l, list) {
69 if (c->dcid == cid)
70 return c;
71 }
72 return NULL;
73 }
74
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 u16 cid)
77 {
78 struct l2cap_chan *c;
79
80 list_for_each_entry(c, &conn->chan_l, list) {
81 if (c->scid == cid)
82 return c;
83 }
84 return NULL;
85 }
86
87 /* Find channel with given SCID.
88 * Returns locked channel. */
89 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
90 u16 cid)
91 {
92 struct l2cap_chan *c;
93
94 mutex_lock(&conn->chan_lock);
95 c = __l2cap_get_chan_by_scid(conn, cid);
96 if (c)
97 l2cap_chan_lock(c);
98 mutex_unlock(&conn->chan_lock);
99
100 return c;
101 }
102
103 /* Find channel with given DCID.
104 * Returns locked channel.
105 */
106 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
107 u16 cid)
108 {
109 struct l2cap_chan *c;
110
111 mutex_lock(&conn->chan_lock);
112 c = __l2cap_get_chan_by_dcid(conn, cid);
113 if (c)
114 l2cap_chan_lock(c);
115 mutex_unlock(&conn->chan_lock);
116
117 return c;
118 }
119
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
121 u8 ident)
122 {
123 struct l2cap_chan *c;
124
125 list_for_each_entry(c, &conn->chan_l, list) {
126 if (c->ident == ident)
127 return c;
128 }
129 return NULL;
130 }
131
132 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
133 u8 ident)
134 {
135 struct l2cap_chan *c;
136
137 mutex_lock(&conn->chan_lock);
138 c = __l2cap_get_chan_by_ident(conn, ident);
139 if (c)
140 l2cap_chan_lock(c);
141 mutex_unlock(&conn->chan_lock);
142
143 return c;
144 }
145
146 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
147 {
148 struct l2cap_chan *c;
149
150 list_for_each_entry(c, &chan_list, global_l) {
151 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
152 return c;
153 }
154 return NULL;
155 }
156
157 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
158 {
159 int err;
160
161 write_lock(&chan_list_lock);
162
163 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
164 err = -EADDRINUSE;
165 goto done;
166 }
167
168 if (psm) {
169 chan->psm = psm;
170 chan->sport = psm;
171 err = 0;
172 } else {
173 u16 p;
174
175 err = -EINVAL;
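/* No PSM was given: auto-allocate an odd PSM from the start of the
* dynamic range (0x1001-0x10ff) that is not already bound to this
* source address. */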
176 for (p = 0x1001; p < 0x1100; p += 2)
177 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
178 chan->psm = cpu_to_le16(p);
179 chan->sport = cpu_to_le16(p);
180 err = 0;
181 break;
182 }
183 }
184
185 done:
186 write_unlock(&chan_list_lock);
187 return err;
188 }
189
190 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
191 {
192 write_lock(&chan_list_lock);
193
194 chan->scid = scid;
195
196 write_unlock(&chan_list_lock);
197
198 return 0;
199 }
200
201 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
202 {
203 u16 cid = L2CAP_CID_DYN_START;
204
205 for (; cid < L2CAP_CID_DYN_END; cid++) {
206 if (!__l2cap_get_chan_by_scid(conn, cid))
207 return cid;
208 }
209
210 return 0;
211 }
212
213 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
214 {
215 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
216 state_to_string(state));
217
218 chan->state = state;
219 chan->ops->state_change(chan, state);
220 }
221
222 static void l2cap_state_change(struct l2cap_chan *chan, int state)
223 {
224 struct sock *sk = chan->sk;
225
226 lock_sock(sk);
227 __l2cap_state_change(chan, state);
228 release_sock(sk);
229 }
230
231 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
232 {
233 struct sock *sk = chan->sk;
234
235 sk->sk_err = err;
236 }
237
238 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
239 {
240 struct sock *sk = chan->sk;
241
242 lock_sock(sk);
243 __l2cap_chan_set_err(chan, err);
244 release_sock(sk);
245 }
246
247 static void __set_retrans_timer(struct l2cap_chan *chan)
248 {
249 if (!delayed_work_pending(&chan->monitor_timer) &&
250 chan->retrans_timeout) {
251 l2cap_set_timer(chan, &chan->retrans_timer,
252 msecs_to_jiffies(chan->retrans_timeout));
253 }
254 }
255
256 static void __set_monitor_timer(struct l2cap_chan *chan)
257 {
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
262 }
263 }
264
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
266 u16 seq)
267 {
268 struct sk_buff *skb;
269
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
272 return skb;
273 }
274
275 return NULL;
276 }
277
278 /* ---- L2CAP sequence number lists ---- */
279
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
286 * allocs or frees.
287 */
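/* Illustrative trace (hypothetical values): with size 4 the array has
* 4 slots and mask 3. Appending 6, 7 and 9 gives head = 6, tail = 9,
* list[6 & 3] = 7, list[7 & 3] = 9 and list[9 & 3] = L2CAP_SEQ_LIST_TAIL;
* l2cap_seq_list_pop() then yields 6, 7, 9 in order, resetting each
* visited slot back to L2CAP_SEQ_LIST_CLEAR as it goes. */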
288
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
290 {
291 size_t alloc_size, i;
292
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) into a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
296 */
297 alloc_size = roundup_pow_of_two(size);
298
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
300 if (!seq_list->list)
301 return -ENOMEM;
302
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
308
309 return 0;
310 }
311
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
313 {
314 kfree(seq_list->list);
315 }
316
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
318 u16 seq)
319 {
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
322 }
323
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
325 {
326 u16 mask = seq_list->mask;
327
328 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR;
331 } else if (seq_list->head == seq) {
332 /* Head can be removed in constant time */
333 seq_list->head = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
335
336 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
339 }
340 } else {
341 /* Walk the list to find the sequence number */
342 u16 prev = seq_list->head;
343 while (seq_list->list[prev & mask] != seq) {
344 prev = seq_list->list[prev & mask];
345 if (prev == L2CAP_SEQ_LIST_TAIL)
346 return L2CAP_SEQ_LIST_CLEAR;
347 }
348
349 /* Unlink the number from the list and clear it */
350 seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 if (seq_list->tail == seq)
353 seq_list->tail = prev;
354 }
355 return seq;
356 }
357
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
359 {
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
362 }
363
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
365 {
366 u16 i;
367
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 return;
370
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
373
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376 }
377
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
379 {
380 u16 mask = seq_list->mask;
381
382 /* All appends happen in constant time */
383
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
385 return;
386
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
389 else
390 seq_list->list[seq_list->tail & mask] = seq;
391
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
394 }
395
396 static void l2cap_chan_timeout(struct work_struct *work)
397 {
398 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
399 chan_timer.work);
400 struct l2cap_conn *conn = chan->conn;
401 int reason;
402
403 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
404
405 mutex_lock(&conn->chan_lock);
406 l2cap_chan_lock(chan);
407
408 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 reason = ECONNREFUSED;
410 else if (chan->state == BT_CONNECT &&
411 chan->sec_level != BT_SECURITY_SDP)
412 reason = ECONNREFUSED;
413 else
414 reason = ETIMEDOUT;
415
416 l2cap_chan_close(chan, reason);
417
418 l2cap_chan_unlock(chan);
419
420 chan->ops->close(chan);
421 mutex_unlock(&conn->chan_lock);
422
423 l2cap_chan_put(chan);
424 }
425
426 struct l2cap_chan *l2cap_chan_create(void)
427 {
428 struct l2cap_chan *chan;
429
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
431 if (!chan)
432 return NULL;
433
434 mutex_init(&chan->lock);
435
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
439
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
441
442 chan->state = BT_OPEN;
443
444 kref_init(&chan->kref);
445
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
448
449 BT_DBG("chan %p", chan);
450
451 return chan;
452 }
453
454 static void l2cap_chan_destroy(struct kref *kref)
455 {
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
457
458 BT_DBG("chan %p", chan);
459
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
463
464 kfree(chan);
465 }
466
467 void l2cap_chan_hold(struct l2cap_chan *c)
468 {
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
470
471 kref_get(&c->kref);
472 }
473
474 void l2cap_chan_put(struct l2cap_chan *c)
475 {
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
477
478 kref_put(&c->kref, l2cap_chan_destroy);
479 }
480
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
482 {
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
489
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
491 }
492
493 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
494 {
495 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 __le16_to_cpu(chan->psm), chan->dcid);
497
498 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
499
500 chan->conn = conn;
501
502 switch (chan->chan_type) {
503 case L2CAP_CHAN_CONN_ORIENTED:
504 if (conn->hcon->type == LE_LINK) {
505 /* LE connection */
506 chan->omtu = L2CAP_DEFAULT_MTU;
507 chan->scid = L2CAP_CID_LE_DATA;
508 chan->dcid = L2CAP_CID_LE_DATA;
509 } else {
510 /* Alloc CID for connection-oriented socket */
511 chan->scid = l2cap_alloc_cid(conn);
512 chan->omtu = L2CAP_DEFAULT_MTU;
513 }
514 break;
515
516 case L2CAP_CHAN_CONN_LESS:
517 /* Connectionless socket */
518 chan->scid = L2CAP_CID_CONN_LESS;
519 chan->dcid = L2CAP_CID_CONN_LESS;
520 chan->omtu = L2CAP_DEFAULT_MTU;
521 break;
522
523 case L2CAP_CHAN_CONN_FIX_A2MP:
524 chan->scid = L2CAP_CID_A2MP;
525 chan->dcid = L2CAP_CID_A2MP;
526 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
527 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
528 break;
529
530 default:
531 /* Raw socket can send/recv signalling messages only */
532 chan->scid = L2CAP_CID_SIGNALING;
533 chan->dcid = L2CAP_CID_SIGNALING;
534 chan->omtu = L2CAP_DEFAULT_MTU;
535 }
536
537 chan->local_id = L2CAP_BESTEFFORT_ID;
538 chan->local_stype = L2CAP_SERV_BESTEFFORT;
539 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
540 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
541 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
542 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
543
544 l2cap_chan_hold(chan);
545
546 list_add(&chan->list, &conn->chan_l);
547 }
548
549 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
550 {
551 mutex_lock(&conn->chan_lock);
552 __l2cap_chan_add(conn, chan);
553 mutex_unlock(&conn->chan_lock);
554 }
555
556 void l2cap_chan_del(struct l2cap_chan *chan, int err)
557 {
558 struct l2cap_conn *conn = chan->conn;
559
560 __clear_chan_timer(chan);
561
562 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
563
564 if (conn) {
565 struct amp_mgr *mgr = conn->hcon->amp_mgr;
566 /* Delete from channel list */
567 list_del(&chan->list);
568
569 l2cap_chan_put(chan);
570
571 chan->conn = NULL;
572
573 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
574 hci_conn_put(conn->hcon);
575
576 if (mgr && mgr->bredr_chan == chan)
577 mgr->bredr_chan = NULL;
578 }
579
580 chan->ops->teardown(chan, err);
581
582 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
583 return;
584
585 switch (chan->mode) {
586 case L2CAP_MODE_BASIC:
587 break;
588
589 case L2CAP_MODE_ERTM:
590 __clear_retrans_timer(chan);
591 __clear_monitor_timer(chan);
592 __clear_ack_timer(chan);
593
594 skb_queue_purge(&chan->srej_q);
595
596 l2cap_seq_list_free(&chan->srej_list);
597 l2cap_seq_list_free(&chan->retrans_list);
598
599 /* fall through */
600
601 case L2CAP_MODE_STREAMING:
602 skb_queue_purge(&chan->tx_q);
603 break;
604 }
605
606 return;
607 }
608
609 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
610 {
611 struct l2cap_conn *conn = chan->conn;
612 struct sock *sk = chan->sk;
613
614 BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
615 sk);
616
617 switch (chan->state) {
618 case BT_LISTEN:
619 chan->ops->teardown(chan, 0);
620 break;
621
622 case BT_CONNECTED:
623 case BT_CONFIG:
624 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
625 conn->hcon->type == ACL_LINK) {
626 __set_chan_timer(chan, sk->sk_sndtimeo);
627 l2cap_send_disconn_req(conn, chan, reason);
628 } else
629 l2cap_chan_del(chan, reason);
630 break;
631
632 case BT_CONNECT2:
633 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
634 conn->hcon->type == ACL_LINK) {
635 struct l2cap_conn_rsp rsp;
636 __u16 result;
637
638 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
639 result = L2CAP_CR_SEC_BLOCK;
640 else
641 result = L2CAP_CR_BAD_PSM;
642 l2cap_state_change(chan, BT_DISCONN);
643
644 rsp.scid = cpu_to_le16(chan->dcid);
645 rsp.dcid = cpu_to_le16(chan->scid);
646 rsp.result = cpu_to_le16(result);
647 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
648 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
649 sizeof(rsp), &rsp);
650 }
651
652 l2cap_chan_del(chan, reason);
653 break;
654
655 case BT_CONNECT:
656 case BT_DISCONN:
657 l2cap_chan_del(chan, reason);
658 break;
659
660 default:
661 chan->ops->teardown(chan, 0);
662 break;
663 }
664 }
665
666 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
667 {
668 if (chan->chan_type == L2CAP_CHAN_RAW) {
669 switch (chan->sec_level) {
670 case BT_SECURITY_HIGH:
671 return HCI_AT_DEDICATED_BONDING_MITM;
672 case BT_SECURITY_MEDIUM:
673 return HCI_AT_DEDICATED_BONDING;
674 default:
675 return HCI_AT_NO_BONDING;
676 }
677 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
678 if (chan->sec_level == BT_SECURITY_LOW)
679 chan->sec_level = BT_SECURITY_SDP;
680
681 if (chan->sec_level == BT_SECURITY_HIGH)
682 return HCI_AT_NO_BONDING_MITM;
683 else
684 return HCI_AT_NO_BONDING;
685 } else {
686 switch (chan->sec_level) {
687 case BT_SECURITY_HIGH:
688 return HCI_AT_GENERAL_BONDING_MITM;
689 case BT_SECURITY_MEDIUM:
690 return HCI_AT_GENERAL_BONDING;
691 default:
692 return HCI_AT_NO_BONDING;
693 }
694 }
695 }
696
697 /* Service level security */
698 int l2cap_chan_check_security(struct l2cap_chan *chan)
699 {
700 struct l2cap_conn *conn = chan->conn;
701 __u8 auth_type;
702
703 auth_type = l2cap_get_auth_type(chan);
704
705 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
706 }
707
708 static u8 l2cap_get_ident(struct l2cap_conn *conn)
709 {
710 u8 id;
711
712 /* Get next available identifier.
713 * 1 - 128 are used by the kernel.
714 * 129 - 199 are reserved.
715 * 200 - 254 are used by utilities like l2ping, etc.
716 */
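/* The ident written into a signalling PDU is what lets the eventual
* response be matched back to the original request. */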
717
718 spin_lock(&conn->lock);
719
720 if (++conn->tx_ident > 128)
721 conn->tx_ident = 1;
722
723 id = conn->tx_ident;
724
725 spin_unlock(&conn->lock);
726
727 return id;
728 }
729
730 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
731 void *data)
732 {
733 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
734 u8 flags;
735
736 BT_DBG("code 0x%2.2x", code);
737
738 if (!skb)
739 return;
740
741 if (lmp_no_flush_capable(conn->hcon->hdev))
742 flags = ACL_START_NO_FLUSH;
743 else
744 flags = ACL_START;
745
746 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
747 skb->priority = HCI_PRIO_MAX;
748
749 hci_send_acl(conn->hchan, skb, flags);
750 }
751
752 static bool __chan_is_moving(struct l2cap_chan *chan)
753 {
754 return chan->move_state != L2CAP_MOVE_STABLE &&
755 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
756 }
757
758 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
759 {
760 struct hci_conn *hcon = chan->conn->hcon;
761 u16 flags;
762
763 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
764 skb->priority);
765
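/* Frames for a channel on a high speed (AMP) link are sent with the
* ACL_COMPLETE packet boundary flag, i.e. each ACL packet carries a
* complete L2CAP PDU, which is what AMP controllers expect. */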
766 if (chan->hs_hcon && !__chan_is_moving(chan)) {
767 if (chan->hs_hchan)
768 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
769 else
770 kfree_skb(skb);
771
772 return;
773 }
774
775 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
776 lmp_no_flush_capable(hcon->hdev))
777 flags = ACL_START_NO_FLUSH;
778 else
779 flags = ACL_START;
780
781 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
782 hci_send_acl(chan->conn->hchan, skb, flags);
783 }
784
785 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
786 {
787 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
788 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
789
790 if (enh & L2CAP_CTRL_FRAME_TYPE) {
791 /* S-Frame */
792 control->sframe = 1;
793 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
794 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
795
796 control->sar = 0;
797 control->txseq = 0;
798 } else {
799 /* I-Frame */
800 control->sframe = 0;
801 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
802 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
803
804 control->poll = 0;
805 control->super = 0;
806 }
807 }
808
809 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
810 {
811 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
812 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
813
814 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
815 /* S-Frame */
816 control->sframe = 1;
817 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
818 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
819
820 control->sar = 0;
821 control->txseq = 0;
822 } else {
823 /* I-Frame */
824 control->sframe = 0;
825 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
826 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
827
828 control->poll = 0;
829 control->super = 0;
830 }
831 }
832
833 static inline void __unpack_control(struct l2cap_chan *chan,
834 struct sk_buff *skb)
835 {
836 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
837 __unpack_extended_control(get_unaligned_le32(skb->data),
838 &bt_cb(skb)->control);
839 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
840 } else {
841 __unpack_enhanced_control(get_unaligned_le16(skb->data),
842 &bt_cb(skb)->control);
843 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
844 }
845 }
846
847 static u32 __pack_extended_control(struct l2cap_ctrl *control)
848 {
849 u32 packed;
850
851 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
852 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
853
854 if (control->sframe) {
855 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
856 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
857 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
858 } else {
859 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
860 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
861 }
862
863 return packed;
864 }
865
866 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
867 {
868 u16 packed;
869
870 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
871 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
872
873 if (control->sframe) {
874 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
875 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
876 packed |= L2CAP_CTRL_FRAME_TYPE;
877 } else {
878 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
879 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
880 }
881
882 return packed;
883 }
884
885 static inline void __pack_control(struct l2cap_chan *chan,
886 struct l2cap_ctrl *control,
887 struct sk_buff *skb)
888 {
889 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
890 put_unaligned_le32(__pack_extended_control(control),
891 skb->data + L2CAP_HDR_SIZE);
892 } else {
893 put_unaligned_le16(__pack_enhanced_control(control),
894 skb->data + L2CAP_HDR_SIZE);
895 }
896 }
897
898 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
899 {
900 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
901 return L2CAP_EXT_HDR_SIZE;
902 else
903 return L2CAP_ENH_HDR_SIZE;
904 }
905
906 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
907 u32 control)
908 {
909 struct sk_buff *skb;
910 struct l2cap_hdr *lh;
911 int hlen = __ertm_hdr_size(chan);
912
913 if (chan->fcs == L2CAP_FCS_CRC16)
914 hlen += L2CAP_FCS_SIZE;
915
916 skb = bt_skb_alloc(hlen, GFP_KERNEL);
917
918 if (!skb)
919 return ERR_PTR(-ENOMEM);
920
921 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
922 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
923 lh->cid = cpu_to_le16(chan->dcid);
924
925 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
926 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
927 else
928 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
929
930 if (chan->fcs == L2CAP_FCS_CRC16) {
931 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
932 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
933 }
934
935 skb->priority = HCI_PRIO_MAX;
936 return skb;
937 }
938
939 static void l2cap_send_sframe(struct l2cap_chan *chan,
940 struct l2cap_ctrl *control)
941 {
942 struct sk_buff *skb;
943 u32 control_field;
944
945 BT_DBG("chan %p, control %p", chan, control);
946
947 if (!control->sframe)
948 return;
949
950 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
951 !control->poll)
952 control->final = 1;
953
954 if (control->super == L2CAP_SUPER_RR)
955 clear_bit(CONN_RNR_SENT, &chan->conn_state);
956 else if (control->super == L2CAP_SUPER_RNR)
957 set_bit(CONN_RNR_SENT, &chan->conn_state);
958
959 if (control->super != L2CAP_SUPER_SREJ) {
960 chan->last_acked_seq = control->reqseq;
961 __clear_ack_timer(chan);
962 }
963
964 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
965 control->final, control->poll, control->super);
966
967 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
968 control_field = __pack_extended_control(control);
969 else
970 control_field = __pack_enhanced_control(control);
971
972 skb = l2cap_create_sframe_pdu(chan, control_field);
973 if (!IS_ERR(skb))
974 l2cap_do_send(chan, skb);
975 }
976
977 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
978 {
979 struct l2cap_ctrl control;
980
981 BT_DBG("chan %p, poll %d", chan, poll);
982
983 memset(&control, 0, sizeof(control));
984 control.sframe = 1;
985 control.poll = poll;
986
987 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
988 control.super = L2CAP_SUPER_RNR;
989 else
990 control.super = L2CAP_SUPER_RR;
991
992 control.reqseq = chan->buffer_seq;
993 l2cap_send_sframe(chan, &control);
994 }
995
996 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
997 {
998 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
999 }
1000
1001 static bool __amp_capable(struct l2cap_chan *chan)
1002 {
1003 struct l2cap_conn *conn = chan->conn;
1004
1005 if (enable_hs &&
1006 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
1007 conn->fixed_chan_mask & L2CAP_FC_A2MP)
1008 return true;
1009 else
1010 return false;
1011 }
1012
1013 void l2cap_send_conn_req(struct l2cap_chan *chan)
1014 {
1015 struct l2cap_conn *conn = chan->conn;
1016 struct l2cap_conn_req req;
1017
1018 req.scid = cpu_to_le16(chan->scid);
1019 req.psm = chan->psm;
1020
1021 chan->ident = l2cap_get_ident(conn);
1022
1023 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1024
1025 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1026 }
1027
1028 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1029 {
1030 struct l2cap_create_chan_req req;
1031 req.scid = cpu_to_le16(chan->scid);
1032 req.psm = chan->psm;
1033 req.amp_id = amp_id;
1034
1035 chan->ident = l2cap_get_ident(chan->conn);
1036
1037 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1038 sizeof(req), &req);
1039 }
1040
1041 static void l2cap_move_setup(struct l2cap_chan *chan)
1042 {
1043 struct sk_buff *skb;
1044
1045 BT_DBG("chan %p", chan);
1046
1047 if (chan->mode != L2CAP_MODE_ERTM)
1048 return;
1049
1050 __clear_retrans_timer(chan);
1051 __clear_monitor_timer(chan);
1052 __clear_ack_timer(chan);
1053
1054 chan->retry_count = 0;
1055 skb_queue_walk(&chan->tx_q, skb) {
1056 if (bt_cb(skb)->control.retries)
1057 bt_cb(skb)->control.retries = 1;
1058 else
1059 break;
1060 }
1061
1062 chan->expected_tx_seq = chan->buffer_seq;
1063
1064 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1065 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1066 l2cap_seq_list_clear(&chan->retrans_list);
1067 l2cap_seq_list_clear(&chan->srej_list);
1068 skb_queue_purge(&chan->srej_q);
1069
1070 chan->tx_state = L2CAP_TX_STATE_XMIT;
1071 chan->rx_state = L2CAP_RX_STATE_MOVE;
1072
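/* Mark the remote as busy so l2cap_ertm_send() will not transmit new
* I-frames while the channel move is in progress. */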
1073 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1074 }
1075
1076 static void l2cap_move_done(struct l2cap_chan *chan)
1077 {
1078 u8 move_role = chan->move_role;
1079 BT_DBG("chan %p", chan);
1080
1081 chan->move_state = L2CAP_MOVE_STABLE;
1082 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1083
1084 if (chan->mode != L2CAP_MODE_ERTM)
1085 return;
1086
1087 switch (move_role) {
1088 case L2CAP_MOVE_ROLE_INITIATOR:
1089 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1090 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1091 break;
1092 case L2CAP_MOVE_ROLE_RESPONDER:
1093 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1094 break;
1095 }
1096 }
1097
1098 static void l2cap_chan_ready(struct l2cap_chan *chan)
1099 {
1100 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1101 chan->conf_state = 0;
1102 __clear_chan_timer(chan);
1103
1104 chan->state = BT_CONNECTED;
1105
1106 chan->ops->ready(chan);
1107 }
1108
1109 static void l2cap_start_connection(struct l2cap_chan *chan)
1110 {
1111 if (__amp_capable(chan)) {
1112 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1113 a2mp_discover_amp(chan);
1114 } else {
1115 l2cap_send_conn_req(chan);
1116 }
1117 }
1118
1119 static void l2cap_do_start(struct l2cap_chan *chan)
1120 {
1121 struct l2cap_conn *conn = chan->conn;
1122
1123 if (conn->hcon->type == LE_LINK) {
1124 l2cap_chan_ready(chan);
1125 return;
1126 }
1127
1128 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1129 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1130 return;
1131
1132 if (l2cap_chan_check_security(chan) &&
1133 __l2cap_no_conn_pending(chan)) {
1134 l2cap_start_connection(chan);
1135 }
1136 } else {
1137 struct l2cap_info_req req;
1138 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1139
1140 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1141 conn->info_ident = l2cap_get_ident(conn);
1142
1143 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1144
1145 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1146 sizeof(req), &req);
1147 }
1148 }
1149
1150 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1151 {
1152 u32 local_feat_mask = l2cap_feat_mask;
1153 if (!disable_ertm)
1154 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1155
1156 switch (mode) {
1157 case L2CAP_MODE_ERTM:
1158 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1159 case L2CAP_MODE_STREAMING:
1160 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1161 default:
1162 return 0x00;
1163 }
1164 }
1165
1166 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
1167 struct l2cap_chan *chan, int err)
1168 {
1169 struct sock *sk = chan->sk;
1170 struct l2cap_disconn_req req;
1171
1172 if (!conn)
1173 return;
1174
1175 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1176 __clear_retrans_timer(chan);
1177 __clear_monitor_timer(chan);
1178 __clear_ack_timer(chan);
1179 }
1180
1181 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1182 l2cap_state_change(chan, BT_DISCONN);
1183 return;
1184 }
1185
1186 req.dcid = cpu_to_le16(chan->dcid);
1187 req.scid = cpu_to_le16(chan->scid);
1188 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1189 sizeof(req), &req);
1190
1191 lock_sock(sk);
1192 __l2cap_state_change(chan, BT_DISCONN);
1193 __l2cap_chan_set_err(chan, err);
1194 release_sock(sk);
1195 }
1196
1197 /* ---- L2CAP connections ---- */
1198 static void l2cap_conn_start(struct l2cap_conn *conn)
1199 {
1200 struct l2cap_chan *chan, *tmp;
1201
1202 BT_DBG("conn %p", conn);
1203
1204 mutex_lock(&conn->chan_lock);
1205
1206 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1207 struct sock *sk = chan->sk;
1208
1209 l2cap_chan_lock(chan);
1210
1211 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1212 l2cap_chan_unlock(chan);
1213 continue;
1214 }
1215
1216 if (chan->state == BT_CONNECT) {
1217 if (!l2cap_chan_check_security(chan) ||
1218 !__l2cap_no_conn_pending(chan)) {
1219 l2cap_chan_unlock(chan);
1220 continue;
1221 }
1222
1223 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1224 && test_bit(CONF_STATE2_DEVICE,
1225 &chan->conf_state)) {
1226 l2cap_chan_close(chan, ECONNRESET);
1227 l2cap_chan_unlock(chan);
1228 continue;
1229 }
1230
1231 l2cap_start_connection(chan);
1232
1233 } else if (chan->state == BT_CONNECT2) {
1234 struct l2cap_conn_rsp rsp;
1235 char buf[128];
1236 rsp.scid = cpu_to_le16(chan->dcid);
1237 rsp.dcid = cpu_to_le16(chan->scid);
1238
1239 if (l2cap_chan_check_security(chan)) {
1240 lock_sock(sk);
1241 if (test_bit(BT_SK_DEFER_SETUP,
1242 &bt_sk(sk)->flags)) {
1243 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1244 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1245 chan->ops->defer(chan);
1246
1247 } else {
1248 __l2cap_state_change(chan, BT_CONFIG);
1249 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1250 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1251 }
1252 release_sock(sk);
1253 } else {
1254 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1255 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1256 }
1257
1258 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1259 sizeof(rsp), &rsp);
1260
1261 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1262 rsp.result != L2CAP_CR_SUCCESS) {
1263 l2cap_chan_unlock(chan);
1264 continue;
1265 }
1266
1267 set_bit(CONF_REQ_SENT, &chan->conf_state);
1268 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1269 l2cap_build_conf_req(chan, buf), buf);
1270 chan->num_conf_req++;
1271 }
1272
1273 l2cap_chan_unlock(chan);
1274 }
1275
1276 mutex_unlock(&conn->chan_lock);
1277 }
1278
1279 /* Find channel with cid and source/destination bdaddr.
1280 * Returns closest match, locked.
1281 */
1282 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1283 bdaddr_t *src,
1284 bdaddr_t *dst)
1285 {
1286 struct l2cap_chan *c, *c1 = NULL;
1287
1288 read_lock(&chan_list_lock);
1289
1290 list_for_each_entry(c, &chan_list, global_l) {
1291 struct sock *sk = c->sk;
1292
1293 if (state && c->state != state)
1294 continue;
1295
1296 if (c->scid == cid) {
1297 int src_match, dst_match;
1298 int src_any, dst_any;
1299
1300 /* Exact match. */
1301 src_match = !bacmp(&bt_sk(sk)->src, src);
1302 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1303 if (src_match && dst_match) {
1304 read_unlock(&chan_list_lock);
1305 return c;
1306 }
1307
1308 /* Closest match */
1309 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1310 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1311 if ((src_match && dst_any) || (src_any && dst_match) ||
1312 (src_any && dst_any))
1313 c1 = c;
1314 }
1315 }
1316
1317 read_unlock(&chan_list_lock);
1318
1319 return c1;
1320 }
1321
1322 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1323 {
1324 struct sock *parent, *sk;
1325 struct l2cap_chan *chan, *pchan;
1326
1327 BT_DBG("");
1328
1329 /* Check if we have socket listening on cid */
1330 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1331 conn->src, conn->dst);
1332 if (!pchan)
1333 return;
1334
1335 parent = pchan->sk;
1336
1337 lock_sock(parent);
1338
1339 chan = pchan->ops->new_connection(pchan);
1340 if (!chan)
1341 goto clean;
1342
1343 sk = chan->sk;
1344
1345 hci_conn_hold(conn->hcon);
1346 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
1347
1348 bacpy(&bt_sk(sk)->src, conn->src);
1349 bacpy(&bt_sk(sk)->dst, conn->dst);
1350
1351 l2cap_chan_add(conn, chan);
1352
1353 l2cap_chan_ready(chan);
1354
1355 clean:
1356 release_sock(parent);
1357 }
1358
1359 static void l2cap_conn_ready(struct l2cap_conn *conn)
1360 {
1361 struct l2cap_chan *chan;
1362 struct hci_conn *hcon = conn->hcon;
1363
1364 BT_DBG("conn %p", conn);
1365
1366 if (!hcon->out && hcon->type == LE_LINK)
1367 l2cap_le_conn_ready(conn);
1368
1369 if (hcon->out && hcon->type == LE_LINK)
1370 smp_conn_security(hcon, hcon->pending_sec_level);
1371
1372 mutex_lock(&conn->chan_lock);
1373
1374 list_for_each_entry(chan, &conn->chan_l, list) {
1375
1376 l2cap_chan_lock(chan);
1377
1378 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1379 l2cap_chan_unlock(chan);
1380 continue;
1381 }
1382
1383 if (hcon->type == LE_LINK) {
1384 if (smp_conn_security(hcon, chan->sec_level))
1385 l2cap_chan_ready(chan);
1386
1387 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1388 struct sock *sk = chan->sk;
1389 __clear_chan_timer(chan);
1390 lock_sock(sk);
1391 __l2cap_state_change(chan, BT_CONNECTED);
1392 sk->sk_state_change(sk);
1393 release_sock(sk);
1394
1395 } else if (chan->state == BT_CONNECT)
1396 l2cap_do_start(chan);
1397
1398 l2cap_chan_unlock(chan);
1399 }
1400
1401 mutex_unlock(&conn->chan_lock);
1402 }
1403
1404 /* Notify sockets that we cannot guarantee reliability anymore */
1405 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1406 {
1407 struct l2cap_chan *chan;
1408
1409 BT_DBG("conn %p", conn);
1410
1411 mutex_lock(&conn->chan_lock);
1412
1413 list_for_each_entry(chan, &conn->chan_l, list) {
1414 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1415 l2cap_chan_set_err(chan, err);
1416 }
1417
1418 mutex_unlock(&conn->chan_lock);
1419 }
1420
1421 static void l2cap_info_timeout(struct work_struct *work)
1422 {
1423 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1424 info_timer.work);
1425
1426 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1427 conn->info_ident = 0;
1428
1429 l2cap_conn_start(conn);
1430 }
1431
1432 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1433 {
1434 struct l2cap_conn *conn = hcon->l2cap_data;
1435 struct l2cap_chan *chan, *l;
1436
1437 if (!conn)
1438 return;
1439
1440 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1441
1442 kfree_skb(conn->rx_skb);
1443
1444 mutex_lock(&conn->chan_lock);
1445
1446 /* Kill channels */
1447 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1448 l2cap_chan_hold(chan);
1449 l2cap_chan_lock(chan);
1450
1451 l2cap_chan_del(chan, err);
1452
1453 l2cap_chan_unlock(chan);
1454
1455 chan->ops->close(chan);
1456 l2cap_chan_put(chan);
1457 }
1458
1459 mutex_unlock(&conn->chan_lock);
1460
1461 hci_chan_del(conn->hchan);
1462
1463 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1464 cancel_delayed_work_sync(&conn->info_timer);
1465
1466 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1467 cancel_delayed_work_sync(&conn->security_timer);
1468 smp_chan_destroy(conn);
1469 }
1470
1471 hcon->l2cap_data = NULL;
1472 kfree(conn);
1473 }
1474
1475 static void security_timeout(struct work_struct *work)
1476 {
1477 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1478 security_timer.work);
1479
1480 BT_DBG("conn %p", conn);
1481
1482 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1483 smp_chan_destroy(conn);
1484 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1485 }
1486 }
1487
1488 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1489 {
1490 struct l2cap_conn *conn = hcon->l2cap_data;
1491 struct hci_chan *hchan;
1492
1493 if (conn || status)
1494 return conn;
1495
1496 hchan = hci_chan_create(hcon);
1497 if (!hchan)
1498 return NULL;
1499
1500 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1501 if (!conn) {
1502 hci_chan_del(hchan);
1503 return NULL;
1504 }
1505
1506 hcon->l2cap_data = conn;
1507 conn->hcon = hcon;
1508 conn->hchan = hchan;
1509
1510 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1511
1512 switch (hcon->type) {
1513 case AMP_LINK:
1514 conn->mtu = hcon->hdev->block_mtu;
1515 break;
1516
1517 case LE_LINK:
1518 if (hcon->hdev->le_mtu) {
1519 conn->mtu = hcon->hdev->le_mtu;
1520 break;
1521 }
1522 /* fall through */
1523
1524 default:
1525 conn->mtu = hcon->hdev->acl_mtu;
1526 break;
1527 }
1528
1529 conn->src = &hcon->hdev->bdaddr;
1530 conn->dst = &hcon->dst;
1531
1532 conn->feat_mask = 0;
1533
1534 spin_lock_init(&conn->lock);
1535 mutex_init(&conn->chan_lock);
1536
1537 INIT_LIST_HEAD(&conn->chan_l);
1538
1539 if (hcon->type == LE_LINK)
1540 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1541 else
1542 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1543
1544 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1545
1546 return conn;
1547 }
1548
1549 /* ---- Socket interface ---- */
1550
1551 /* Find channel with psm and source/destination bdaddr.
1552 * Returns closest match.
1553 */
1554 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1555 bdaddr_t *src,
1556 bdaddr_t *dst)
1557 {
1558 struct l2cap_chan *c, *c1 = NULL;
1559
1560 read_lock(&chan_list_lock);
1561
1562 list_for_each_entry(c, &chan_list, global_l) {
1563 struct sock *sk = c->sk;
1564
1565 if (state && c->state != state)
1566 continue;
1567
1568 if (c->psm == psm) {
1569 int src_match, dst_match;
1570 int src_any, dst_any;
1571
1572 /* Exact match. */
1573 src_match = !bacmp(&bt_sk(sk)->src, src);
1574 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1575 if (src_match && dst_match) {
1576 read_unlock(&chan_list_lock);
1577 return c;
1578 }
1579
1580 /* Closest match */
1581 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1582 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1583 if ((src_match && dst_any) || (src_any && dst_match) ||
1584 (src_any && dst_any))
1585 c1 = c;
1586 }
1587 }
1588
1589 read_unlock(&chan_list_lock);
1590
1591 return c1;
1592 }
1593
1594 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1595 bdaddr_t *dst, u8 dst_type)
1596 {
1597 struct sock *sk = chan->sk;
1598 bdaddr_t *src = &bt_sk(sk)->src;
1599 struct l2cap_conn *conn;
1600 struct hci_conn *hcon;
1601 struct hci_dev *hdev;
1602 __u8 auth_type;
1603 int err;
1604
1605 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
1606 dst_type, __le16_to_cpu(psm));
1607
1608 hdev = hci_get_route(dst, src);
1609 if (!hdev)
1610 return -EHOSTUNREACH;
1611
1612 hci_dev_lock(hdev);
1613
1614 l2cap_chan_lock(chan);
1615
1616 /* PSM must be odd and lsb of upper byte must be 0 */
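/* e.g. PSMs 0x0001 and 0x1001 pass this check; 0x0002 and 0x0101 do not */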
1617 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1618 chan->chan_type != L2CAP_CHAN_RAW) {
1619 err = -EINVAL;
1620 goto done;
1621 }
1622
1623 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1624 err = -EINVAL;
1625 goto done;
1626 }
1627
1628 switch (chan->mode) {
1629 case L2CAP_MODE_BASIC:
1630 break;
1631 case L2CAP_MODE_ERTM:
1632 case L2CAP_MODE_STREAMING:
1633 if (!disable_ertm)
1634 break;
1635 /* fall through */
1636 default:
1637 err = -ENOTSUPP;
1638 goto done;
1639 }
1640
1641 switch (chan->state) {
1642 case BT_CONNECT:
1643 case BT_CONNECT2:
1644 case BT_CONFIG:
1645 /* Already connecting */
1646 err = 0;
1647 goto done;
1648
1649 case BT_CONNECTED:
1650 /* Already connected */
1651 err = -EISCONN;
1652 goto done;
1653
1654 case BT_OPEN:
1655 case BT_BOUND:
1656 /* Can connect */
1657 break;
1658
1659 default:
1660 err = -EBADFD;
1661 goto done;
1662 }
1663
1664 /* Set destination address and psm */
1665 lock_sock(sk);
1666 bacpy(&bt_sk(sk)->dst, dst);
1667 release_sock(sk);
1668
1669 chan->psm = psm;
1670 chan->dcid = cid;
1671
1672 auth_type = l2cap_get_auth_type(chan);
1673
1674 if (chan->dcid == L2CAP_CID_LE_DATA)
1675 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1676 chan->sec_level, auth_type);
1677 else
1678 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1679 chan->sec_level, auth_type);
1680
1681 if (IS_ERR(hcon)) {
1682 err = PTR_ERR(hcon);
1683 goto done;
1684 }
1685
1686 conn = l2cap_conn_add(hcon, 0);
1687 if (!conn) {
1688 hci_conn_put(hcon);
1689 err = -ENOMEM;
1690 goto done;
1691 }
1692
1693 if (hcon->type == LE_LINK) {
1694 err = 0;
1695
1696 if (!list_empty(&conn->chan_l)) {
1697 err = -EBUSY;
1698 hci_conn_put(hcon);
1699 }
1700
1701 if (err)
1702 goto done;
1703 }
1704
1705 /* Update source addr of the socket */
1706 bacpy(src, conn->src);
1707
1708 l2cap_chan_unlock(chan);
1709 l2cap_chan_add(conn, chan);
1710 l2cap_chan_lock(chan);
1711
1712 l2cap_state_change(chan, BT_CONNECT);
1713 __set_chan_timer(chan, sk->sk_sndtimeo);
1714
1715 if (hcon->state == BT_CONNECTED) {
1716 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1717 __clear_chan_timer(chan);
1718 if (l2cap_chan_check_security(chan))
1719 l2cap_state_change(chan, BT_CONNECTED);
1720 } else
1721 l2cap_do_start(chan);
1722 }
1723
1724 err = 0;
1725
1726 done:
1727 l2cap_chan_unlock(chan);
1728 hci_dev_unlock(hdev);
1729 hci_dev_put(hdev);
1730 return err;
1731 }
1732
1733 int __l2cap_wait_ack(struct sock *sk)
1734 {
1735 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1736 DECLARE_WAITQUEUE(wait, current);
1737 int err = 0;
1738 int timeo = HZ/5;
1739
1740 add_wait_queue(sk_sleep(sk), &wait);
1741 set_current_state(TASK_INTERRUPTIBLE);
1742 while (chan->unacked_frames > 0 && chan->conn) {
1743 if (!timeo)
1744 timeo = HZ/5;
1745
1746 if (signal_pending(current)) {
1747 err = sock_intr_errno(timeo);
1748 break;
1749 }
1750
1751 release_sock(sk);
1752 timeo = schedule_timeout(timeo);
1753 lock_sock(sk);
1754 set_current_state(TASK_INTERRUPTIBLE);
1755
1756 err = sock_error(sk);
1757 if (err)
1758 break;
1759 }
1760 set_current_state(TASK_RUNNING);
1761 remove_wait_queue(sk_sleep(sk), &wait);
1762 return err;
1763 }
1764
1765 static void l2cap_monitor_timeout(struct work_struct *work)
1766 {
1767 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1768 monitor_timer.work);
1769
1770 BT_DBG("chan %p", chan);
1771
1772 l2cap_chan_lock(chan);
1773
1774 if (!chan->conn) {
1775 l2cap_chan_unlock(chan);
1776 l2cap_chan_put(chan);
1777 return;
1778 }
1779
1780 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1781
1782 l2cap_chan_unlock(chan);
1783 l2cap_chan_put(chan);
1784 }
1785
1786 static void l2cap_retrans_timeout(struct work_struct *work)
1787 {
1788 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1789 retrans_timer.work);
1790
1791 BT_DBG("chan %p", chan);
1792
1793 l2cap_chan_lock(chan);
1794
1795 if (!chan->conn) {
1796 l2cap_chan_unlock(chan);
1797 l2cap_chan_put(chan);
1798 return;
1799 }
1800
1801 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1802 l2cap_chan_unlock(chan);
1803 l2cap_chan_put(chan);
1804 }
1805
1806 static void l2cap_streaming_send(struct l2cap_chan *chan,
1807 struct sk_buff_head *skbs)
1808 {
1809 struct sk_buff *skb;
1810 struct l2cap_ctrl *control;
1811
1812 BT_DBG("chan %p, skbs %p", chan, skbs);
1813
1814 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1815
1816 while (!skb_queue_empty(&chan->tx_q)) {
1817
1818 skb = skb_dequeue(&chan->tx_q);
1819
1820 bt_cb(skb)->control.retries = 1;
1821 control = &bt_cb(skb)->control;
1822
1823 control->reqseq = 0;
1824 control->txseq = chan->next_tx_seq;
1825
1826 __pack_control(chan, control, skb);
1827
1828 if (chan->fcs == L2CAP_FCS_CRC16) {
1829 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1830 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1831 }
1832
1833 l2cap_do_send(chan, skb);
1834
1835 BT_DBG("Sent txseq %u", control->txseq);
1836
1837 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1838 chan->frames_sent++;
1839 }
1840 }
1841
1842 static int l2cap_ertm_send(struct l2cap_chan *chan)
1843 {
1844 struct sk_buff *skb, *tx_skb;
1845 struct l2cap_ctrl *control;
1846 int sent = 0;
1847
1848 BT_DBG("chan %p", chan);
1849
1850 if (chan->state != BT_CONNECTED)
1851 return -ENOTCONN;
1852
1853 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1854 return 0;
1855
1856 while (chan->tx_send_head &&
1857 chan->unacked_frames < chan->remote_tx_win &&
1858 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1859
1860 skb = chan->tx_send_head;
1861
1862 bt_cb(skb)->control.retries = 1;
1863 control = &bt_cb(skb)->control;
1864
1865 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1866 control->final = 1;
1867
1868 control->reqseq = chan->buffer_seq;
1869 chan->last_acked_seq = chan->buffer_seq;
1870 control->txseq = chan->next_tx_seq;
1871
1872 __pack_control(chan, control, skb);
1873
1874 if (chan->fcs == L2CAP_FCS_CRC16) {
1875 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1876 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1877 }
1878
1879 /* Clone after data has been modified. Data is assumed to be
1880 read-only (for locking purposes) on cloned sk_buffs.
1881 */
1882 tx_skb = skb_clone(skb, GFP_KERNEL);
1883
1884 if (!tx_skb)
1885 break;
1886
1887 __set_retrans_timer(chan);
1888
1889 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1890 chan->unacked_frames++;
1891 chan->frames_sent++;
1892 sent++;
1893
1894 if (skb_queue_is_last(&chan->tx_q, skb))
1895 chan->tx_send_head = NULL;
1896 else
1897 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1898
1899 l2cap_do_send(chan, tx_skb);
1900 BT_DBG("Sent txseq %u", control->txseq);
1901 }
1902
1903 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1904 chan->unacked_frames, skb_queue_len(&chan->tx_q));
1905
1906 return sent;
1907 }
1908
1909 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1910 {
1911 struct l2cap_ctrl control;
1912 struct sk_buff *skb;
1913 struct sk_buff *tx_skb;
1914 u16 seq;
1915
1916 BT_DBG("chan %p", chan);
1917
1918 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1919 return;
1920
1921 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1922 seq = l2cap_seq_list_pop(&chan->retrans_list);
1923
1924 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1925 if (!skb) {
1926 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1927 seq);
1928 continue;
1929 }
1930
1931 bt_cb(skb)->control.retries++;
1932 control = bt_cb(skb)->control;
1933
1934 if (chan->max_tx != 0 &&
1935 bt_cb(skb)->control.retries > chan->max_tx) {
1936 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1937 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1938 l2cap_seq_list_clear(&chan->retrans_list);
1939 break;
1940 }
1941
1942 control.reqseq = chan->buffer_seq;
1943 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1944 control.final = 1;
1945 else
1946 control.final = 0;
1947
1948 if (skb_cloned(skb)) {
1949 /* Cloned sk_buffs are read-only, so we need a
1950 * writeable copy
1951 */
1952 tx_skb = skb_copy(skb, GFP_KERNEL);
1953 } else {
1954 tx_skb = skb_clone(skb, GFP_KERNEL);
1955 }
1956
1957 if (!tx_skb) {
1958 l2cap_seq_list_clear(&chan->retrans_list);
1959 break;
1960 }
1961
1962 /* Update skb contents */
1963 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1964 put_unaligned_le32(__pack_extended_control(&control),
1965 tx_skb->data + L2CAP_HDR_SIZE);
1966 } else {
1967 put_unaligned_le16(__pack_enhanced_control(&control),
1968 tx_skb->data + L2CAP_HDR_SIZE);
1969 }
1970
1971 if (chan->fcs == L2CAP_FCS_CRC16) {
1972 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1973 put_unaligned_le16(fcs, skb_put(tx_skb,
1974 L2CAP_FCS_SIZE));
1975 }
1976
1977 l2cap_do_send(chan, tx_skb);
1978
1979 BT_DBG("Resent txseq %d", control.txseq);
1980
1981 chan->last_acked_seq = chan->buffer_seq;
1982 }
1983 }
1984
1985 static void l2cap_retransmit(struct l2cap_chan *chan,
1986 struct l2cap_ctrl *control)
1987 {
1988 BT_DBG("chan %p, control %p", chan, control);
1989
1990 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1991 l2cap_ertm_resend(chan);
1992 }
1993
1994 static void l2cap_retransmit_all(struct l2cap_chan *chan,
1995 struct l2cap_ctrl *control)
1996 {
1997 struct sk_buff *skb;
1998
1999 BT_DBG("chan %p, control %p", chan, control);
2000
2001 if (control->poll)
2002 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2003
2004 l2cap_seq_list_clear(&chan->retrans_list);
2005
2006 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2007 return;
2008
2009 if (chan->unacked_frames) {
2010 skb_queue_walk(&chan->tx_q, skb) {
2011 if (bt_cb(skb)->control.txseq == control->reqseq ||
2012 skb == chan->tx_send_head)
2013 break;
2014 }
2015
2016 skb_queue_walk_from(&chan->tx_q, skb) {
2017 if (skb == chan->tx_send_head)
2018 break;
2019
2020 l2cap_seq_list_append(&chan->retrans_list,
2021 bt_cb(skb)->control.txseq);
2022 }
2023
2024 l2cap_ertm_resend(chan);
2025 }
2026 }
2027
2028 static void l2cap_send_ack(struct l2cap_chan *chan)
2029 {
2030 struct l2cap_ctrl control;
2031 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2032 chan->last_acked_seq);
2033 int threshold;
2034
2035 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2036 chan, chan->last_acked_seq, chan->buffer_seq);
2037
2038 memset(&control, 0, sizeof(control));
2039 control.sframe = 1;
2040
2041 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2042 chan->rx_state == L2CAP_RX_STATE_RECV) {
2043 __clear_ack_timer(chan);
2044 control.super = L2CAP_SUPER_RNR;
2045 control.reqseq = chan->buffer_seq;
2046 l2cap_send_sframe(chan, &control);
2047 } else {
2048 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2049 l2cap_ertm_send(chan);
2050 /* If any i-frames were sent, they included an ack */
2051 if (chan->buffer_seq == chan->last_acked_seq)
2052 frames_to_ack = 0;
2053 }
2054
2055 /* Ack now if the window is 3/4ths full.
2056 * Calculate without mul or div
2057 */
2058 threshold = chan->ack_win;
2059 threshold += threshold << 1;
2060 threshold >>= 2;
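/* i.e. threshold = (3 * ack_win) / 4; an ack_win of 63 gives 47 */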
2061
2062 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2063 threshold);
2064
2065 if (frames_to_ack >= threshold) {
2066 __clear_ack_timer(chan);
2067 control.super = L2CAP_SUPER_RR;
2068 control.reqseq = chan->buffer_seq;
2069 l2cap_send_sframe(chan, &control);
2070 frames_to_ack = 0;
2071 }
2072
2073 if (frames_to_ack)
2074 __set_ack_timer(chan);
2075 }
2076 }
2077
2078 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2079 struct msghdr *msg, int len,
2080 int count, struct sk_buff *skb)
2081 {
2082 struct l2cap_conn *conn = chan->conn;
2083 struct sk_buff **frag;
2084 int sent = 0;
2085
2086 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2087 return -EFAULT;
2088
2089 sent += count;
2090 len -= count;
2091
2092 /* Continuation fragments (no L2CAP header) */
2093 frag = &skb_shinfo(skb)->frag_list;
2094 while (len) {
2095 struct sk_buff *tmp;
2096
2097 count = min_t(unsigned int, conn->mtu, len);
2098
2099 tmp = chan->ops->alloc_skb(chan, count,
2100 msg->msg_flags & MSG_DONTWAIT);
2101 if (IS_ERR(tmp))
2102 return PTR_ERR(tmp);
2103
2104 *frag = tmp;
2105
2106 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2107 return -EFAULT;
2108
2109 (*frag)->priority = skb->priority;
2110
2111 sent += count;
2112 len -= count;
2113
2114 skb->len += (*frag)->len;
2115 skb->data_len += (*frag)->len;
2116
2117 frag = &(*frag)->next;
2118 }
2119
2120 return sent;
2121 }
2122
2123 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2124 struct msghdr *msg, size_t len,
2125 u32 priority)
2126 {
2127 struct l2cap_conn *conn = chan->conn;
2128 struct sk_buff *skb;
2129 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2130 struct l2cap_hdr *lh;
2131
2132 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
2133
2134 count = min_t(unsigned int, (conn->mtu - hlen), len);
2135
2136 skb = chan->ops->alloc_skb(chan, count + hlen,
2137 msg->msg_flags & MSG_DONTWAIT);
2138 if (IS_ERR(skb))
2139 return skb;
2140
2141 skb->priority = priority;
2142
2143 /* Create L2CAP header */
2144 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2145 lh->cid = cpu_to_le16(chan->dcid);
2146 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2147 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2148
2149 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2150 if (unlikely(err < 0)) {
2151 kfree_skb(skb);
2152 return ERR_PTR(err);
2153 }
2154 return skb;
2155 }
2156
2157 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2158 struct msghdr *msg, size_t len,
2159 u32 priority)
2160 {
2161 struct l2cap_conn *conn = chan->conn;
2162 struct sk_buff *skb;
2163 int err, count;
2164 struct l2cap_hdr *lh;
2165
2166 BT_DBG("chan %p len %zu", chan, len);
2167
2168 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2169
2170 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2171 msg->msg_flags & MSG_DONTWAIT);
2172 if (IS_ERR(skb))
2173 return skb;
2174
2175 skb->priority = priority;
2176
2177 /* Create L2CAP header */
2178 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2179 lh->cid = cpu_to_le16(chan->dcid);
2180 lh->len = cpu_to_le16(len);
2181
2182 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2183 if (unlikely(err < 0)) {
2184 kfree_skb(skb);
2185 return ERR_PTR(err);
2186 }
2187 return skb;
2188 }
2189
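/* Build a single ERTM/streaming I-frame PDU: basic L2CAP header, a 16-bit
 * enhanced or 32-bit extended control field (zeroed here, populated at
 * transmit time), an optional SDU length field for the first segment of a
 * segmented SDU, and the @len-byte payload. Room for the FCS is accounted
 * for in the header length when CRC16 is in use; the FCS value itself is
 * filled in when the frame is actually sent.
 */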
2190 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2191 struct msghdr *msg, size_t len,
2192 u16 sdulen)
2193 {
2194 struct l2cap_conn *conn = chan->conn;
2195 struct sk_buff *skb;
2196 int err, count, hlen;
2197 struct l2cap_hdr *lh;
2198
2199 BT_DBG("chan %p len %zu", chan, len);
2200
2201 if (!conn)
2202 return ERR_PTR(-ENOTCONN);
2203
2204 hlen = __ertm_hdr_size(chan);
2205
2206 if (sdulen)
2207 hlen += L2CAP_SDULEN_SIZE;
2208
2209 if (chan->fcs == L2CAP_FCS_CRC16)
2210 hlen += L2CAP_FCS_SIZE;
2211
2212 count = min_t(unsigned int, (conn->mtu - hlen), len);
2213
2214 skb = chan->ops->alloc_skb(chan, count + hlen,
2215 msg->msg_flags & MSG_DONTWAIT);
2216 if (IS_ERR(skb))
2217 return skb;
2218
2219 /* Create L2CAP header */
2220 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2221 lh->cid = cpu_to_le16(chan->dcid);
2222 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2223
2224 /* Control header is populated later */
2225 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2226 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2227 else
2228 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2229
2230 if (sdulen)
2231 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2232
2233 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2234 if (unlikely(err < 0)) {
2235 kfree_skb(skb);
2236 return ERR_PTR(err);
2237 }
2238
2239 bt_cb(skb)->control.fcs = chan->fcs;
2240 bt_cb(skb)->control.retries = 0;
2241 return skb;
2242 }
2243
2244 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2245 struct sk_buff_head *seg_queue,
2246 struct msghdr *msg, size_t len)
2247 {
2248 struct sk_buff *skb;
2249 u16 sdu_len;
2250 size_t pdu_len;
2251 u8 sar;
2252
2253 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2254
2255 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2256 * so fragmented skbs are not used. The HCI layer's handling
2257 * of fragmented skbs is not compatible with ERTM's queueing.
2258 */
2259
2260 /* PDU size is derived from the HCI MTU */
2261 pdu_len = chan->conn->mtu;
2262
2263 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2264
2265 /* Adjust for largest possible L2CAP overhead. */
2266 if (chan->fcs)
2267 pdu_len -= L2CAP_FCS_SIZE;
2268
2269 pdu_len -= __ertm_hdr_size(chan);
2270
2271 /* Remote device may have requested smaller PDUs */
2272 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2273
2274 if (len <= pdu_len) {
2275 sar = L2CAP_SAR_UNSEGMENTED;
2276 sdu_len = 0;
2277 pdu_len = len;
2278 } else {
2279 sar = L2CAP_SAR_START;
2280 sdu_len = len;
2281 pdu_len -= L2CAP_SDULEN_SIZE;
2282 }
2283
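	/* Segment progression: a single PDU is UNSEGMENTED; otherwise the
	 * first PDU is a START (and carries the SDU length), middle PDUs are
	 * CONTINUE, and the final one is an END.
	 */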
2284 while (len > 0) {
2285 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2286
2287 if (IS_ERR(skb)) {
2288 __skb_queue_purge(seg_queue);
2289 return PTR_ERR(skb);
2290 }
2291
2292 bt_cb(skb)->control.sar = sar;
2293 __skb_queue_tail(seg_queue, skb);
2294
2295 len -= pdu_len;
2296 if (sdu_len) {
2297 sdu_len = 0;
2298 pdu_len += L2CAP_SDULEN_SIZE;
2299 }
2300
2301 if (len <= pdu_len) {
2302 sar = L2CAP_SAR_END;
2303 pdu_len = len;
2304 } else {
2305 sar = L2CAP_SAR_CONTINUE;
2306 }
2307 }
2308
2309 return 0;
2310 }
2311
2312 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2313 u32 priority)
2314 {
2315 struct sk_buff *skb;
2316 int err;
2317 struct sk_buff_head seg_queue;
2318
2319 /* Connectionless channel */
2320 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2321 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2322 if (IS_ERR(skb))
2323 return PTR_ERR(skb);
2324
2325 l2cap_do_send(chan, skb);
2326 return len;
2327 }
2328
2329 switch (chan->mode) {
2330 case L2CAP_MODE_BASIC:
2331 /* Check outgoing MTU */
2332 if (len > chan->omtu)
2333 return -EMSGSIZE;
2334
2335 /* Create a basic PDU */
2336 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2337 if (IS_ERR(skb))
2338 return PTR_ERR(skb);
2339
2340 l2cap_do_send(chan, skb);
2341 err = len;
2342 break;
2343
2344 case L2CAP_MODE_ERTM:
2345 case L2CAP_MODE_STREAMING:
2346 /* Check outgoing MTU */
2347 if (len > chan->omtu) {
2348 err = -EMSGSIZE;
2349 break;
2350 }
2351
2352 __skb_queue_head_init(&seg_queue);
2353
2354 		/* Do segmentation before calling into the state machine,
2355 		 * since it's possible to block while waiting for memory
2356 		 * allocation.
2357 		 */
2358 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2359
2360 /* The channel could have been closed while segmenting,
2361 * check that it is still connected.
2362 */
2363 if (chan->state != BT_CONNECTED) {
2364 __skb_queue_purge(&seg_queue);
2365 err = -ENOTCONN;
2366 }
2367
2368 if (err)
2369 break;
2370
2371 if (chan->mode == L2CAP_MODE_ERTM)
2372 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2373 else
2374 l2cap_streaming_send(chan, &seg_queue);
2375
2376 err = len;
2377
2378 /* If the skbs were not queued for sending, they'll still be in
2379 * seg_queue and need to be purged.
2380 */
2381 __skb_queue_purge(&seg_queue);
2382 break;
2383
2384 default:
2385 		BT_DBG("bad mode %1.1x", chan->mode);
2386 err = -EBADFD;
2387 }
2388
2389 return err;
2390 }
2391
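/* Request retransmission of the missing frames between expected_tx_seq and
 * @txseq: send an SREJ for each sequence number that is not already buffered
 * in the SREJ queue and remember it in srej_list.
 */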
2392 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2393 {
2394 struct l2cap_ctrl control;
2395 u16 seq;
2396
2397 BT_DBG("chan %p, txseq %u", chan, txseq);
2398
2399 memset(&control, 0, sizeof(control));
2400 control.sframe = 1;
2401 control.super = L2CAP_SUPER_SREJ;
2402
2403 for (seq = chan->expected_tx_seq; seq != txseq;
2404 seq = __next_seq(chan, seq)) {
2405 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2406 control.reqseq = seq;
2407 l2cap_send_sframe(chan, &control);
2408 l2cap_seq_list_append(&chan->srej_list, seq);
2409 }
2410 }
2411
2412 chan->expected_tx_seq = __next_seq(chan, txseq);
2413 }
2414
2415 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2416 {
2417 struct l2cap_ctrl control;
2418
2419 BT_DBG("chan %p", chan);
2420
2421 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2422 return;
2423
2424 memset(&control, 0, sizeof(control));
2425 control.sframe = 1;
2426 control.super = L2CAP_SUPER_SREJ;
2427 control.reqseq = chan->srej_list.tail;
2428 l2cap_send_sframe(chan, &control);
2429 }
2430
2431 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2432 {
2433 struct l2cap_ctrl control;
2434 u16 initial_head;
2435 u16 seq;
2436
2437 BT_DBG("chan %p, txseq %u", chan, txseq);
2438
2439 memset(&control, 0, sizeof(control));
2440 control.sframe = 1;
2441 control.super = L2CAP_SUPER_SREJ;
2442
2443 /* Capture initial list head to allow only one pass through the list. */
2444 initial_head = chan->srej_list.head;
2445
2446 do {
2447 seq = l2cap_seq_list_pop(&chan->srej_list);
2448 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2449 break;
2450
2451 control.reqseq = seq;
2452 l2cap_send_sframe(chan, &control);
2453 l2cap_seq_list_append(&chan->srej_list, seq);
2454 } while (chan->srej_list.head != initial_head);
2455 }
2456
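/* The peer has acknowledged everything up to (but not including) @reqseq:
 * drop those frames from the transmit queue and stop the retransmission
 * timer once nothing is left unacked.
 */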
2457 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2458 {
2459 struct sk_buff *acked_skb;
2460 u16 ackseq;
2461
2462 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2463
2464 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2465 return;
2466
2467 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2468 chan->expected_ack_seq, chan->unacked_frames);
2469
2470 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2471 ackseq = __next_seq(chan, ackseq)) {
2472
2473 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2474 if (acked_skb) {
2475 skb_unlink(acked_skb, &chan->tx_q);
2476 kfree_skb(acked_skb);
2477 chan->unacked_frames--;
2478 }
2479 }
2480
2481 chan->expected_ack_seq = reqseq;
2482
2483 if (chan->unacked_frames == 0)
2484 __clear_retrans_timer(chan);
2485
2486 BT_DBG("unacked_frames %u", chan->unacked_frames);
2487 }
2488
2489 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2490 {
2491 BT_DBG("chan %p", chan);
2492
2493 chan->expected_tx_seq = chan->buffer_seq;
2494 l2cap_seq_list_clear(&chan->srej_list);
2495 skb_queue_purge(&chan->srej_q);
2496 chan->rx_state = L2CAP_RX_STATE_RECV;
2497 }
2498
2499 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2500 struct l2cap_ctrl *control,
2501 struct sk_buff_head *skbs, u8 event)
2502 {
2503 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2504 event);
2505
2506 switch (event) {
2507 case L2CAP_EV_DATA_REQUEST:
2508 if (chan->tx_send_head == NULL)
2509 chan->tx_send_head = skb_peek(skbs);
2510
2511 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2512 l2cap_ertm_send(chan);
2513 break;
2514 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2515 BT_DBG("Enter LOCAL_BUSY");
2516 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2517
2518 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2519 /* The SREJ_SENT state must be aborted if we are to
2520 * enter the LOCAL_BUSY state.
2521 */
2522 l2cap_abort_rx_srej_sent(chan);
2523 }
2524
2525 l2cap_send_ack(chan);
2526
2527 break;
2528 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2529 BT_DBG("Exit LOCAL_BUSY");
2530 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2531
2532 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2533 struct l2cap_ctrl local_control;
2534
2535 memset(&local_control, 0, sizeof(local_control));
2536 local_control.sframe = 1;
2537 local_control.super = L2CAP_SUPER_RR;
2538 local_control.poll = 1;
2539 local_control.reqseq = chan->buffer_seq;
2540 l2cap_send_sframe(chan, &local_control);
2541
2542 chan->retry_count = 1;
2543 __set_monitor_timer(chan);
2544 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2545 }
2546 break;
2547 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2548 l2cap_process_reqseq(chan, control->reqseq);
2549 break;
2550 case L2CAP_EV_EXPLICIT_POLL:
2551 l2cap_send_rr_or_rnr(chan, 1);
2552 chan->retry_count = 1;
2553 __set_monitor_timer(chan);
2554 __clear_ack_timer(chan);
2555 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2556 break;
2557 case L2CAP_EV_RETRANS_TO:
2558 l2cap_send_rr_or_rnr(chan, 1);
2559 chan->retry_count = 1;
2560 __set_monitor_timer(chan);
2561 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2562 break;
2563 case L2CAP_EV_RECV_FBIT:
2564 /* Nothing to process */
2565 break;
2566 default:
2567 break;
2568 }
2569 }
2570
2571 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2572 struct l2cap_ctrl *control,
2573 struct sk_buff_head *skbs, u8 event)
2574 {
2575 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2576 event);
2577
2578 switch (event) {
2579 case L2CAP_EV_DATA_REQUEST:
2580 if (chan->tx_send_head == NULL)
2581 chan->tx_send_head = skb_peek(skbs);
2582 /* Queue data, but don't send. */
2583 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2584 break;
2585 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2586 BT_DBG("Enter LOCAL_BUSY");
2587 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2588
2589 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2590 /* The SREJ_SENT state must be aborted if we are to
2591 * enter the LOCAL_BUSY state.
2592 */
2593 l2cap_abort_rx_srej_sent(chan);
2594 }
2595
2596 l2cap_send_ack(chan);
2597
2598 break;
2599 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2600 BT_DBG("Exit LOCAL_BUSY");
2601 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2602
2603 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2604 struct l2cap_ctrl local_control;
2605 memset(&local_control, 0, sizeof(local_control));
2606 local_control.sframe = 1;
2607 local_control.super = L2CAP_SUPER_RR;
2608 local_control.poll = 1;
2609 local_control.reqseq = chan->buffer_seq;
2610 l2cap_send_sframe(chan, &local_control);
2611
2612 chan->retry_count = 1;
2613 __set_monitor_timer(chan);
2614 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2615 }
2616 break;
2617 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2618 l2cap_process_reqseq(chan, control->reqseq);
2619
2620 /* Fall through */
2621
2622 case L2CAP_EV_RECV_FBIT:
2623 if (control && control->final) {
2624 __clear_monitor_timer(chan);
2625 if (chan->unacked_frames > 0)
2626 __set_retrans_timer(chan);
2627 chan->retry_count = 0;
2628 chan->tx_state = L2CAP_TX_STATE_XMIT;
2629 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2630 }
2631 break;
2632 case L2CAP_EV_EXPLICIT_POLL:
2633 /* Ignore */
2634 break;
2635 case L2CAP_EV_MONITOR_TO:
2636 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2637 l2cap_send_rr_or_rnr(chan, 1);
2638 __set_monitor_timer(chan);
2639 chan->retry_count++;
2640 } else {
2641 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2642 }
2643 break;
2644 default:
2645 break;
2646 }
2647 }
2648
2649 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2650 struct sk_buff_head *skbs, u8 event)
2651 {
2652 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2653 chan, control, skbs, event, chan->tx_state);
2654
2655 switch (chan->tx_state) {
2656 case L2CAP_TX_STATE_XMIT:
2657 l2cap_tx_state_xmit(chan, control, skbs, event);
2658 break;
2659 case L2CAP_TX_STATE_WAIT_F:
2660 l2cap_tx_state_wait_f(chan, control, skbs, event);
2661 break;
2662 default:
2663 /* Ignore event */
2664 break;
2665 }
2666 }
2667
2668 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2669 struct l2cap_ctrl *control)
2670 {
2671 BT_DBG("chan %p, control %p", chan, control);
2672 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2673 }
2674
2675 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2676 struct l2cap_ctrl *control)
2677 {
2678 BT_DBG("chan %p, control %p", chan, control);
2679 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2680 }
2681
2682 /* Copy frame to all raw sockets on that connection */
2683 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2684 {
2685 struct sk_buff *nskb;
2686 struct l2cap_chan *chan;
2687
2688 BT_DBG("conn %p", conn);
2689
2690 mutex_lock(&conn->chan_lock);
2691
2692 list_for_each_entry(chan, &conn->chan_l, list) {
2693 struct sock *sk = chan->sk;
2694 if (chan->chan_type != L2CAP_CHAN_RAW)
2695 continue;
2696
2697 /* Don't send frame to the socket it came from */
2698 if (skb->sk == sk)
2699 continue;
2700 nskb = skb_clone(skb, GFP_KERNEL);
2701 if (!nskb)
2702 continue;
2703
2704 if (chan->ops->recv(chan, nskb))
2705 kfree_skb(nskb);
2706 }
2707
2708 mutex_unlock(&conn->chan_lock);
2709 }
2710
2711 /* ---- L2CAP signalling commands ---- */
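/* Build a signalling PDU: basic L2CAP header on the signalling CID (or the
 * LE signalling CID for LE links), followed by the command header and @dlen
 * bytes of @data, split into continuation fragments when the command does
 * not fit within the HCI MTU.
 */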
2712 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2713 u8 ident, u16 dlen, void *data)
2714 {
2715 struct sk_buff *skb, **frag;
2716 struct l2cap_cmd_hdr *cmd;
2717 struct l2cap_hdr *lh;
2718 int len, count;
2719
2720 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2721 conn, code, ident, dlen);
2722
2723 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2724 count = min_t(unsigned int, conn->mtu, len);
2725
2726 skb = bt_skb_alloc(count, GFP_KERNEL);
2727 if (!skb)
2728 return NULL;
2729
2730 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2731 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2732
2733 if (conn->hcon->type == LE_LINK)
2734 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2735 else
2736 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2737
2738 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2739 cmd->code = code;
2740 cmd->ident = ident;
2741 cmd->len = cpu_to_le16(dlen);
2742
2743 if (dlen) {
2744 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2745 memcpy(skb_put(skb, count), data, count);
2746 data += count;
2747 }
2748
2749 len -= skb->len;
2750
2751 /* Continuation fragments (no L2CAP header) */
2752 frag = &skb_shinfo(skb)->frag_list;
2753 while (len) {
2754 count = min_t(unsigned int, conn->mtu, len);
2755
2756 *frag = bt_skb_alloc(count, GFP_KERNEL);
2757 if (!*frag)
2758 goto fail;
2759
2760 memcpy(skb_put(*frag, count), data, count);
2761
2762 len -= count;
2763 data += count;
2764
2765 frag = &(*frag)->next;
2766 }
2767
2768 return skb;
2769
2770 fail:
2771 kfree_skb(skb);
2772 return NULL;
2773 }
2774
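/* Configuration options are a type/length/value sequence: a 1-byte type
 * (the L2CAP_CONF_HINT bit marks the option as a hint), a 1-byte length and
 * 'length' bytes of value. 1-, 2- and 4-byte values are returned by value,
 * larger ones as a pointer into the request buffer.
 */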
2775 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2776 unsigned long *val)
2777 {
2778 struct l2cap_conf_opt *opt = *ptr;
2779 int len;
2780
2781 len = L2CAP_CONF_OPT_SIZE + opt->len;
2782 *ptr += len;
2783
2784 *type = opt->type;
2785 *olen = opt->len;
2786
2787 switch (opt->len) {
2788 case 1:
2789 *val = *((u8 *) opt->val);
2790 break;
2791
2792 case 2:
2793 *val = get_unaligned_le16(opt->val);
2794 break;
2795
2796 case 4:
2797 *val = get_unaligned_le32(opt->val);
2798 break;
2799
2800 default:
2801 *val = (unsigned long) opt->val;
2802 break;
2803 }
2804
2805 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2806 return len;
2807 }
2808
2809 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2810 {
2811 struct l2cap_conf_opt *opt = *ptr;
2812
2813 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2814
2815 opt->type = type;
2816 opt->len = len;
2817
2818 switch (len) {
2819 case 1:
2820 *((u8 *) opt->val) = val;
2821 break;
2822
2823 case 2:
2824 put_unaligned_le16(val, opt->val);
2825 break;
2826
2827 case 4:
2828 put_unaligned_le32(val, opt->val);
2829 break;
2830
2831 default:
2832 memcpy(opt->val, (void *) val, len);
2833 break;
2834 }
2835
2836 *ptr += L2CAP_CONF_OPT_SIZE + len;
2837 }
2838
2839 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2840 {
2841 struct l2cap_conf_efs efs;
2842
2843 switch (chan->mode) {
2844 case L2CAP_MODE_ERTM:
2845 efs.id = chan->local_id;
2846 efs.stype = chan->local_stype;
2847 efs.msdu = cpu_to_le16(chan->local_msdu);
2848 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2849 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2850 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2851 break;
2852
2853 case L2CAP_MODE_STREAMING:
2854 efs.id = 1;
2855 efs.stype = L2CAP_SERV_BESTEFFORT;
2856 efs.msdu = cpu_to_le16(chan->local_msdu);
2857 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2858 efs.acc_lat = 0;
2859 efs.flush_to = 0;
2860 break;
2861
2862 default:
2863 return;
2864 }
2865
2866 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2867 (unsigned long) &efs);
2868 }
2869
2870 static void l2cap_ack_timeout(struct work_struct *work)
2871 {
2872 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2873 ack_timer.work);
2874 u16 frames_to_ack;
2875
2876 BT_DBG("chan %p", chan);
2877
2878 l2cap_chan_lock(chan);
2879
2880 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2881 chan->last_acked_seq);
2882
2883 if (frames_to_ack)
2884 l2cap_send_rr_or_rnr(chan, 0);
2885
2886 l2cap_chan_unlock(chan);
2887 l2cap_chan_put(chan);
2888 }
2889
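/* Reset all sequence numbers, counters and queues for a freshly configured
 * channel. For ERTM this also sets up the retransmission, monitor and ack
 * timer work items and allocates the SREJ and retransmission sequence lists.
 */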
2890 int l2cap_ertm_init(struct l2cap_chan *chan)
2891 {
2892 int err;
2893
2894 chan->next_tx_seq = 0;
2895 chan->expected_tx_seq = 0;
2896 chan->expected_ack_seq = 0;
2897 chan->unacked_frames = 0;
2898 chan->buffer_seq = 0;
2899 chan->frames_sent = 0;
2900 chan->last_acked_seq = 0;
2901 chan->sdu = NULL;
2902 chan->sdu_last_frag = NULL;
2903 chan->sdu_len = 0;
2904
2905 skb_queue_head_init(&chan->tx_q);
2906
2907 chan->local_amp_id = 0;
2908 chan->move_id = 0;
2909 chan->move_state = L2CAP_MOVE_STABLE;
2910 chan->move_role = L2CAP_MOVE_ROLE_NONE;
2911
2912 if (chan->mode != L2CAP_MODE_ERTM)
2913 return 0;
2914
2915 chan->rx_state = L2CAP_RX_STATE_RECV;
2916 chan->tx_state = L2CAP_TX_STATE_XMIT;
2917
2918 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2919 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2920 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2921
2922 skb_queue_head_init(&chan->srej_q);
2923
2924 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2925 if (err < 0)
2926 return err;
2927
2928 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2929 if (err < 0)
2930 l2cap_seq_list_free(&chan->srej_list);
2931
2932 return err;
2933 }
2934
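/* Keep the requested ERTM/streaming mode only if the remote's feature mask
 * advertises support for it; otherwise fall back to basic mode.
 */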
2935 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2936 {
2937 switch (mode) {
2938 case L2CAP_MODE_STREAMING:
2939 case L2CAP_MODE_ERTM:
2940 if (l2cap_mode_supported(mode, remote_feat_mask))
2941 return mode;
2942 /* fall through */
2943 default:
2944 return L2CAP_MODE_BASIC;
2945 }
2946 }
2947
2948 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2949 {
2950 	return enable_hs && (chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
2951 }
2952
2953 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2954 {
2955 	return enable_hs && (chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
2956 }
2957
2958 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2959 {
2960 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2961 __l2cap_ews_supported(chan)) {
2962 /* use extended control field */
2963 set_bit(FLAG_EXT_CTRL, &chan->flags);
2964 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2965 } else {
2966 chan->tx_win = min_t(u16, chan->tx_win,
2967 L2CAP_DEFAULT_TX_WINDOW);
2968 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2969 }
2970 chan->ack_win = chan->tx_win;
2971 }
2972
2973 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2974 {
2975 struct l2cap_conf_req *req = data;
2976 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2977 void *ptr = req->data;
2978 u16 size;
2979
2980 BT_DBG("chan %p", chan);
2981
2982 if (chan->num_conf_req || chan->num_conf_rsp)
2983 goto done;
2984
2985 switch (chan->mode) {
2986 case L2CAP_MODE_STREAMING:
2987 case L2CAP_MODE_ERTM:
2988 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2989 break;
2990
2991 if (__l2cap_efs_supported(chan))
2992 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2993
2994 /* fall through */
2995 default:
2996 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2997 break;
2998 }
2999
3000 done:
3001 if (chan->imtu != L2CAP_DEFAULT_MTU)
3002 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3003
3004 switch (chan->mode) {
3005 case L2CAP_MODE_BASIC:
3006 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3007 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3008 break;
3009
3010 rfc.mode = L2CAP_MODE_BASIC;
3011 rfc.txwin_size = 0;
3012 rfc.max_transmit = 0;
3013 rfc.retrans_timeout = 0;
3014 rfc.monitor_timeout = 0;
3015 rfc.max_pdu_size = 0;
3016
3017 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3018 (unsigned long) &rfc);
3019 break;
3020
3021 case L2CAP_MODE_ERTM:
3022 rfc.mode = L2CAP_MODE_ERTM;
3023 rfc.max_transmit = chan->max_tx;
3024 rfc.retrans_timeout = 0;
3025 rfc.monitor_timeout = 0;
3026
3027 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3028 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3029 L2CAP_FCS_SIZE);
3030 rfc.max_pdu_size = cpu_to_le16(size);
3031
3032 l2cap_txwin_setup(chan);
3033
3034 rfc.txwin_size = min_t(u16, chan->tx_win,
3035 L2CAP_DEFAULT_TX_WINDOW);
3036
3037 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3038 (unsigned long) &rfc);
3039
3040 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3041 l2cap_add_opt_efs(&ptr, chan);
3042
3043 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
3044 break;
3045
3046 if (chan->fcs == L2CAP_FCS_NONE ||
3047 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
3048 chan->fcs = L2CAP_FCS_NONE;
3049 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
3050 }
3051
3052 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3053 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3054 chan->tx_win);
3055 break;
3056
3057 case L2CAP_MODE_STREAMING:
3058 l2cap_txwin_setup(chan);
3059 rfc.mode = L2CAP_MODE_STREAMING;
3060 rfc.txwin_size = 0;
3061 rfc.max_transmit = 0;
3062 rfc.retrans_timeout = 0;
3063 rfc.monitor_timeout = 0;
3064
3065 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3066 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3067 L2CAP_FCS_SIZE);
3068 rfc.max_pdu_size = cpu_to_le16(size);
3069
3070 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3071 (unsigned long) &rfc);
3072
3073 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3074 l2cap_add_opt_efs(&ptr, chan);
3075
3076 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
3077 break;
3078
3079 if (chan->fcs == L2CAP_FCS_NONE ||
3080 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
3081 chan->fcs = L2CAP_FCS_NONE;
3082 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
3083 }
3084 break;
3085 }
3086
3087 req->dcid = cpu_to_le16(chan->dcid);
3088 req->flags = __constant_cpu_to_le16(0);
3089
3090 return ptr - data;
3091 }
3092
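/* Parse the configuration request accumulated in chan->conf_req and build
 * our response options in @data. Returns the length of the response or a
 * negative error if the request cannot be accepted at all.
 */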
3093 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3094 {
3095 struct l2cap_conf_rsp *rsp = data;
3096 void *ptr = rsp->data;
3097 void *req = chan->conf_req;
3098 int len = chan->conf_len;
3099 int type, hint, olen;
3100 unsigned long val;
3101 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3102 struct l2cap_conf_efs efs;
3103 u8 remote_efs = 0;
3104 u16 mtu = L2CAP_DEFAULT_MTU;
3105 u16 result = L2CAP_CONF_SUCCESS;
3106 u16 size;
3107
3108 BT_DBG("chan %p", chan);
3109
3110 while (len >= L2CAP_CONF_OPT_SIZE) {
3111 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3112
3113 hint = type & L2CAP_CONF_HINT;
3114 type &= L2CAP_CONF_MASK;
3115
3116 switch (type) {
3117 case L2CAP_CONF_MTU:
3118 mtu = val;
3119 break;
3120
3121 case L2CAP_CONF_FLUSH_TO:
3122 chan->flush_to = val;
3123 break;
3124
3125 case L2CAP_CONF_QOS:
3126 break;
3127
3128 case L2CAP_CONF_RFC:
3129 if (olen == sizeof(rfc))
3130 memcpy(&rfc, (void *) val, olen);
3131 break;
3132
3133 case L2CAP_CONF_FCS:
3134 if (val == L2CAP_FCS_NONE)
3135 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
3136 break;
3137
3138 case L2CAP_CONF_EFS:
3139 remote_efs = 1;
3140 if (olen == sizeof(efs))
3141 memcpy(&efs, (void *) val, olen);
3142 break;
3143
3144 case L2CAP_CONF_EWS:
3145 if (!enable_hs)
3146 return -ECONNREFUSED;
3147
3148 set_bit(FLAG_EXT_CTRL, &chan->flags);
3149 set_bit(CONF_EWS_RECV, &chan->conf_state);
3150 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3151 chan->remote_tx_win = val;
3152 break;
3153
3154 default:
3155 if (hint)
3156 break;
3157
3158 result = L2CAP_CONF_UNKNOWN;
3159 *((u8 *) ptr++) = type;
3160 break;
3161 }
3162 }
3163
3164 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3165 goto done;
3166
3167 switch (chan->mode) {
3168 case L2CAP_MODE_STREAMING:
3169 case L2CAP_MODE_ERTM:
3170 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3171 chan->mode = l2cap_select_mode(rfc.mode,
3172 chan->conn->feat_mask);
3173 break;
3174 }
3175
3176 if (remote_efs) {
3177 if (__l2cap_efs_supported(chan))
3178 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3179 else
3180 return -ECONNREFUSED;
3181 }
3182
3183 if (chan->mode != rfc.mode)
3184 return -ECONNREFUSED;
3185
3186 break;
3187 }
3188
3189 done:
3190 if (chan->mode != rfc.mode) {
3191 result = L2CAP_CONF_UNACCEPT;
3192 rfc.mode = chan->mode;
3193
3194 if (chan->num_conf_rsp == 1)
3195 return -ECONNREFUSED;
3196
3197 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3198 (unsigned long) &rfc);
3199 }
3200
3201 if (result == L2CAP_CONF_SUCCESS) {
3202 /* Configure output options and let the other side know
3203 * which ones we don't like. */
3204
3205 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3206 result = L2CAP_CONF_UNACCEPT;
3207 else {
3208 chan->omtu = mtu;
3209 set_bit(CONF_MTU_DONE, &chan->conf_state);
3210 }
3211 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3212
3213 if (remote_efs) {
3214 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3215 efs.stype != L2CAP_SERV_NOTRAFIC &&
3216 efs.stype != chan->local_stype) {
3217
3218 result = L2CAP_CONF_UNACCEPT;
3219
3220 if (chan->num_conf_req >= 1)
3221 return -ECONNREFUSED;
3222
3223 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3224 sizeof(efs),
3225 (unsigned long) &efs);
3226 } else {
3227 /* Send PENDING Conf Rsp */
3228 result = L2CAP_CONF_PENDING;
3229 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3230 }
3231 }
3232
3233 switch (rfc.mode) {
3234 case L2CAP_MODE_BASIC:
3235 chan->fcs = L2CAP_FCS_NONE;
3236 set_bit(CONF_MODE_DONE, &chan->conf_state);
3237 break;
3238
3239 case L2CAP_MODE_ERTM:
3240 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3241 chan->remote_tx_win = rfc.txwin_size;
3242 else
3243 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3244
3245 chan->remote_max_tx = rfc.max_transmit;
3246
3247 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3248 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3249 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3250 rfc.max_pdu_size = cpu_to_le16(size);
3251 chan->remote_mps = size;
3252
3253 rfc.retrans_timeout =
3254 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3255 rfc.monitor_timeout =
3256 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3257
3258 set_bit(CONF_MODE_DONE, &chan->conf_state);
3259
3260 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3261 sizeof(rfc), (unsigned long) &rfc);
3262
3263 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3264 chan->remote_id = efs.id;
3265 chan->remote_stype = efs.stype;
3266 chan->remote_msdu = le16_to_cpu(efs.msdu);
3267 chan->remote_flush_to =
3268 le32_to_cpu(efs.flush_to);
3269 chan->remote_acc_lat =
3270 le32_to_cpu(efs.acc_lat);
3271 chan->remote_sdu_itime =
3272 le32_to_cpu(efs.sdu_itime);
3273 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3274 sizeof(efs),
3275 (unsigned long) &efs);
3276 }
3277 break;
3278
3279 case L2CAP_MODE_STREAMING:
3280 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3281 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3282 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3283 rfc.max_pdu_size = cpu_to_le16(size);
3284 chan->remote_mps = size;
3285
3286 set_bit(CONF_MODE_DONE, &chan->conf_state);
3287
3288 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3289 (unsigned long) &rfc);
3290
3291 break;
3292
3293 default:
3294 result = L2CAP_CONF_UNACCEPT;
3295
3296 memset(&rfc, 0, sizeof(rfc));
3297 rfc.mode = chan->mode;
3298 }
3299
3300 if (result == L2CAP_CONF_SUCCESS)
3301 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3302 }
3303 rsp->scid = cpu_to_le16(chan->dcid);
3304 rsp->result = cpu_to_le16(result);
3305 rsp->flags = __constant_cpu_to_le16(0);
3306
3307 return ptr - data;
3308 }
3309
3310 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3311 void *data, u16 *result)
3312 {
3313 struct l2cap_conf_req *req = data;
3314 void *ptr = req->data;
3315 int type, olen;
3316 unsigned long val;
3317 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3318 struct l2cap_conf_efs efs;
3319
3320 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3321
3322 while (len >= L2CAP_CONF_OPT_SIZE) {
3323 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3324
3325 switch (type) {
3326 case L2CAP_CONF_MTU:
3327 if (val < L2CAP_DEFAULT_MIN_MTU) {
3328 *result = L2CAP_CONF_UNACCEPT;
3329 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3330 } else
3331 chan->imtu = val;
3332 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3333 break;
3334
3335 case L2CAP_CONF_FLUSH_TO:
3336 chan->flush_to = val;
3337 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3338 2, chan->flush_to);
3339 break;
3340
3341 case L2CAP_CONF_RFC:
3342 if (olen == sizeof(rfc))
3343 memcpy(&rfc, (void *)val, olen);
3344
3345 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3346 rfc.mode != chan->mode)
3347 return -ECONNREFUSED;
3348
3349 chan->fcs = 0;
3350
3351 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3352 sizeof(rfc), (unsigned long) &rfc);
3353 break;
3354
3355 case L2CAP_CONF_EWS:
3356 chan->ack_win = min_t(u16, val, chan->ack_win);
3357 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3358 chan->tx_win);
3359 break;
3360
3361 case L2CAP_CONF_EFS:
3362 if (olen == sizeof(efs))
3363 memcpy(&efs, (void *)val, olen);
3364
3365 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3366 efs.stype != L2CAP_SERV_NOTRAFIC &&
3367 efs.stype != chan->local_stype)
3368 return -ECONNREFUSED;
3369
3370 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3371 (unsigned long) &efs);
3372 break;
3373 }
3374 }
3375
3376 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3377 return -ECONNREFUSED;
3378
3379 chan->mode = rfc.mode;
3380
3381 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3382 switch (rfc.mode) {
3383 case L2CAP_MODE_ERTM:
3384 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3385 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3386 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3387 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3388 chan->ack_win = min_t(u16, chan->ack_win,
3389 rfc.txwin_size);
3390
3391 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3392 chan->local_msdu = le16_to_cpu(efs.msdu);
3393 chan->local_sdu_itime =
3394 le32_to_cpu(efs.sdu_itime);
3395 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3396 chan->local_flush_to =
3397 le32_to_cpu(efs.flush_to);
3398 }
3399 break;
3400
3401 case L2CAP_MODE_STREAMING:
3402 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3403 }
3404 }
3405
3406 req->dcid = cpu_to_le16(chan->dcid);
3407 req->flags = __constant_cpu_to_le16(0);
3408
3409 return ptr - data;
3410 }
3411
3412 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3413 u16 result, u16 flags)
3414 {
3415 struct l2cap_conf_rsp *rsp = data;
3416 void *ptr = rsp->data;
3417
3418 BT_DBG("chan %p", chan);
3419
3420 rsp->scid = cpu_to_le16(chan->dcid);
3421 rsp->result = cpu_to_le16(result);
3422 rsp->flags = cpu_to_le16(flags);
3423
3424 return ptr - data;
3425 }
3426
3427 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3428 {
3429 struct l2cap_conn_rsp rsp;
3430 struct l2cap_conn *conn = chan->conn;
3431 u8 buf[128];
3432
3433 rsp.scid = cpu_to_le16(chan->dcid);
3434 rsp.dcid = cpu_to_le16(chan->scid);
3435 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3436 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3437 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3438
3439 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3440 return;
3441
3442 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3443 l2cap_build_conf_req(chan, buf), buf);
3444 chan->num_conf_req++;
3445 }
3446
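/* Extract the RFC and extended window size options from a successful
 * configuration response and apply the negotiated timeouts, PDU size and
 * ack window to the channel.
 */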
3447 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3448 {
3449 int type, olen;
3450 unsigned long val;
3451 /* Use sane default values in case a misbehaving remote device
3452 * did not send an RFC or extended window size option.
3453 */
3454 u16 txwin_ext = chan->ack_win;
3455 struct l2cap_conf_rfc rfc = {
3456 .mode = chan->mode,
3457 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3458 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3459 .max_pdu_size = cpu_to_le16(chan->imtu),
3460 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3461 };
3462
3463 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3464
3465 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3466 return;
3467
3468 while (len >= L2CAP_CONF_OPT_SIZE) {
3469 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3470
3471 switch (type) {
3472 case L2CAP_CONF_RFC:
3473 if (olen == sizeof(rfc))
3474 memcpy(&rfc, (void *)val, olen);
3475 break;
3476 case L2CAP_CONF_EWS:
3477 txwin_ext = val;
3478 break;
3479 }
3480 }
3481
3482 switch (rfc.mode) {
3483 case L2CAP_MODE_ERTM:
3484 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3485 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3486 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3487 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3488 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3489 else
3490 chan->ack_win = min_t(u16, chan->ack_win,
3491 rfc.txwin_size);
3492 break;
3493 case L2CAP_MODE_STREAMING:
3494 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3495 }
3496 }
3497
3498 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3499 struct l2cap_cmd_hdr *cmd, u8 *data)
3500 {
3501 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3502
3503 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3504 return 0;
3505
3506 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3507 cmd->ident == conn->info_ident) {
3508 cancel_delayed_work(&conn->info_timer);
3509
3510 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3511 conn->info_ident = 0;
3512
3513 l2cap_conn_start(conn);
3514 }
3515
3516 return 0;
3517 }
3518
3519 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3520 struct l2cap_cmd_hdr *cmd,
3521 u8 *data, u8 rsp_code, u8 amp_id)
3522 {
3523 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3524 struct l2cap_conn_rsp rsp;
3525 struct l2cap_chan *chan = NULL, *pchan;
3526 struct sock *parent, *sk = NULL;
3527 int result, status = L2CAP_CS_NO_INFO;
3528
3529 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3530 __le16 psm = req->psm;
3531
3532 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3533
3534 	/* Check if we have a socket listening on this PSM */
3535 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3536 if (!pchan) {
3537 result = L2CAP_CR_BAD_PSM;
3538 goto sendresp;
3539 }
3540
3541 parent = pchan->sk;
3542
3543 mutex_lock(&conn->chan_lock);
3544 lock_sock(parent);
3545
3546 /* Check if the ACL is secure enough (if not SDP) */
3547 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3548 !hci_conn_check_link_mode(conn->hcon)) {
3549 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3550 result = L2CAP_CR_SEC_BLOCK;
3551 goto response;
3552 }
3553
3554 result = L2CAP_CR_NO_MEM;
3555
3556 	/* Check if we already have a channel with that dcid */
3557 if (__l2cap_get_chan_by_dcid(conn, scid))
3558 goto response;
3559
3560 chan = pchan->ops->new_connection(pchan);
3561 if (!chan)
3562 goto response;
3563
3564 sk = chan->sk;
3565
3566 hci_conn_hold(conn->hcon);
3567
3568 bacpy(&bt_sk(sk)->src, conn->src);
3569 bacpy(&bt_sk(sk)->dst, conn->dst);
3570 chan->psm = psm;
3571 chan->dcid = scid;
3572 chan->local_amp_id = amp_id;
3573
3574 __l2cap_chan_add(conn, chan);
3575
3576 dcid = chan->scid;
3577
3578 __set_chan_timer(chan, sk->sk_sndtimeo);
3579
3580 chan->ident = cmd->ident;
3581
3582 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3583 if (l2cap_chan_check_security(chan)) {
3584 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3585 __l2cap_state_change(chan, BT_CONNECT2);
3586 result = L2CAP_CR_PEND;
3587 status = L2CAP_CS_AUTHOR_PEND;
3588 chan->ops->defer(chan);
3589 } else {
3590 /* Force pending result for AMP controllers.
3591 * The connection will succeed after the
3592 * physical link is up.
3593 */
3594 if (amp_id) {
3595 __l2cap_state_change(chan, BT_CONNECT2);
3596 result = L2CAP_CR_PEND;
3597 } else {
3598 __l2cap_state_change(chan, BT_CONFIG);
3599 result = L2CAP_CR_SUCCESS;
3600 }
3601 status = L2CAP_CS_NO_INFO;
3602 }
3603 } else {
3604 __l2cap_state_change(chan, BT_CONNECT2);
3605 result = L2CAP_CR_PEND;
3606 status = L2CAP_CS_AUTHEN_PEND;
3607 }
3608 } else {
3609 __l2cap_state_change(chan, BT_CONNECT2);
3610 result = L2CAP_CR_PEND;
3611 status = L2CAP_CS_NO_INFO;
3612 }
3613
3614 response:
3615 release_sock(parent);
3616 mutex_unlock(&conn->chan_lock);
3617
3618 sendresp:
3619 rsp.scid = cpu_to_le16(scid);
3620 rsp.dcid = cpu_to_le16(dcid);
3621 rsp.result = cpu_to_le16(result);
3622 rsp.status = cpu_to_le16(status);
3623 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3624
3625 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3626 struct l2cap_info_req info;
3627 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3628
3629 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3630 conn->info_ident = l2cap_get_ident(conn);
3631
3632 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3633
3634 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3635 sizeof(info), &info);
3636 }
3637
3638 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3639 result == L2CAP_CR_SUCCESS) {
3640 u8 buf[128];
3641 set_bit(CONF_REQ_SENT, &chan->conf_state);
3642 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3643 l2cap_build_conf_req(chan, buf), buf);
3644 chan->num_conf_req++;
3645 }
3646
3647 return chan;
3648 }
3649
3650 static int l2cap_connect_req(struct l2cap_conn *conn,
3651 struct l2cap_cmd_hdr *cmd, u8 *data)
3652 {
3653 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3654 return 0;
3655 }
3656
3657 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3658 struct l2cap_cmd_hdr *cmd, u8 *data)
3659 {
3660 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3661 u16 scid, dcid, result, status;
3662 struct l2cap_chan *chan;
3663 u8 req[128];
3664 int err;
3665
3666 scid = __le16_to_cpu(rsp->scid);
3667 dcid = __le16_to_cpu(rsp->dcid);
3668 result = __le16_to_cpu(rsp->result);
3669 status = __le16_to_cpu(rsp->status);
3670
3671 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3672 dcid, scid, result, status);
3673
3674 mutex_lock(&conn->chan_lock);
3675
3676 if (scid) {
3677 chan = __l2cap_get_chan_by_scid(conn, scid);
3678 if (!chan) {
3679 err = -EFAULT;
3680 goto unlock;
3681 }
3682 } else {
3683 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3684 if (!chan) {
3685 err = -EFAULT;
3686 goto unlock;
3687 }
3688 }
3689
3690 err = 0;
3691
3692 l2cap_chan_lock(chan);
3693
3694 switch (result) {
3695 case L2CAP_CR_SUCCESS:
3696 l2cap_state_change(chan, BT_CONFIG);
3697 chan->ident = 0;
3698 chan->dcid = dcid;
3699 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3700
3701 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3702 break;
3703
3704 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3705 l2cap_build_conf_req(chan, req), req);
3706 chan->num_conf_req++;
3707 break;
3708
3709 case L2CAP_CR_PEND:
3710 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3711 break;
3712
3713 default:
3714 l2cap_chan_del(chan, ECONNREFUSED);
3715 break;
3716 }
3717
3718 l2cap_chan_unlock(chan);
3719
3720 unlock:
3721 mutex_unlock(&conn->chan_lock);
3722
3723 return err;
3724 }
3725
3726 static inline void set_default_fcs(struct l2cap_chan *chan)
3727 {
3728 /* FCS is enabled only in ERTM or streaming mode, if one or both
3729 * sides request it.
3730 */
3731 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3732 chan->fcs = L2CAP_FCS_NONE;
3733 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3734 chan->fcs = L2CAP_FCS_CRC16;
3735 }
3736
3737 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3738 u8 ident, u16 flags)
3739 {
3740 struct l2cap_conn *conn = chan->conn;
3741
3742 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3743 flags);
3744
3745 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3746 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3747
3748 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3749 l2cap_build_conf_rsp(chan, data,
3750 L2CAP_CONF_SUCCESS, flags), data);
3751 }
3752
3753 static inline int l2cap_config_req(struct l2cap_conn *conn,
3754 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3755 u8 *data)
3756 {
3757 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3758 u16 dcid, flags;
3759 u8 rsp[64];
3760 struct l2cap_chan *chan;
3761 int len, err = 0;
3762
3763 dcid = __le16_to_cpu(req->dcid);
3764 flags = __le16_to_cpu(req->flags);
3765
3766 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3767
3768 chan = l2cap_get_chan_by_scid(conn, dcid);
3769 if (!chan)
3770 return -ENOENT;
3771
3772 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3773 struct l2cap_cmd_rej_cid rej;
3774
3775 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3776 rej.scid = cpu_to_le16(chan->scid);
3777 rej.dcid = cpu_to_le16(chan->dcid);
3778
3779 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3780 sizeof(rej), &rej);
3781 goto unlock;
3782 }
3783
3784 /* Reject if config buffer is too small. */
3785 len = cmd_len - sizeof(*req);
3786 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3787 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3788 l2cap_build_conf_rsp(chan, rsp,
3789 L2CAP_CONF_REJECT, flags), rsp);
3790 goto unlock;
3791 }
3792
3793 /* Store config. */
3794 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3795 chan->conf_len += len;
3796
3797 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3798 /* Incomplete config. Send empty response. */
3799 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3800 l2cap_build_conf_rsp(chan, rsp,
3801 L2CAP_CONF_SUCCESS, flags), rsp);
3802 goto unlock;
3803 }
3804
3805 /* Complete config. */
3806 len = l2cap_parse_conf_req(chan, rsp);
3807 if (len < 0) {
3808 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3809 goto unlock;
3810 }
3811
3812 chan->ident = cmd->ident;
3813 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3814 chan->num_conf_rsp++;
3815
3816 /* Reset config buffer. */
3817 chan->conf_len = 0;
3818
3819 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3820 goto unlock;
3821
3822 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3823 set_default_fcs(chan);
3824
3825 if (chan->mode == L2CAP_MODE_ERTM ||
3826 chan->mode == L2CAP_MODE_STREAMING)
3827 err = l2cap_ertm_init(chan);
3828
3829 if (err < 0)
3830 l2cap_send_disconn_req(chan->conn, chan, -err);
3831 else
3832 l2cap_chan_ready(chan);
3833
3834 goto unlock;
3835 }
3836
3837 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3838 u8 buf[64];
3839 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3840 l2cap_build_conf_req(chan, buf), buf);
3841 chan->num_conf_req++;
3842 }
3843
3844 	/* Got Conf Rsp PENDING from remote side and assume we sent
3845 	   Conf Rsp PENDING in the code above */
3846 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3847 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3848
3849 /* check compatibility */
3850
3851 /* Send rsp for BR/EDR channel */
3852 if (!chan->ctrl_id)
3853 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
3854 else
3855 chan->ident = cmd->ident;
3856 }
3857
3858 unlock:
3859 l2cap_chan_unlock(chan);
3860 return err;
3861 }
3862
3863 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
3864 struct l2cap_cmd_hdr *cmd, u8 *data)
3865 {
3866 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3867 u16 scid, flags, result;
3868 struct l2cap_chan *chan;
3869 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3870 int err = 0;
3871
3872 scid = __le16_to_cpu(rsp->scid);
3873 flags = __le16_to_cpu(rsp->flags);
3874 result = __le16_to_cpu(rsp->result);
3875
3876 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3877 result, len);
3878
3879 chan = l2cap_get_chan_by_scid(conn, scid);
3880 if (!chan)
3881 return 0;
3882
3883 switch (result) {
3884 case L2CAP_CONF_SUCCESS:
3885 l2cap_conf_rfc_get(chan, rsp->data, len);
3886 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3887 break;
3888
3889 case L2CAP_CONF_PENDING:
3890 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3891
3892 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3893 char buf[64];
3894
3895 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3896 buf, &result);
3897 if (len < 0) {
3898 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3899 goto done;
3900 }
3901
3902 /* check compatibility */
3903
3904 if (!chan->ctrl_id)
3905 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
3906 0);
3907 else
3908 chan->ident = cmd->ident;
3909 }
3910 goto done;
3911
3912 case L2CAP_CONF_UNACCEPT:
3913 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3914 char req[64];
3915
3916 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3917 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3918 goto done;
3919 }
3920
3921 /* throw out any old stored conf requests */
3922 result = L2CAP_CONF_SUCCESS;
3923 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3924 req, &result);
3925 if (len < 0) {
3926 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3927 goto done;
3928 }
3929
3930 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3931 L2CAP_CONF_REQ, len, req);
3932 chan->num_conf_req++;
3933 if (result != L2CAP_CONF_SUCCESS)
3934 goto done;
3935 break;
3936 }
3937
3938 default:
3939 l2cap_chan_set_err(chan, ECONNRESET);
3940
3941 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3942 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3943 goto done;
3944 }
3945
3946 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3947 goto done;
3948
3949 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3950
3951 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3952 set_default_fcs(chan);
3953
3954 if (chan->mode == L2CAP_MODE_ERTM ||
3955 chan->mode == L2CAP_MODE_STREAMING)
3956 err = l2cap_ertm_init(chan);
3957
3958 if (err < 0)
3959 l2cap_send_disconn_req(chan->conn, chan, -err);
3960 else
3961 l2cap_chan_ready(chan);
3962 }
3963
3964 done:
3965 l2cap_chan_unlock(chan);
3966 return err;
3967 }
3968
3969 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
3970 struct l2cap_cmd_hdr *cmd, u8 *data)
3971 {
3972 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3973 struct l2cap_disconn_rsp rsp;
3974 u16 dcid, scid;
3975 struct l2cap_chan *chan;
3976 struct sock *sk;
3977
3978 scid = __le16_to_cpu(req->scid);
3979 dcid = __le16_to_cpu(req->dcid);
3980
3981 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3982
3983 mutex_lock(&conn->chan_lock);
3984
3985 chan = __l2cap_get_chan_by_scid(conn, dcid);
3986 if (!chan) {
3987 mutex_unlock(&conn->chan_lock);
3988 return 0;
3989 }
3990
3991 l2cap_chan_lock(chan);
3992
3993 sk = chan->sk;
3994
3995 rsp.dcid = cpu_to_le16(chan->scid);
3996 rsp.scid = cpu_to_le16(chan->dcid);
3997 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3998
3999 lock_sock(sk);
4000 sk->sk_shutdown = SHUTDOWN_MASK;
4001 release_sock(sk);
4002
4003 l2cap_chan_hold(chan);
4004 l2cap_chan_del(chan, ECONNRESET);
4005
4006 l2cap_chan_unlock(chan);
4007
4008 chan->ops->close(chan);
4009 l2cap_chan_put(chan);
4010
4011 mutex_unlock(&conn->chan_lock);
4012
4013 return 0;
4014 }
4015
4016 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4017 struct l2cap_cmd_hdr *cmd, u8 *data)
4018 {
4019 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4020 u16 dcid, scid;
4021 struct l2cap_chan *chan;
4022
4023 scid = __le16_to_cpu(rsp->scid);
4024 dcid = __le16_to_cpu(rsp->dcid);
4025
4026 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4027
4028 mutex_lock(&conn->chan_lock);
4029
4030 chan = __l2cap_get_chan_by_scid(conn, scid);
4031 if (!chan) {
4032 mutex_unlock(&conn->chan_lock);
4033 return 0;
4034 }
4035
4036 l2cap_chan_lock(chan);
4037
4038 l2cap_chan_hold(chan);
4039 l2cap_chan_del(chan, 0);
4040
4041 l2cap_chan_unlock(chan);
4042
4043 chan->ops->close(chan);
4044 l2cap_chan_put(chan);
4045
4046 mutex_unlock(&conn->chan_lock);
4047
4048 return 0;
4049 }
4050
4051 static inline int l2cap_information_req(struct l2cap_conn *conn,
4052 struct l2cap_cmd_hdr *cmd, u8 *data)
4053 {
4054 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4055 u16 type;
4056
4057 type = __le16_to_cpu(req->type);
4058
4059 BT_DBG("type 0x%4.4x", type);
4060
4061 if (type == L2CAP_IT_FEAT_MASK) {
4062 u8 buf[8];
4063 u32 feat_mask = l2cap_feat_mask;
4064 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4065 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4066 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4067 if (!disable_ertm)
4068 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4069 | L2CAP_FEAT_FCS;
4070 if (enable_hs)
4071 feat_mask |= L2CAP_FEAT_EXT_FLOW
4072 | L2CAP_FEAT_EXT_WINDOW;
4073
4074 put_unaligned_le32(feat_mask, rsp->data);
4075 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4076 buf);
4077 } else if (type == L2CAP_IT_FIXED_CHAN) {
4078 u8 buf[12];
4079 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4080
4081 if (enable_hs)
4082 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4083 else
4084 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4085
4086 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4087 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4088 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4089 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4090 buf);
4091 } else {
4092 struct l2cap_info_rsp rsp;
4093 rsp.type = cpu_to_le16(type);
4094 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4095 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4096 &rsp);
4097 }
4098
4099 return 0;
4100 }
4101
4102 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4103 struct l2cap_cmd_hdr *cmd, u8 *data)
4104 {
4105 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4106 u16 type, result;
4107
4108 type = __le16_to_cpu(rsp->type);
4109 result = __le16_to_cpu(rsp->result);
4110
4111 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4112
4113 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4114 if (cmd->ident != conn->info_ident ||
4115 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4116 return 0;
4117
4118 cancel_delayed_work(&conn->info_timer);
4119
4120 if (result != L2CAP_IR_SUCCESS) {
4121 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4122 conn->info_ident = 0;
4123
4124 l2cap_conn_start(conn);
4125
4126 return 0;
4127 }
4128
4129 switch (type) {
4130 case L2CAP_IT_FEAT_MASK:
4131 conn->feat_mask = get_unaligned_le32(rsp->data);
4132
4133 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4134 struct l2cap_info_req req;
4135 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4136
4137 conn->info_ident = l2cap_get_ident(conn);
4138
4139 l2cap_send_cmd(conn, conn->info_ident,
4140 L2CAP_INFO_REQ, sizeof(req), &req);
4141 } else {
4142 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4143 conn->info_ident = 0;
4144
4145 l2cap_conn_start(conn);
4146 }
4147 break;
4148
4149 case L2CAP_IT_FIXED_CHAN:
4150 conn->fixed_chan_mask = rsp->data[0];
4151 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4152 conn->info_ident = 0;
4153
4154 l2cap_conn_start(conn);
4155 break;
4156 }
4157
4158 return 0;
4159 }
4160
4161 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4162 struct l2cap_cmd_hdr *cmd,
4163 u16 cmd_len, void *data)
4164 {
4165 struct l2cap_create_chan_req *req = data;
4166 struct l2cap_chan *chan;
4167 u16 psm, scid;
4168
4169 if (cmd_len != sizeof(*req))
4170 return -EPROTO;
4171
4172 if (!enable_hs)
4173 return -EINVAL;
4174
4175 psm = le16_to_cpu(req->psm);
4176 scid = le16_to_cpu(req->scid);
4177
4178 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4179
4180 if (req->amp_id) {
4181 struct hci_dev *hdev;
4182
4183 /* Validate AMP controller id */
4184 hdev = hci_dev_get(req->amp_id);
4185 if (!hdev || hdev->dev_type != HCI_AMP ||
4186 !test_bit(HCI_UP, &hdev->flags)) {
4187 struct l2cap_create_chan_rsp rsp;
4188
4189 rsp.dcid = 0;
4190 rsp.scid = cpu_to_le16(scid);
4191 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4192 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4193
4194 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4195 sizeof(rsp), &rsp);
4196
4197 if (hdev)
4198 hci_dev_put(hdev);
4199
4200 return 0;
4201 }
4202
4203 hci_dev_put(hdev);
4204 }
4205
4206 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4207 req->amp_id);
4208
4209 return 0;
4210 }
4211
4212 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4213 {
4214 struct l2cap_move_chan_req req;
4215 u8 ident;
4216
4217 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4218
4219 ident = l2cap_get_ident(chan->conn);
4220 chan->ident = ident;
4221
4222 req.icid = cpu_to_le16(chan->scid);
4223 req.dest_amp_id = dest_amp_id;
4224
4225 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4226 &req);
4227
4228 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4229 }
4230
4231 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4232 {
4233 struct l2cap_move_chan_rsp rsp;
4234
4235 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4236
4237 rsp.icid = cpu_to_le16(chan->dcid);
4238 rsp.result = cpu_to_le16(result);
4239
4240 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4241 sizeof(rsp), &rsp);
4242 }
4243
4244 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4245 {
4246 struct l2cap_move_chan_cfm cfm;
4247
4248 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4249
4250 chan->ident = l2cap_get_ident(chan->conn);
4251
4252 cfm.icid = cpu_to_le16(chan->scid);
4253 cfm.result = cpu_to_le16(result);
4254
4255 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4256 sizeof(cfm), &cfm);
4257
4258 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4259 }
4260
4261 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4262 {
4263 struct l2cap_move_chan_cfm cfm;
4264
4265 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4266
4267 cfm.icid = cpu_to_le16(icid);
4268 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4269
4270 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4271 sizeof(cfm), &cfm);
4272 }
4273
4274 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4275 u16 icid)
4276 {
4277 struct l2cap_move_chan_cfm_rsp rsp;
4278
4279 BT_DBG("icid 0x%4.4x", icid);
4280
4281 rsp.icid = cpu_to_le16(icid);
4282 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4283 }
4284
4285 static void __release_logical_link(struct l2cap_chan *chan)
4286 {
4287 chan->hs_hchan = NULL;
4288 chan->hs_hcon = NULL;
4289
4290 /* Placeholder - release the logical link */
4291 }
4292
4293 static void l2cap_logical_fail(struct l2cap_chan *chan)
4294 {
4295 /* Logical link setup failed */
4296 if (chan->state != BT_CONNECTED) {
4297 /* Create channel failure, disconnect */
4298 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4299 return;
4300 }
4301
4302 switch (chan->move_role) {
4303 case L2CAP_MOVE_ROLE_RESPONDER:
4304 l2cap_move_done(chan);
4305 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4306 break;
4307 case L2CAP_MOVE_ROLE_INITIATOR:
4308 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4309 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4310 /* Remote has only sent pending or
4311 * success responses, clean up
4312 */
4313 l2cap_move_done(chan);
4314 }
4315
4316 		/* Other AMP move states imply that the move
4317 		 * has already been aborted
4318 		 */
4319 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4320 break;
4321 }
4322 }
4323
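/* A newly created AMP channel defers its configuration response until the
 * logical link comes up.  Once the hci_chan is connected, record the
 * high-speed hci_conn, send the pending L2CAP_CONF_RSP, and finish ERTM
 * initialisation if the inbound configuration is already complete.
 */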
4324 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4325 struct hci_chan *hchan)
4326 {
4327 struct l2cap_conf_rsp rsp;
4328 u8 code;
4329
4330 chan->hs_hcon = hchan->conn;
4331 chan->hs_hcon->l2cap_data = chan->conn;
4332
4333 code = l2cap_build_conf_rsp(chan, &rsp,
4334 L2CAP_CONF_SUCCESS, 0);
4335 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CONF_RSP, code,
4336 &rsp);
4337 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4338
4339 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4340 int err = 0;
4341
4342 set_default_fcs(chan);
4343
4344 err = l2cap_ertm_init(chan);
4345 if (err < 0)
4346 l2cap_send_disconn_req(chan->conn, chan, -err);
4347 else
4348 l2cap_chan_ready(chan);
4349 }
4350 }
4351
4352 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4353 struct hci_chan *hchan)
4354 {
4355 chan->hs_hcon = hchan->conn;
4356 chan->hs_hcon->l2cap_data = chan->conn;
4357
4358 BT_DBG("move_state %d", chan->move_state);
4359
4360 switch (chan->move_state) {
4361 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4362 /* Move confirm will be sent after a success
4363 * response is received
4364 */
4365 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4366 break;
4367 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4368 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4369 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4370 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4371 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4372 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4373 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4374 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4375 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4376 }
4377 break;
4378 default:
4379 /* Move was not in expected state, free the channel */
4380 __release_logical_link(chan);
4381
4382 chan->move_state = L2CAP_MOVE_STABLE;
4383 }
4384 }
4385
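/* Logical link confirmation for an AMP channel.  A non-zero status means
 * the logical link could not be set up: the channel is either disconnected
 * (creation failed) or the pending move is unwound and the peer notified.
 * On success the handling depends on whether the channel is still being
 * created or is being moved to or from the AMP controller.
 */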
4386 /* Call with chan locked */
4387 static void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4388 u8 status)
4389 {
4390 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4391
4392 if (status) {
4393 l2cap_logical_fail(chan);
4394 __release_logical_link(chan);
4395 return;
4396 }
4397
4398 if (chan->state != BT_CONNECTED) {
4399 /* Ignore logical link if channel is on BR/EDR */
4400 if (chan->local_amp_id)
4401 l2cap_logical_finish_create(chan, hchan);
4402 } else {
4403 l2cap_logical_finish_move(chan, hchan);
4404 }
4405 }
4406
4407 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4408 u8 local_amp_id, u8 remote_amp_id)
4409 {
4410 if (!test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4411 struct l2cap_conn_rsp rsp;
4412 char buf[128];
4413 rsp.scid = cpu_to_le16(chan->dcid);
4414 rsp.dcid = cpu_to_le16(chan->scid);
4415
4416 /* Incoming channel on AMP */
4417 if (result == L2CAP_CR_SUCCESS) {
4418 /* Send successful response */
4419 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4420 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4421 } else {
4422 /* Send negative response */
4423 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4424 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4425 }
4426
4427 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4428 sizeof(rsp), &rsp);
4429
4430 if (result == L2CAP_CR_SUCCESS) {
4431 __l2cap_state_change(chan, BT_CONFIG);
4432 set_bit(CONF_REQ_SENT, &chan->conf_state);
4433 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4434 L2CAP_CONF_REQ,
4435 l2cap_build_conf_req(chan, buf), buf);
4436 chan->num_conf_req++;
4437 }
4438 } else {
4439 /* Outgoing channel on AMP */
4440 if (result == L2CAP_CR_SUCCESS) {
4441 chan->local_amp_id = local_amp_id;
4442 l2cap_send_create_chan_req(chan, remote_amp_id);
4443 } else {
4444 /* Revert to BR/EDR connect */
4445 l2cap_send_conn_req(chan);
4446 }
4447 }
4448 }
4449
4450 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4451 u8 remote_amp_id)
4452 {
4453 l2cap_move_setup(chan);
4454 chan->move_id = local_amp_id;
4455 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4456
4457 l2cap_send_move_chan_req(chan, remote_amp_id);
4458 }
4459
4460 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4461 {
4462 struct hci_chan *hchan = NULL;
4463
4464 /* Placeholder - get hci_chan for logical link */
4465
4466 if (hchan) {
4467 if (hchan->state == BT_CONNECTED) {
4468 /* Logical link is ready to go */
4469 chan->hs_hcon = hchan->conn;
4470 chan->hs_hcon->l2cap_data = chan->conn;
4471 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4472 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4473
4474 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4475 } else {
4476 /* Wait for logical link to be ready */
4477 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4478 }
4479 } else {
4480 /* Logical link not available */
4481 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4482 }
4483 }
4484
4485 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4486 {
4487 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4488 u8 rsp_result;
4489 if (result == -EINVAL)
4490 rsp_result = L2CAP_MR_BAD_ID;
4491 else
4492 rsp_result = L2CAP_MR_NOT_ALLOWED;
4493
4494 l2cap_send_move_chan_rsp(chan, rsp_result);
4495 }
4496
4497 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4498 chan->move_state = L2CAP_MOVE_STABLE;
4499
4500 /* Restart data transmission */
4501 l2cap_ertm_send(chan);
4502 }
4503
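/* Physical link confirmation.  Called once the AMP physical link attempt
 * completes: a channel that is not yet connected continues channel
 * creation (or falls back to a BR/EDR connect), while a connected channel
 * either proceeds with the pending move as initiator/responder or cancels
 * the move on failure.
 */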
4504 void l2cap_physical_cfm(struct l2cap_chan *chan, int result, u8 local_amp_id,
4505 u8 remote_amp_id)
4506 {
4507 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4508 chan, result, local_amp_id, remote_amp_id);
4509
4510 l2cap_chan_lock(chan);
4511
4512 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4513 l2cap_chan_unlock(chan);
4514 return;
4515 }
4516
4517 if (chan->state != BT_CONNECTED) {
4518 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4519 } else if (result != L2CAP_MR_SUCCESS) {
4520 l2cap_do_move_cancel(chan, result);
4521 } else {
4522 switch (chan->move_role) {
4523 case L2CAP_MOVE_ROLE_INITIATOR:
4524 l2cap_do_move_initiate(chan, local_amp_id,
4525 remote_amp_id);
4526 break;
4527 case L2CAP_MOVE_ROLE_RESPONDER:
4528 l2cap_do_move_respond(chan, result);
4529 break;
4530 default:
4531 l2cap_do_move_cancel(chan, result);
4532 break;
4533 }
4534 }
4535
4536 l2cap_chan_unlock(chan);
4537 }
4538
4539 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4540 struct l2cap_cmd_hdr *cmd,
4541 u16 cmd_len, void *data)
4542 {
4543 struct l2cap_move_chan_req *req = data;
4544 struct l2cap_move_chan_rsp rsp;
4545 struct l2cap_chan *chan;
4546 u16 icid = 0;
4547 u16 result = L2CAP_MR_NOT_ALLOWED;
4548
4549 if (cmd_len != sizeof(*req))
4550 return -EPROTO;
4551
4552 icid = le16_to_cpu(req->icid);
4553
4554 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4555
4556 if (!enable_hs)
4557 return -EINVAL;
4558
4559 chan = l2cap_get_chan_by_dcid(conn, icid);
4560 if (!chan) {
4561 rsp.icid = cpu_to_le16(icid);
4562 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4563 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4564 sizeof(rsp), &rsp);
4565 return 0;
4566 }
4567
4568 chan->ident = cmd->ident;
4569
4570 if (chan->scid < L2CAP_CID_DYN_START ||
4571 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4572 (chan->mode != L2CAP_MODE_ERTM &&
4573 chan->mode != L2CAP_MODE_STREAMING)) {
4574 result = L2CAP_MR_NOT_ALLOWED;
4575 goto send_move_response;
4576 }
4577
4578 if (chan->local_amp_id == req->dest_amp_id) {
4579 result = L2CAP_MR_SAME_ID;
4580 goto send_move_response;
4581 }
4582
4583 if (req->dest_amp_id) {
4584 struct hci_dev *hdev;
4585 hdev = hci_dev_get(req->dest_amp_id);
4586 if (!hdev || hdev->dev_type != HCI_AMP ||
4587 !test_bit(HCI_UP, &hdev->flags)) {
4588 if (hdev)
4589 hci_dev_put(hdev);
4590
4591 result = L2CAP_MR_BAD_ID;
4592 goto send_move_response;
4593 }
4594 hci_dev_put(hdev);
4595 }
4596
4597 /* Detect a move collision. Only send a collision response
4598 * if this side has "lost", otherwise proceed with the move.
4599 * The winner has the larger bd_addr.
4600 */
4601 if ((__chan_is_moving(chan) ||
4602 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4603 bacmp(conn->src, conn->dst) > 0) {
4604 result = L2CAP_MR_COLLISION;
4605 goto send_move_response;
4606 }
4607
4608 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4609 l2cap_move_setup(chan);
4610 chan->move_id = req->dest_amp_id;
4611 icid = chan->dcid;
4612
4613 if (!req->dest_amp_id) {
4614 /* Moving to BR/EDR */
4615 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4616 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4617 result = L2CAP_MR_PEND;
4618 } else {
4619 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4620 result = L2CAP_MR_SUCCESS;
4621 }
4622 } else {
4623 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4624 /* Placeholder - uncomment when amp functions are available */
4625 /*amp_accept_physical(chan, req->dest_amp_id);*/
4626 result = L2CAP_MR_PEND;
4627 }
4628
4629 send_move_response:
4630 l2cap_send_move_chan_rsp(chan, result);
4631
4632 l2cap_chan_unlock(chan);
4633
4634 return 0;
4635 }
4636
4637 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4638 {
4639 struct l2cap_chan *chan;
4640 struct hci_chan *hchan = NULL;
4641
4642 chan = l2cap_get_chan_by_scid(conn, icid);
4643 if (!chan) {
4644 l2cap_send_move_chan_cfm_icid(conn, icid);
4645 return;
4646 }
4647
4648 __clear_chan_timer(chan);
4649 if (result == L2CAP_MR_PEND)
4650 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4651
4652 switch (chan->move_state) {
4653 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4654 /* Move confirm will be sent when logical link
4655 * is complete.
4656 */
4657 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4658 break;
4659 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4660 if (result == L2CAP_MR_PEND) {
4661 break;
4662 } else if (test_bit(CONN_LOCAL_BUSY,
4663 &chan->conn_state)) {
4664 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4665 } else {
4666 /* Logical link is up or moving to BR/EDR,
4667 * proceed with move
4668 */
4669 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4670 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4671 }
4672 break;
4673 case L2CAP_MOVE_WAIT_RSP:
4674 /* Moving to AMP */
4675 if (result == L2CAP_MR_SUCCESS) {
4676 /* Remote is ready, send confirm immediately
4677 * after logical link is ready
4678 */
4679 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4680 } else {
4681 /* Both logical link and move success
4682 * are required to confirm
4683 */
4684 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4685 }
4686
4687 /* Placeholder - get hci_chan for logical link */
4688 if (!hchan) {
4689 /* Logical link not available */
4690 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4691 break;
4692 }
4693
4694 /* If the logical link is not yet connected, do not
4695 * send confirmation.
4696 */
4697 if (hchan->state != BT_CONNECTED)
4698 break;
4699
4700 /* Logical link is already ready to go */
4701
4702 chan->hs_hcon = hchan->conn;
4703 chan->hs_hcon->l2cap_data = chan->conn;
4704
4705 if (result == L2CAP_MR_SUCCESS) {
4706 /* Can confirm now */
4707 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4708 } else {
4709 /* Now only need move success
4710 * to confirm
4711 */
4712 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4713 }
4714
4715 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4716 break;
4717 default:
4718 		/* Any other AMP move state means the move failed. */
4719 chan->move_id = chan->local_amp_id;
4720 l2cap_move_done(chan);
4721 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4722 }
4723
4724 l2cap_chan_unlock(chan);
4725 }
4726
4727 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
4728 u16 result)
4729 {
4730 struct l2cap_chan *chan;
4731
4732 chan = l2cap_get_chan_by_ident(conn, ident);
4733 if (!chan) {
4734 /* Could not locate channel, icid is best guess */
4735 l2cap_send_move_chan_cfm_icid(conn, icid);
4736 return;
4737 }
4738
4739 __clear_chan_timer(chan);
4740
4741 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4742 if (result == L2CAP_MR_COLLISION) {
4743 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4744 } else {
4745 /* Cleanup - cancel move */
4746 chan->move_id = chan->local_amp_id;
4747 l2cap_move_done(chan);
4748 }
4749 }
4750
4751 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4752
4753 l2cap_chan_unlock(chan);
4754 }
4755
4756 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4757 struct l2cap_cmd_hdr *cmd,
4758 u16 cmd_len, void *data)
4759 {
4760 struct l2cap_move_chan_rsp *rsp = data;
4761 u16 icid, result;
4762
4763 if (cmd_len != sizeof(*rsp))
4764 return -EPROTO;
4765
4766 icid = le16_to_cpu(rsp->icid);
4767 result = le16_to_cpu(rsp->result);
4768
4769 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4770
4771 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
4772 l2cap_move_continue(conn, icid, result);
4773 else
4774 l2cap_move_fail(conn, cmd->ident, icid, result);
4775
4776 return 0;
4777 }
4778
4779 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4780 struct l2cap_cmd_hdr *cmd,
4781 u16 cmd_len, void *data)
4782 {
4783 struct l2cap_move_chan_cfm *cfm = data;
4784 struct l2cap_chan *chan;
4785 u16 icid, result;
4786
4787 if (cmd_len != sizeof(*cfm))
4788 return -EPROTO;
4789
4790 icid = le16_to_cpu(cfm->icid);
4791 result = le16_to_cpu(cfm->result);
4792
4793 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4794
4795 chan = l2cap_get_chan_by_dcid(conn, icid);
4796 if (!chan) {
4797 /* Spec requires a response even if the icid was not found */
4798 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4799 return 0;
4800 }
4801
4802 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
4803 if (result == L2CAP_MC_CONFIRMED) {
4804 chan->local_amp_id = chan->move_id;
4805 if (!chan->local_amp_id)
4806 __release_logical_link(chan);
4807 } else {
4808 chan->move_id = chan->local_amp_id;
4809 }
4810
4811 l2cap_move_done(chan);
4812 }
4813
4814 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4815
4816 l2cap_chan_unlock(chan);
4817
4818 return 0;
4819 }
4820
4821 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4822 struct l2cap_cmd_hdr *cmd,
4823 u16 cmd_len, void *data)
4824 {
4825 struct l2cap_move_chan_cfm_rsp *rsp = data;
4826 struct l2cap_chan *chan;
4827 u16 icid;
4828
4829 if (cmd_len != sizeof(*rsp))
4830 return -EPROTO;
4831
4832 icid = le16_to_cpu(rsp->icid);
4833
4834 BT_DBG("icid 0x%4.4x", icid);
4835
4836 chan = l2cap_get_chan_by_scid(conn, icid);
4837 if (!chan)
4838 return 0;
4839
4840 __clear_chan_timer(chan);
4841
4842 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
4843 chan->local_amp_id = chan->move_id;
4844
4845 if (!chan->local_amp_id && chan->hs_hchan)
4846 __release_logical_link(chan);
4847
4848 l2cap_move_done(chan);
4849 }
4850
4851 l2cap_chan_unlock(chan);
4852
4853 return 0;
4854 }
4855
4856 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4857 u16 to_multiplier)
4858 {
4859 u16 max_latency;
4860
4861 if (min > max || min < 6 || max > 3200)
4862 return -EINVAL;
4863
4864 if (to_multiplier < 10 || to_multiplier > 3200)
4865 return -EINVAL;
4866
4867 if (max >= to_multiplier * 8)
4868 return -EINVAL;
4869
4870 max_latency = (to_multiplier * 8 / max) - 1;
4871 if (latency > 499 || latency > max_latency)
4872 return -EINVAL;
4873
4874 return 0;
4875 }
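/* Worked example of the checks above, assuming the usual LE units
 * (connection interval in 1.25 ms steps, supervision timeout in 10 ms
 * steps):
 *
 *   min = 24, max = 40, latency = 0, to_multiplier = 400
 *     -> 24 <= 40, 24 >= 6, 40 <= 3200, 10 <= 400 <= 3200, 40 < 3200
 *     -> max_latency = (400 * 8 / 40) - 1 = 79, and 0 <= 79  => accepted
 *
 * The same request with latency = 100 would be rejected, since 100 > 79.
 */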
4876
4877 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4878 struct l2cap_cmd_hdr *cmd,
4879 u8 *data)
4880 {
4881 struct hci_conn *hcon = conn->hcon;
4882 struct l2cap_conn_param_update_req *req;
4883 struct l2cap_conn_param_update_rsp rsp;
4884 u16 min, max, latency, to_multiplier, cmd_len;
4885 int err;
4886
4887 if (!(hcon->link_mode & HCI_LM_MASTER))
4888 return -EINVAL;
4889
4890 cmd_len = __le16_to_cpu(cmd->len);
4891 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4892 return -EPROTO;
4893
4894 req = (struct l2cap_conn_param_update_req *) data;
4895 min = __le16_to_cpu(req->min);
4896 max = __le16_to_cpu(req->max);
4897 latency = __le16_to_cpu(req->latency);
4898 to_multiplier = __le16_to_cpu(req->to_multiplier);
4899
4900 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4901 min, max, latency, to_multiplier);
4902
4903 memset(&rsp, 0, sizeof(rsp));
4904
4905 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4906 if (err)
4907 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4908 else
4909 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4910
4911 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4912 sizeof(rsp), &rsp);
4913
4914 if (!err)
4915 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
4916
4917 return 0;
4918 }
4919
4920 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4921 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4922 u8 *data)
4923 {
4924 int err = 0;
4925
4926 switch (cmd->code) {
4927 case L2CAP_COMMAND_REJ:
4928 l2cap_command_rej(conn, cmd, data);
4929 break;
4930
4931 case L2CAP_CONN_REQ:
4932 err = l2cap_connect_req(conn, cmd, data);
4933 break;
4934
4935 case L2CAP_CONN_RSP:
4936 case L2CAP_CREATE_CHAN_RSP:
4937 err = l2cap_connect_create_rsp(conn, cmd, data);
4938 break;
4939
4940 case L2CAP_CONF_REQ:
4941 err = l2cap_config_req(conn, cmd, cmd_len, data);
4942 break;
4943
4944 case L2CAP_CONF_RSP:
4945 err = l2cap_config_rsp(conn, cmd, data);
4946 break;
4947
4948 case L2CAP_DISCONN_REQ:
4949 err = l2cap_disconnect_req(conn, cmd, data);
4950 break;
4951
4952 case L2CAP_DISCONN_RSP:
4953 err = l2cap_disconnect_rsp(conn, cmd, data);
4954 break;
4955
4956 case L2CAP_ECHO_REQ:
4957 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4958 break;
4959
4960 case L2CAP_ECHO_RSP:
4961 break;
4962
4963 case L2CAP_INFO_REQ:
4964 err = l2cap_information_req(conn, cmd, data);
4965 break;
4966
4967 case L2CAP_INFO_RSP:
4968 err = l2cap_information_rsp(conn, cmd, data);
4969 break;
4970
4971 case L2CAP_CREATE_CHAN_REQ:
4972 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4973 break;
4974
4975 case L2CAP_MOVE_CHAN_REQ:
4976 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4977 break;
4978
4979 case L2CAP_MOVE_CHAN_RSP:
4980 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4981 break;
4982
4983 case L2CAP_MOVE_CHAN_CFM:
4984 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4985 break;
4986
4987 case L2CAP_MOVE_CHAN_CFM_RSP:
4988 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4989 break;
4990
4991 default:
4992 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4993 err = -EINVAL;
4994 break;
4995 }
4996
4997 return err;
4998 }
4999
5000 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5001 struct l2cap_cmd_hdr *cmd, u8 *data)
5002 {
5003 switch (cmd->code) {
5004 case L2CAP_COMMAND_REJ:
5005 return 0;
5006
5007 case L2CAP_CONN_PARAM_UPDATE_REQ:
5008 return l2cap_conn_param_update_req(conn, cmd, data);
5009
5010 case L2CAP_CONN_PARAM_UPDATE_RSP:
5011 return 0;
5012
5013 default:
5014 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5015 return -EINVAL;
5016 }
5017 }
5018
5019 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5020 struct sk_buff *skb)
5021 {
5022 u8 *data = skb->data;
5023 int len = skb->len;
5024 struct l2cap_cmd_hdr cmd;
5025 int err;
5026
5027 l2cap_raw_recv(conn, skb);
5028
5029 while (len >= L2CAP_CMD_HDR_SIZE) {
5030 u16 cmd_len;
5031 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5032 data += L2CAP_CMD_HDR_SIZE;
5033 len -= L2CAP_CMD_HDR_SIZE;
5034
5035 cmd_len = le16_to_cpu(cmd.len);
5036
5037 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5038 cmd.ident);
5039
5040 if (cmd_len > len || !cmd.ident) {
5041 BT_DBG("corrupted command");
5042 break;
5043 }
5044
5045 if (conn->hcon->type == LE_LINK)
5046 err = l2cap_le_sig_cmd(conn, &cmd, data);
5047 else
5048 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5049
5050 if (err) {
5051 struct l2cap_cmd_rej_unk rej;
5052
5053 BT_ERR("Wrong link type (%d)", err);
5054
5055 /* FIXME: Map err to a valid reason */
5056 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5057 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5058 sizeof(rej), &rej);
5059 }
5060
5061 data += cmd_len;
5062 len -= cmd_len;
5063 }
5064
5065 kfree_skb(skb);
5066 }
5067
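/* When CRC16 FCS is in use, the last two octets of the PDU carry a
 * checksum computed over the L2CAP header and payload.  The trailer is
 * trimmed here and the received value compared against a locally computed
 * CRC; a mismatch makes the frame look corrupted to the caller.
 */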
5068 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5069 {
5070 u16 our_fcs, rcv_fcs;
5071 int hdr_size;
5072
5073 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5074 hdr_size = L2CAP_EXT_HDR_SIZE;
5075 else
5076 hdr_size = L2CAP_ENH_HDR_SIZE;
5077
5078 if (chan->fcs == L2CAP_FCS_CRC16) {
5079 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5080 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5081 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5082
5083 if (our_fcs != rcv_fcs)
5084 return -EBADMSG;
5085 }
5086 return 0;
5087 }
5088
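/* Answer a poll (P=1) with the F bit set: report RNR while locally busy,
 * flush any pending I-frames, and fall back to an RR S-frame if the F bit
 * still has not gone out on an I-frame.
 */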
5089 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5090 {
5091 struct l2cap_ctrl control;
5092
5093 BT_DBG("chan %p", chan);
5094
5095 memset(&control, 0, sizeof(control));
5096 control.sframe = 1;
5097 control.final = 1;
5098 control.reqseq = chan->buffer_seq;
5099 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5100
5101 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5102 control.super = L2CAP_SUPER_RNR;
5103 l2cap_send_sframe(chan, &control);
5104 }
5105
5106 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5107 chan->unacked_frames > 0)
5108 __set_retrans_timer(chan);
5109
5110 /* Send pending iframes */
5111 l2cap_ertm_send(chan);
5112
5113 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5114 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5115 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5116 * send it now.
5117 */
5118 control.super = L2CAP_SUPER_RR;
5119 l2cap_send_sframe(chan, &control);
5120 }
5121 }
5122
5123 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5124 struct sk_buff **last_frag)
5125 {
5126 /* skb->len reflects data in skb as well as all fragments
5127 * skb->data_len reflects only data in fragments
5128 */
5129 if (!skb_has_frag_list(skb))
5130 skb_shinfo(skb)->frag_list = new_frag;
5131
5132 new_frag->next = NULL;
5133
5134 (*last_frag)->next = new_frag;
5135 *last_frag = new_frag;
5136
5137 skb->len += new_frag->len;
5138 skb->data_len += new_frag->len;
5139 skb->truesize += new_frag->truesize;
5140 }
5141
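/* Reassemble an SDU from I-frame payloads according to the SAR field:
 * unsegmented frames go straight to the channel's recv callback, a start
 * fragment carries a 2-byte SDU length and opens a partial SDU, and
 * continuation/end fragments are chained onto it.  Oversized or
 * out-of-place fragments drop the partial SDU and return an error.
 */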
5142 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5143 struct l2cap_ctrl *control)
5144 {
5145 int err = -EINVAL;
5146
5147 switch (control->sar) {
5148 case L2CAP_SAR_UNSEGMENTED:
5149 if (chan->sdu)
5150 break;
5151
5152 err = chan->ops->recv(chan, skb);
5153 break;
5154
5155 case L2CAP_SAR_START:
5156 if (chan->sdu)
5157 break;
5158
5159 chan->sdu_len = get_unaligned_le16(skb->data);
5160 skb_pull(skb, L2CAP_SDULEN_SIZE);
5161
5162 if (chan->sdu_len > chan->imtu) {
5163 err = -EMSGSIZE;
5164 break;
5165 }
5166
5167 if (skb->len >= chan->sdu_len)
5168 break;
5169
5170 chan->sdu = skb;
5171 chan->sdu_last_frag = skb;
5172
5173 skb = NULL;
5174 err = 0;
5175 break;
5176
5177 case L2CAP_SAR_CONTINUE:
5178 if (!chan->sdu)
5179 break;
5180
5181 append_skb_frag(chan->sdu, skb,
5182 &chan->sdu_last_frag);
5183 skb = NULL;
5184
5185 if (chan->sdu->len >= chan->sdu_len)
5186 break;
5187
5188 err = 0;
5189 break;
5190
5191 case L2CAP_SAR_END:
5192 if (!chan->sdu)
5193 break;
5194
5195 append_skb_frag(chan->sdu, skb,
5196 &chan->sdu_last_frag);
5197 skb = NULL;
5198
5199 if (chan->sdu->len != chan->sdu_len)
5200 break;
5201
5202 err = chan->ops->recv(chan, chan->sdu);
5203
5204 if (!err) {
5205 /* Reassembly complete */
5206 chan->sdu = NULL;
5207 chan->sdu_last_frag = NULL;
5208 chan->sdu_len = 0;
5209 }
5210 break;
5211 }
5212
5213 if (err) {
5214 kfree_skb(skb);
5215 kfree_skb(chan->sdu);
5216 chan->sdu = NULL;
5217 chan->sdu_last_frag = NULL;
5218 chan->sdu_len = 0;
5219 }
5220
5221 return err;
5222 }
5223
5224 static int l2cap_resegment(struct l2cap_chan *chan)
5225 {
5226 /* Placeholder */
5227 return 0;
5228 }
5229
5230 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5231 {
5232 u8 event;
5233
5234 if (chan->mode != L2CAP_MODE_ERTM)
5235 return;
5236
5237 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5238 l2cap_tx(chan, NULL, NULL, event);
5239 }
5240
5241 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5242 {
5243 int err = 0;
5244 /* Pass sequential frames to l2cap_reassemble_sdu()
5245 * until a gap is encountered.
5246 */
5247
5248 BT_DBG("chan %p", chan);
5249
5250 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5251 struct sk_buff *skb;
5252 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5253 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5254
5255 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5256
5257 if (!skb)
5258 break;
5259
5260 skb_unlink(skb, &chan->srej_q);
5261 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5262 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5263 if (err)
5264 break;
5265 }
5266
5267 if (skb_queue_empty(&chan->srej_q)) {
5268 chan->rx_state = L2CAP_RX_STATE_RECV;
5269 l2cap_send_ack(chan);
5270 }
5271
5272 return err;
5273 }
5274
5275 static void l2cap_handle_srej(struct l2cap_chan *chan,
5276 struct l2cap_ctrl *control)
5277 {
5278 struct sk_buff *skb;
5279
5280 BT_DBG("chan %p, control %p", chan, control);
5281
5282 if (control->reqseq == chan->next_tx_seq) {
5283 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5284 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5285 return;
5286 }
5287
5288 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5289
5290 if (skb == NULL) {
5291 BT_DBG("Seq %d not available for retransmission",
5292 control->reqseq);
5293 return;
5294 }
5295
5296 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5297 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5298 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5299 return;
5300 }
5301
5302 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5303
5304 if (control->poll) {
5305 l2cap_pass_to_tx(chan, control);
5306
5307 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5308 l2cap_retransmit(chan, control);
5309 l2cap_ertm_send(chan);
5310
5311 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5312 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5313 chan->srej_save_reqseq = control->reqseq;
5314 }
5315 } else {
5316 l2cap_pass_to_tx_fbit(chan, control);
5317
5318 if (control->final) {
5319 if (chan->srej_save_reqseq != control->reqseq ||
5320 !test_and_clear_bit(CONN_SREJ_ACT,
5321 &chan->conn_state))
5322 l2cap_retransmit(chan, control);
5323 } else {
5324 l2cap_retransmit(chan, control);
5325 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5326 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5327 chan->srej_save_reqseq = control->reqseq;
5328 }
5329 }
5330 }
5331 }
5332
5333 static void l2cap_handle_rej(struct l2cap_chan *chan,
5334 struct l2cap_ctrl *control)
5335 {
5336 struct sk_buff *skb;
5337
5338 BT_DBG("chan %p, control %p", chan, control);
5339
5340 if (control->reqseq == chan->next_tx_seq) {
5341 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5342 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5343 return;
5344 }
5345
5346 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5347
5348 if (chan->max_tx && skb &&
5349 bt_cb(skb)->control.retries >= chan->max_tx) {
5350 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5351 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5352 return;
5353 }
5354
5355 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5356
5357 l2cap_pass_to_tx(chan, control);
5358
5359 if (control->final) {
5360 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5361 l2cap_retransmit_all(chan, control);
5362 } else {
5363 l2cap_retransmit_all(chan, control);
5364 l2cap_ertm_send(chan);
5365 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5366 set_bit(CONN_REJ_ACT, &chan->conn_state);
5367 }
5368 }
5369
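/* Classify an incoming I-frame sequence number relative to the receive
 * window and any outstanding SREJ requests.  The result tells the receive
 * state machine whether the frame is the expected one, a duplicate, an
 * out-of-sequence frame that needs SREJs, a frame answering an SREJ, or
 * an invalid frame that must be ignored or must trigger a disconnect.
 */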
5370 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5371 {
5372 BT_DBG("chan %p, txseq %d", chan, txseq);
5373
5374 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5375 chan->expected_tx_seq);
5376
5377 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5378 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5379 chan->tx_win) {
5380 /* See notes below regarding "double poll" and
5381 * invalid packets.
5382 */
5383 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5384 BT_DBG("Invalid/Ignore - after SREJ");
5385 return L2CAP_TXSEQ_INVALID_IGNORE;
5386 } else {
5387 BT_DBG("Invalid - in window after SREJ sent");
5388 return L2CAP_TXSEQ_INVALID;
5389 }
5390 }
5391
5392 if (chan->srej_list.head == txseq) {
5393 BT_DBG("Expected SREJ");
5394 return L2CAP_TXSEQ_EXPECTED_SREJ;
5395 }
5396
5397 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5398 BT_DBG("Duplicate SREJ - txseq already stored");
5399 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5400 }
5401
5402 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5403 BT_DBG("Unexpected SREJ - not requested");
5404 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5405 }
5406 }
5407
5408 if (chan->expected_tx_seq == txseq) {
5409 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5410 chan->tx_win) {
5411 BT_DBG("Invalid - txseq outside tx window");
5412 return L2CAP_TXSEQ_INVALID;
5413 } else {
5414 BT_DBG("Expected");
5415 return L2CAP_TXSEQ_EXPECTED;
5416 }
5417 }
5418
5419 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5420 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5421 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5422 return L2CAP_TXSEQ_DUPLICATE;
5423 }
5424
5425 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5426 /* A source of invalid packets is a "double poll" condition,
5427 * where delays cause us to send multiple poll packets. If
5428 * the remote stack receives and processes both polls,
5429 * sequence numbers can wrap around in such a way that a
5430 * resent frame has a sequence number that looks like new data
5431 * with a sequence gap. This would trigger an erroneous SREJ
5432 * request.
5433 *
5434 * Fortunately, this is impossible with a tx window that's
5435 * less than half of the maximum sequence number, which allows
5436 * invalid frames to be safely ignored.
5437 *
5438 * With tx window sizes greater than half of the tx window
5439 * maximum, the frame is invalid and cannot be ignored. This
5440 * causes a disconnect.
5441 */
5442
5443 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5444 BT_DBG("Invalid/Ignore - txseq outside tx window");
5445 return L2CAP_TXSEQ_INVALID_IGNORE;
5446 } else {
5447 BT_DBG("Invalid - txseq outside tx window");
5448 return L2CAP_TXSEQ_INVALID;
5449 }
5450 } else {
5451 BT_DBG("Unexpected - txseq indicates missing frames");
5452 return L2CAP_TXSEQ_UNEXPECTED;
5453 }
5454 }
5455
5456 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5457 struct l2cap_ctrl *control,
5458 struct sk_buff *skb, u8 event)
5459 {
5460 int err = 0;
5461 	bool skb_in_use = false;
5462
5463 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5464 event);
5465
5466 switch (event) {
5467 case L2CAP_EV_RECV_IFRAME:
5468 switch (l2cap_classify_txseq(chan, control->txseq)) {
5469 case L2CAP_TXSEQ_EXPECTED:
5470 l2cap_pass_to_tx(chan, control);
5471
5472 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5473 BT_DBG("Busy, discarding expected seq %d",
5474 control->txseq);
5475 break;
5476 }
5477
5478 chan->expected_tx_seq = __next_seq(chan,
5479 control->txseq);
5480
5481 chan->buffer_seq = chan->expected_tx_seq;
5482 			skb_in_use = true;
5483
5484 err = l2cap_reassemble_sdu(chan, skb, control);
5485 if (err)
5486 break;
5487
5488 if (control->final) {
5489 if (!test_and_clear_bit(CONN_REJ_ACT,
5490 &chan->conn_state)) {
5491 control->final = 0;
5492 l2cap_retransmit_all(chan, control);
5493 l2cap_ertm_send(chan);
5494 }
5495 }
5496
5497 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5498 l2cap_send_ack(chan);
5499 break;
5500 case L2CAP_TXSEQ_UNEXPECTED:
5501 l2cap_pass_to_tx(chan, control);
5502
5503 /* Can't issue SREJ frames in the local busy state.
5504 * Drop this frame, it will be seen as missing
5505 * when local busy is exited.
5506 */
5507 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5508 BT_DBG("Busy, discarding unexpected seq %d",
5509 control->txseq);
5510 break;
5511 }
5512
5513 /* There was a gap in the sequence, so an SREJ
5514 * must be sent for each missing frame. The
5515 * current frame is stored for later use.
5516 */
5517 skb_queue_tail(&chan->srej_q, skb);
5518 			skb_in_use = true;
5519 BT_DBG("Queued %p (queue len %d)", skb,
5520 skb_queue_len(&chan->srej_q));
5521
5522 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5523 l2cap_seq_list_clear(&chan->srej_list);
5524 l2cap_send_srej(chan, control->txseq);
5525
5526 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5527 break;
5528 case L2CAP_TXSEQ_DUPLICATE:
5529 l2cap_pass_to_tx(chan, control);
5530 break;
5531 case L2CAP_TXSEQ_INVALID_IGNORE:
5532 break;
5533 case L2CAP_TXSEQ_INVALID:
5534 default:
5535 l2cap_send_disconn_req(chan->conn, chan,
5536 ECONNRESET);
5537 break;
5538 }
5539 break;
5540 case L2CAP_EV_RECV_RR:
5541 l2cap_pass_to_tx(chan, control);
5542 if (control->final) {
5543 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5544
5545 if (!test_and_clear_bit(CONN_REJ_ACT,
5546 &chan->conn_state)) {
5547 control->final = 0;
5548 l2cap_retransmit_all(chan, control);
5549 }
5550
5551 l2cap_ertm_send(chan);
5552 } else if (control->poll) {
5553 l2cap_send_i_or_rr_or_rnr(chan);
5554 } else {
5555 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5556 &chan->conn_state) &&
5557 chan->unacked_frames)
5558 __set_retrans_timer(chan);
5559
5560 l2cap_ertm_send(chan);
5561 }
5562 break;
5563 case L2CAP_EV_RECV_RNR:
5564 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5565 l2cap_pass_to_tx(chan, control);
5566 if (control && control->poll) {
5567 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5568 l2cap_send_rr_or_rnr(chan, 0);
5569 }
5570 __clear_retrans_timer(chan);
5571 l2cap_seq_list_clear(&chan->retrans_list);
5572 break;
5573 case L2CAP_EV_RECV_REJ:
5574 l2cap_handle_rej(chan, control);
5575 break;
5576 case L2CAP_EV_RECV_SREJ:
5577 l2cap_handle_srej(chan, control);
5578 break;
5579 default:
5580 break;
5581 }
5582
5583 if (skb && !skb_in_use) {
5584 BT_DBG("Freeing %p", skb);
5585 kfree_skb(skb);
5586 }
5587
5588 return err;
5589 }
5590
5591 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5592 struct l2cap_ctrl *control,
5593 struct sk_buff *skb, u8 event)
5594 {
5595 int err = 0;
5596 u16 txseq = control->txseq;
5597 	bool skb_in_use = false;
5598
5599 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5600 event);
5601
5602 switch (event) {
5603 case L2CAP_EV_RECV_IFRAME:
5604 switch (l2cap_classify_txseq(chan, txseq)) {
5605 case L2CAP_TXSEQ_EXPECTED:
5606 /* Keep frame for reassembly later */
5607 l2cap_pass_to_tx(chan, control);
5608 skb_queue_tail(&chan->srej_q, skb);
5609 			skb_in_use = true;
5610 BT_DBG("Queued %p (queue len %d)", skb,
5611 skb_queue_len(&chan->srej_q));
5612
5613 chan->expected_tx_seq = __next_seq(chan, txseq);
5614 break;
5615 case L2CAP_TXSEQ_EXPECTED_SREJ:
5616 l2cap_seq_list_pop(&chan->srej_list);
5617
5618 l2cap_pass_to_tx(chan, control);
5619 skb_queue_tail(&chan->srej_q, skb);
5620 			skb_in_use = true;
5621 BT_DBG("Queued %p (queue len %d)", skb,
5622 skb_queue_len(&chan->srej_q));
5623
5624 err = l2cap_rx_queued_iframes(chan);
5625 if (err)
5626 break;
5627
5628 break;
5629 case L2CAP_TXSEQ_UNEXPECTED:
5630 /* Got a frame that can't be reassembled yet.
5631 * Save it for later, and send SREJs to cover
5632 * the missing frames.
5633 */
5634 skb_queue_tail(&chan->srej_q, skb);
5635 			skb_in_use = true;
5636 BT_DBG("Queued %p (queue len %d)", skb,
5637 skb_queue_len(&chan->srej_q));
5638
5639 l2cap_pass_to_tx(chan, control);
5640 l2cap_send_srej(chan, control->txseq);
5641 break;
5642 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5643 /* This frame was requested with an SREJ, but
5644 * some expected retransmitted frames are
5645 * missing. Request retransmission of missing
5646 * SREJ'd frames.
5647 */
5648 skb_queue_tail(&chan->srej_q, skb);
5649 			skb_in_use = true;
5650 BT_DBG("Queued %p (queue len %d)", skb,
5651 skb_queue_len(&chan->srej_q));
5652
5653 l2cap_pass_to_tx(chan, control);
5654 l2cap_send_srej_list(chan, control->txseq);
5655 break;
5656 case L2CAP_TXSEQ_DUPLICATE_SREJ:
5657 /* We've already queued this frame. Drop this copy. */
5658 l2cap_pass_to_tx(chan, control);
5659 break;
5660 case L2CAP_TXSEQ_DUPLICATE:
5661 /* Expecting a later sequence number, so this frame
5662 * was already received. Ignore it completely.
5663 */
5664 break;
5665 case L2CAP_TXSEQ_INVALID_IGNORE:
5666 break;
5667 case L2CAP_TXSEQ_INVALID:
5668 default:
5669 l2cap_send_disconn_req(chan->conn, chan,
5670 ECONNRESET);
5671 break;
5672 }
5673 break;
5674 case L2CAP_EV_RECV_RR:
5675 l2cap_pass_to_tx(chan, control);
5676 if (control->final) {
5677 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5678
5679 if (!test_and_clear_bit(CONN_REJ_ACT,
5680 &chan->conn_state)) {
5681 control->final = 0;
5682 l2cap_retransmit_all(chan, control);
5683 }
5684
5685 l2cap_ertm_send(chan);
5686 } else if (control->poll) {
5687 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5688 &chan->conn_state) &&
5689 chan->unacked_frames) {
5690 __set_retrans_timer(chan);
5691 }
5692
5693 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5694 l2cap_send_srej_tail(chan);
5695 } else {
5696 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5697 &chan->conn_state) &&
5698 chan->unacked_frames)
5699 __set_retrans_timer(chan);
5700
5701 l2cap_send_ack(chan);
5702 }
5703 break;
5704 case L2CAP_EV_RECV_RNR:
5705 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5706 l2cap_pass_to_tx(chan, control);
5707 if (control->poll) {
5708 l2cap_send_srej_tail(chan);
5709 } else {
5710 struct l2cap_ctrl rr_control;
5711 memset(&rr_control, 0, sizeof(rr_control));
5712 rr_control.sframe = 1;
5713 rr_control.super = L2CAP_SUPER_RR;
5714 rr_control.reqseq = chan->buffer_seq;
5715 l2cap_send_sframe(chan, &rr_control);
5716 }
5717
5718 break;
5719 case L2CAP_EV_RECV_REJ:
5720 l2cap_handle_rej(chan, control);
5721 break;
5722 case L2CAP_EV_RECV_SREJ:
5723 l2cap_handle_srej(chan, control);
5724 break;
5725 }
5726
5727 if (skb && !skb_in_use) {
5728 BT_DBG("Freeing %p", skb);
5729 kfree_skb(skb);
5730 }
5731
5732 return err;
5733 }
5734
5735 static int l2cap_finish_move(struct l2cap_chan *chan)
5736 {
5737 BT_DBG("chan %p", chan);
5738
5739 chan->rx_state = L2CAP_RX_STATE_RECV;
5740
5741 if (chan->hs_hcon)
5742 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
5743 else
5744 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
5745
5746 return l2cap_resegment(chan);
5747 }
5748
5749 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
5750 struct l2cap_ctrl *control,
5751 struct sk_buff *skb, u8 event)
5752 {
5753 int err;
5754
5755 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5756 event);
5757
5758 if (!control->poll)
5759 return -EPROTO;
5760
5761 l2cap_process_reqseq(chan, control->reqseq);
5762
5763 if (!skb_queue_empty(&chan->tx_q))
5764 chan->tx_send_head = skb_peek(&chan->tx_q);
5765 else
5766 chan->tx_send_head = NULL;
5767
5768 /* Rewind next_tx_seq to the point expected
5769 * by the receiver.
5770 */
5771 chan->next_tx_seq = control->reqseq;
5772 chan->unacked_frames = 0;
5773
5774 err = l2cap_finish_move(chan);
5775 if (err)
5776 return err;
5777
5778 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5779 l2cap_send_i_or_rr_or_rnr(chan);
5780
5781 if (event == L2CAP_EV_RECV_IFRAME)
5782 return -EPROTO;
5783
5784 return l2cap_rx_state_recv(chan, control, NULL, event);
5785 }
5786
5787 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
5788 struct l2cap_ctrl *control,
5789 struct sk_buff *skb, u8 event)
5790 {
5791 int err;
5792
5793 if (!control->final)
5794 return -EPROTO;
5795
5796 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5797
5798 chan->rx_state = L2CAP_RX_STATE_RECV;
5799 l2cap_process_reqseq(chan, control->reqseq);
5800
5801 if (!skb_queue_empty(&chan->tx_q))
5802 chan->tx_send_head = skb_peek(&chan->tx_q);
5803 else
5804 chan->tx_send_head = NULL;
5805
5806 /* Rewind next_tx_seq to the point expected
5807 * by the receiver.
5808 */
5809 chan->next_tx_seq = control->reqseq;
5810 chan->unacked_frames = 0;
5811
5812 if (chan->hs_hcon)
5813 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
5814 else
5815 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
5816
5817 err = l2cap_resegment(chan);
5818
5819 if (!err)
5820 err = l2cap_rx_state_recv(chan, control, skb, event);
5821
5822 return err;
5823 }
5824
5825 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5826 {
5827 /* Make sure reqseq is for a packet that has been sent but not acked */
5828 u16 unacked;
5829
5830 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5831 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5832 }
5833
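/* Top-level ERTM receive handler.  The acknowledgement number is checked
 * against the unacked window first; a valid frame is then dispatched to
 * the handler for the current receive state (RECV, SREJ_SENT, WAIT_P or
 * WAIT_F), while an invalid reqseq forces a disconnect.
 */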
5834 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5835 struct sk_buff *skb, u8 event)
5836 {
5837 int err = 0;
5838
5839 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5840 control, skb, event, chan->rx_state);
5841
5842 if (__valid_reqseq(chan, control->reqseq)) {
5843 switch (chan->rx_state) {
5844 case L2CAP_RX_STATE_RECV:
5845 err = l2cap_rx_state_recv(chan, control, skb, event);
5846 break;
5847 case L2CAP_RX_STATE_SREJ_SENT:
5848 err = l2cap_rx_state_srej_sent(chan, control, skb,
5849 event);
5850 break;
5851 case L2CAP_RX_STATE_WAIT_P:
5852 err = l2cap_rx_state_wait_p(chan, control, skb, event);
5853 break;
5854 case L2CAP_RX_STATE_WAIT_F:
5855 err = l2cap_rx_state_wait_f(chan, control, skb, event);
5856 break;
5857 default:
5858 /* shut it down */
5859 break;
5860 }
5861 } else {
5862 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
5863 control->reqseq, chan->next_tx_seq,
5864 chan->expected_ack_seq);
5865 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5866 }
5867
5868 return err;
5869 }
5870
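/* Streaming mode receive path: only frames with the expected sequence
 * number are reassembled; anything else simply discards the partial SDU
 * and the frame, since streaming mode never retransmits.  The expected
 * and acknowledged sequence numbers are advanced either way.
 */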
5871 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5872 struct sk_buff *skb)
5873 {
5874 int err = 0;
5875
5876 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5877 chan->rx_state);
5878
5879 if (l2cap_classify_txseq(chan, control->txseq) ==
5880 L2CAP_TXSEQ_EXPECTED) {
5881 l2cap_pass_to_tx(chan, control);
5882
5883 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5884 __next_seq(chan, chan->buffer_seq));
5885
5886 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5887
5888 l2cap_reassemble_sdu(chan, skb, control);
5889 } else {
5890 if (chan->sdu) {
5891 kfree_skb(chan->sdu);
5892 chan->sdu = NULL;
5893 }
5894 chan->sdu_last_frag = NULL;
5895 chan->sdu_len = 0;
5896
5897 if (skb) {
5898 BT_DBG("Freeing %p", skb);
5899 kfree_skb(skb);
5900 }
5901 }
5902
5903 chan->last_acked_seq = control->txseq;
5904 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5905
5906 return err;
5907 }
5908
5909 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5910 {
5911 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5912 u16 len;
5913 u8 event;
5914
5915 __unpack_control(chan, skb);
5916
5917 len = skb->len;
5918
5919 	/*
5920 	 * A corrupted I-frame can simply be dropped here.  The receive
5921 	 * state machine will treat it as missing and request
5922 	 * retransmission through the normal recovery procedures.
5923 	 */
5924 if (l2cap_check_fcs(chan, skb))
5925 goto drop;
5926
5927 if (!control->sframe && control->sar == L2CAP_SAR_START)
5928 len -= L2CAP_SDULEN_SIZE;
5929
5930 if (chan->fcs == L2CAP_FCS_CRC16)
5931 len -= L2CAP_FCS_SIZE;
5932
5933 if (len > chan->mps) {
5934 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5935 goto drop;
5936 }
5937
5938 if (!control->sframe) {
5939 int err;
5940
5941 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5942 control->sar, control->reqseq, control->final,
5943 control->txseq);
5944
5945 /* Validate F-bit - F=0 always valid, F=1 only
5946 * valid in TX WAIT_F
5947 */
5948 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5949 goto drop;
5950
5951 if (chan->mode != L2CAP_MODE_STREAMING) {
5952 event = L2CAP_EV_RECV_IFRAME;
5953 err = l2cap_rx(chan, control, skb, event);
5954 } else {
5955 err = l2cap_stream_rx(chan, control, skb);
5956 }
5957
5958 if (err)
5959 l2cap_send_disconn_req(chan->conn, chan,
5960 ECONNRESET);
5961 } else {
5962 const u8 rx_func_to_event[4] = {
5963 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5964 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5965 };
5966
5967 /* Only I-frames are expected in streaming mode */
5968 if (chan->mode == L2CAP_MODE_STREAMING)
5969 goto drop;
5970
5971 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5972 control->reqseq, control->final, control->poll,
5973 control->super);
5974
5975 if (len != 0) {
5976 			BT_ERR("Trailing bytes: %d in sframe", len);
5977 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5978 goto drop;
5979 }
5980
5981 /* Validate F and P bits */
5982 if (control->final && (control->poll ||
5983 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5984 goto drop;
5985
5986 event = rx_func_to_event[control->super];
5987 if (l2cap_rx(chan, control, skb, event))
5988 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5989 }
5990
5991 return 0;
5992
5993 drop:
5994 kfree_skb(skb);
5995 return 0;
5996 }
5997
5998 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
5999 struct sk_buff *skb)
6000 {
6001 struct l2cap_chan *chan;
6002
6003 chan = l2cap_get_chan_by_scid(conn, cid);
6004 if (!chan) {
6005 if (cid == L2CAP_CID_A2MP) {
6006 chan = a2mp_channel_create(conn, skb);
6007 if (!chan) {
6008 kfree_skb(skb);
6009 return;
6010 }
6011
6012 l2cap_chan_lock(chan);
6013 } else {
6014 BT_DBG("unknown cid 0x%4.4x", cid);
6015 /* Drop packet and return */
6016 kfree_skb(skb);
6017 return;
6018 }
6019 }
6020
6021 BT_DBG("chan %p, len %d", chan, skb->len);
6022
6023 if (chan->state != BT_CONNECTED)
6024 goto drop;
6025
6026 switch (chan->mode) {
6027 case L2CAP_MODE_BASIC:
6028 		/* If the socket recv buffer overflows we drop data here,
6029 		 * which is *bad* because L2CAP has to be reliable.
6030 		 * But we don't have any other choice: L2CAP doesn't
6031 		 * provide a flow control mechanism. */
6032
6033 if (chan->imtu < skb->len)
6034 goto drop;
6035
6036 if (!chan->ops->recv(chan, skb))
6037 goto done;
6038 break;
6039
6040 case L2CAP_MODE_ERTM:
6041 case L2CAP_MODE_STREAMING:
6042 l2cap_data_rcv(chan, skb);
6043 goto done;
6044
6045 default:
6046 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6047 break;
6048 }
6049
6050 drop:
6051 kfree_skb(skb);
6052
6053 done:
6054 l2cap_chan_unlock(chan);
6055 }
6056
6057 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6058 struct sk_buff *skb)
6059 {
6060 struct l2cap_chan *chan;
6061
6062 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6063 if (!chan)
6064 goto drop;
6065
6066 BT_DBG("chan %p, len %d", chan, skb->len);
6067
6068 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6069 goto drop;
6070
6071 if (chan->imtu < skb->len)
6072 goto drop;
6073
6074 if (!chan->ops->recv(chan, skb))
6075 return;
6076
6077 drop:
6078 kfree_skb(skb);
6079 }
6080
6081 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
6082 struct sk_buff *skb)
6083 {
6084 struct l2cap_chan *chan;
6085
6086 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
6087 if (!chan)
6088 goto drop;
6089
6090 BT_DBG("chan %p, len %d", chan, skb->len);
6091
6092 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6093 goto drop;
6094
6095 if (chan->imtu < skb->len)
6096 goto drop;
6097
6098 if (!chan->ops->recv(chan, skb))
6099 return;
6100
6101 drop:
6102 kfree_skb(skb);
6103 }
6104
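/* Demultiplex a complete L2CAP frame by channel ID: signalling commands,
 * connectionless (PSM-addressed) data, the fixed ATT and SMP channels,
 * and finally connection-oriented data channels.  Frames whose header
 * length does not match the payload are dropped up front.
 */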
6105 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6106 {
6107 struct l2cap_hdr *lh = (void *) skb->data;
6108 u16 cid, len;
6109 __le16 psm;
6110
6111 skb_pull(skb, L2CAP_HDR_SIZE);
6112 cid = __le16_to_cpu(lh->cid);
6113 len = __le16_to_cpu(lh->len);
6114
6115 if (len != skb->len) {
6116 kfree_skb(skb);
6117 return;
6118 }
6119
6120 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6121
6122 switch (cid) {
6123 case L2CAP_CID_LE_SIGNALING:
6124 case L2CAP_CID_SIGNALING:
6125 l2cap_sig_channel(conn, skb);
6126 break;
6127
6128 case L2CAP_CID_CONN_LESS:
6129 psm = get_unaligned((__le16 *) skb->data);
6130 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6131 l2cap_conless_channel(conn, psm, skb);
6132 break;
6133
6134 case L2CAP_CID_LE_DATA:
6135 l2cap_att_channel(conn, cid, skb);
6136 break;
6137
6138 case L2CAP_CID_SMP:
6139 if (smp_sig_channel(conn, skb))
6140 l2cap_conn_del(conn->hcon, EACCES);
6141 break;
6142
6143 default:
6144 l2cap_data_channel(conn, cid, skb);
6145 break;
6146 }
6147 }
6148
6149 /* ---- L2CAP interface with lower layer (HCI) ---- */
6150
6151 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6152 {
6153 int exact = 0, lm1 = 0, lm2 = 0;
6154 struct l2cap_chan *c;
6155
6156 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6157
6158 /* Find listening sockets and check their link_mode */
6159 read_lock(&chan_list_lock);
6160 list_for_each_entry(c, &chan_list, global_l) {
6161 struct sock *sk = c->sk;
6162
6163 if (c->state != BT_LISTEN)
6164 continue;
6165
6166 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6167 lm1 |= HCI_LM_ACCEPT;
6168 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6169 lm1 |= HCI_LM_MASTER;
6170 exact++;
6171 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6172 lm2 |= HCI_LM_ACCEPT;
6173 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6174 lm2 |= HCI_LM_MASTER;
6175 }
6176 }
6177 read_unlock(&chan_list_lock);
6178
6179 return exact ? lm1 : lm2;
6180 }
6181
6182 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6183 {
6184 struct l2cap_conn *conn;
6185
6186 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6187
6188 if (!status) {
6189 conn = l2cap_conn_add(hcon, status);
6190 if (conn)
6191 l2cap_conn_ready(conn);
6192 } else
6193 l2cap_conn_del(hcon, bt_to_errno(status));
6194
6195 }
6196
6197 int l2cap_disconn_ind(struct hci_conn *hcon)
6198 {
6199 struct l2cap_conn *conn = hcon->l2cap_data;
6200
6201 BT_DBG("hcon %p", hcon);
6202
6203 if (!conn)
6204 return HCI_ERROR_REMOTE_USER_TERM;
6205 return conn->disc_reason;
6206 }
6207
6208 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6209 {
6210 BT_DBG("hcon %p reason %d", hcon, reason);
6211
6212 l2cap_conn_del(hcon, bt_to_errno(reason));
6213 }
6214
6215 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6216 {
6217 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6218 return;
6219
6220 if (encrypt == 0x00) {
6221 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6222 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6223 } else if (chan->sec_level == BT_SECURITY_HIGH)
6224 l2cap_chan_close(chan, ECONNREFUSED);
6225 } else {
6226 if (chan->sec_level == BT_SECURITY_MEDIUM)
6227 __clear_chan_timer(chan);
6228 }
6229 }
6230
6231 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6232 {
6233 struct l2cap_conn *conn = hcon->l2cap_data;
6234 struct l2cap_chan *chan;
6235
6236 if (!conn)
6237 return 0;
6238
6239 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6240
6241 if (hcon->type == LE_LINK) {
6242 if (!status && encrypt)
6243 smp_distribute_keys(conn, 0);
6244 cancel_delayed_work(&conn->security_timer);
6245 }
6246
6247 mutex_lock(&conn->chan_lock);
6248
6249 list_for_each_entry(chan, &conn->chan_l, list) {
6250 l2cap_chan_lock(chan);
6251
6252 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6253 state_to_string(chan->state));
6254
6255 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6256 l2cap_chan_unlock(chan);
6257 continue;
6258 }
6259
6260 if (chan->scid == L2CAP_CID_LE_DATA) {
6261 if (!status && encrypt) {
6262 chan->sec_level = hcon->sec_level;
6263 l2cap_chan_ready(chan);
6264 }
6265
6266 l2cap_chan_unlock(chan);
6267 continue;
6268 }
6269
6270 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
6271 l2cap_chan_unlock(chan);
6272 continue;
6273 }
6274
6275 if (!status && (chan->state == BT_CONNECTED ||
6276 chan->state == BT_CONFIG)) {
6277 struct sock *sk = chan->sk;
6278
6279 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
6280 sk->sk_state_change(sk);
6281
6282 l2cap_check_encryption(chan, encrypt);
6283 l2cap_chan_unlock(chan);
6284 continue;
6285 }
6286
6287 if (chan->state == BT_CONNECT) {
6288 if (!status) {
6289 l2cap_start_connection(chan);
6290 } else {
6291 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6292 }
6293 } else if (chan->state == BT_CONNECT2) {
6294 struct sock *sk = chan->sk;
6295 struct l2cap_conn_rsp rsp;
6296 __u16 res, stat;
6297
6298 lock_sock(sk);
6299
6300 if (!status) {
6301 if (test_bit(BT_SK_DEFER_SETUP,
6302 &bt_sk(sk)->flags)) {
6303 res = L2CAP_CR_PEND;
6304 stat = L2CAP_CS_AUTHOR_PEND;
6305 chan->ops->defer(chan);
6306 } else {
6307 __l2cap_state_change(chan, BT_CONFIG);
6308 res = L2CAP_CR_SUCCESS;
6309 stat = L2CAP_CS_NO_INFO;
6310 }
6311 } else {
6312 __l2cap_state_change(chan, BT_DISCONN);
6313 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6314 res = L2CAP_CR_SEC_BLOCK;
6315 stat = L2CAP_CS_NO_INFO;
6316 }
6317
6318 release_sock(sk);
6319
6320 rsp.scid = cpu_to_le16(chan->dcid);
6321 rsp.dcid = cpu_to_le16(chan->scid);
6322 rsp.result = cpu_to_le16(res);
6323 rsp.status = cpu_to_le16(stat);
6324 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
6325 sizeof(rsp), &rsp);
6326
6327 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6328 res == L2CAP_CR_SUCCESS) {
6329 char buf[128];
6330 set_bit(CONF_REQ_SENT, &chan->conf_state);
6331 l2cap_send_cmd(conn, l2cap_get_ident(conn),
6332 L2CAP_CONF_REQ,
6333 l2cap_build_conf_req(chan, buf),
6334 buf);
6335 chan->num_conf_req++;
6336 }
6337 }
6338
6339 l2cap_chan_unlock(chan);
6340 }
6341
6342 mutex_unlock(&conn->chan_lock);
6343
6344 return 0;
6345 }
6346
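/* Entry point for ACL data from the HCI layer.  Start (and complete)
 * fragments must begin with the basic L2CAP header; if the fragment
 * already holds the whole frame it is processed immediately, otherwise a
 * reassembly buffer is allocated and continuation fragments are appended
 * until rx_len reaches zero.  Malformed fragment sequences mark the
 * connection unreliable and the data is dropped.
 */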
6347 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6348 {
6349 struct l2cap_conn *conn = hcon->l2cap_data;
6350 struct l2cap_hdr *hdr;
6351 int len;
6352
6353 	/* For AMP controllers, do not create an L2CAP connection */
6354 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6355 goto drop;
6356
6357 if (!conn)
6358 conn = l2cap_conn_add(hcon, 0);
6359
6360 if (!conn)
6361 goto drop;
6362
6363 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6364
6365 switch (flags) {
6366 case ACL_START:
6367 case ACL_START_NO_FLUSH:
6368 case ACL_COMPLETE:
6369 if (conn->rx_len) {
6370 BT_ERR("Unexpected start frame (len %d)", skb->len);
6371 kfree_skb(conn->rx_skb);
6372 conn->rx_skb = NULL;
6373 conn->rx_len = 0;
6374 l2cap_conn_unreliable(conn, ECOMM);
6375 }
6376
6377 		/* A start fragment always begins with the basic L2CAP header */
6378 if (skb->len < L2CAP_HDR_SIZE) {
6379 BT_ERR("Frame is too short (len %d)", skb->len);
6380 l2cap_conn_unreliable(conn, ECOMM);
6381 goto drop;
6382 }
6383
6384 hdr = (struct l2cap_hdr *) skb->data;
6385 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6386
6387 if (len == skb->len) {
6388 /* Complete frame received */
6389 l2cap_recv_frame(conn, skb);
6390 return 0;
6391 }
6392
6393 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6394
6395 if (skb->len > len) {
6396 BT_ERR("Frame is too long (len %d, expected len %d)",
6397 skb->len, len);
6398 l2cap_conn_unreliable(conn, ECOMM);
6399 goto drop;
6400 }
6401
6402 /* Allocate skb for the complete frame (with header) */
6403 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6404 if (!conn->rx_skb)
6405 goto drop;
6406
6407 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6408 skb->len);
6409 conn->rx_len = len - skb->len;
6410 break;
6411
6412 case ACL_CONT:
6413 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6414
6415 if (!conn->rx_len) {
6416 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6417 l2cap_conn_unreliable(conn, ECOMM);
6418 goto drop;
6419 }
6420
6421 if (skb->len > conn->rx_len) {
6422 BT_ERR("Fragment is too long (len %d, expected %d)",
6423 skb->len, conn->rx_len);
6424 kfree_skb(conn->rx_skb);
6425 conn->rx_skb = NULL;
6426 conn->rx_len = 0;
6427 l2cap_conn_unreliable(conn, ECOMM);
6428 goto drop;
6429 }
6430
6431 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6432 skb->len);
6433 conn->rx_len -= skb->len;
6434
6435 if (!conn->rx_len) {
6436 /* Complete frame received */
6437 l2cap_recv_frame(conn, conn->rx_skb);
6438 conn->rx_skb = NULL;
6439 }
6440 break;
6441 }
6442
6443 drop:
6444 kfree_skb(skb);
6445 return 0;
6446 }
6447
6448 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6449 {
6450 struct l2cap_chan *c;
6451
6452 read_lock(&chan_list_lock);
6453
6454 list_for_each_entry(c, &chan_list, global_l) {
6455 struct sock *sk = c->sk;
6456
6457 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6458 &bt_sk(sk)->src, &bt_sk(sk)->dst,
6459 c->state, __le16_to_cpu(c->psm),
6460 c->scid, c->dcid, c->imtu, c->omtu,
6461 c->sec_level, c->mode);
6462 }
6463
6464 read_unlock(&chan_list_lock);
6465
6466 return 0;
6467 }
6468
6469 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6470 {
6471 return single_open(file, l2cap_debugfs_show, inode->i_private);
6472 }
6473
6474 static const struct file_operations l2cap_debugfs_fops = {
6475 .open = l2cap_debugfs_open,
6476 .read = seq_read,
6477 .llseek = seq_lseek,
6478 .release = single_release,
6479 };
6480
6481 static struct dentry *l2cap_debugfs;
6482
6483 int __init l2cap_init(void)
6484 {
6485 int err;
6486
6487 err = l2cap_init_sockets();
6488 if (err < 0)
6489 return err;
6490
6491 if (bt_debugfs) {
6492 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6493 NULL, &l2cap_debugfs_fops);
6494 if (!l2cap_debugfs)
6495 BT_ERR("Failed to create L2CAP debug file");
6496 }
6497
6498 return 0;
6499 }
6500
6501 void l2cap_exit(void)
6502 {
6503 debugfs_remove(l2cap_debugfs);
6504 l2cap_cleanup_sockets();
6505 }
6506
6507 module_param(disable_ertm, bool, 0644);
6508 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");