net/bluetooth/l2cap_core.c (Linux kernel source; the original page header
referenced an unrelated commit title and has been corrected to name the file)
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 #include <net/bluetooth/amp.h>
42
43 bool disable_ertm;
44
45 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
46 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
47
48 static LIST_HEAD(chan_list);
49 static DEFINE_RWLOCK(chan_list_lock);
50
51 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
52 u8 code, u8 ident, u16 dlen, void *data);
53 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 void *data);
55 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
56 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
57
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
60
61 /* ---- L2CAP channels ---- */
62
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 u16 cid)
65 {
66 struct l2cap_chan *c;
67
68 list_for_each_entry(c, &conn->chan_l, list) {
69 if (c->dcid == cid)
70 return c;
71 }
72 return NULL;
73 }
74
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 u16 cid)
77 {
78 struct l2cap_chan *c;
79
80 list_for_each_entry(c, &conn->chan_l, list) {
81 if (c->scid == cid)
82 return c;
83 }
84 return NULL;
85 }
86
87 /* Find channel with given SCID.
88 * Returns locked channel. */
/* Find channel with given SCID.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* conn->chan_lock keeps the channel list stable for the lookup;
	 * the channel lock is taken before the list lock is dropped so
	 * the caller receives the channel already locked.
	 *
	 * NOTE(review): no reference is taken on the returned channel;
	 * presumably callers rely on the channel lock to keep it alive
	 * until they are done -- confirm against l2cap_chan_put() usage.
	 */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
102
103 /* Find channel with given DCID.
104 * Returns locked channel.
105 */
/* Find channel with given DCID.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* Same locking scheme as l2cap_get_chan_by_scid(): list lock
	 * for the lookup, channel returned locked (no reference taken).
	 */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
119
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
121 u8 ident)
122 {
123 struct l2cap_chan *c;
124
125 list_for_each_entry(c, &conn->chan_l, list) {
126 if (c->ident == ident)
127 return c;
128 }
129 return NULL;
130 }
131
/* Find channel with given command ident.
 * Returns locked channel (no reference taken); NULL if not found.
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
145
/* Find a channel in the global list bound to source PSM @psm on the
 * local address @src. The caller must hold chan_list_lock.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
			return c;
	}
	return NULL;
}
156
/* Bind @chan to PSM @psm on local address @src.
 *
 * With a non-zero @psm, fails with -EADDRINUSE if another channel is
 * already bound to that PSM/address pair. With @psm == 0, allocates
 * the first free dynamic PSM (odd values 0x1001..0x10ff, stepping by
 * 2 keeps the low octet odd as the spec requires) and returns -EINVAL
 * if the whole range is taken.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	/* chan_list_lock makes the check-then-assign atomic against
	 * concurrent binders.
	 */
	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
189
190 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
191 {
192 write_lock(&chan_list_lock);
193
194 chan->scid = scid;
195
196 write_unlock(&chan_list_lock);
197
198 return 0;
199 }
200
201 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
202 {
203 u16 cid = L2CAP_CID_DYN_START;
204
205 for (; cid < L2CAP_CID_DYN_END; cid++) {
206 if (!__l2cap_get_chan_by_scid(conn, cid))
207 return cid;
208 }
209
210 return 0;
211 }
212
/* Move @chan to @state and notify the channel's owner through the
 * state_change callback. Callers are expected to hold the socket lock
 * (see l2cap_state_change()).
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
221
222 static void l2cap_state_change(struct l2cap_chan *chan, int state)
223 {
224 struct sock *sk = chan->sk;
225
226 lock_sock(sk);
227 __l2cap_state_change(chan, state);
228 release_sock(sk);
229 }
230
/* Record @err on the channel's socket. Callers are expected to hold
 * the socket lock (see l2cap_chan_set_err()).
 */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
237
238 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
239 {
240 struct sock *sk = chan->sk;
241
242 lock_sock(sk);
243 __l2cap_chan_set_err(chan, err);
244 release_sock(sk);
245 }
246
/* Arm the ERTM retransmission timer, but only while the monitor timer
 * is not pending -- the two timers are mutually exclusive (presumably
 * per the ERTM spec: the monitor timer takes over after a poll; verify
 * against Core Spec Vol 3, Part A). A zero retrans_timeout disables it.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
255
/* Arm the ERTM monitor timer, cancelling the retransmission timer
 * first since only one of the two runs at a time. A zero
 * monitor_timeout disables it.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
264
/* Linear scan of an skb queue for the frame carrying TxSeq @seq.
 * Returns the skb still on the queue (not dequeued), or NULL.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
277
278 /* ---- L2CAP sequence number lists ---- */
279
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
286 * allocs or frees.
287 */
288
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
290 {
291 size_t alloc_size, i;
292
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
296 */
297 alloc_size = roundup_pow_of_two(size);
298
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
300 if (!seq_list->list)
301 return -ENOMEM;
302
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
308
309 return 0;
310 }
311
/* Release the array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
316
/* O(1) membership test: a slot holds L2CAP_SEQ_LIST_CLEAR exactly when
 * its sequence number is not on the list.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
323
/* Remove @seq from the list and return it, or return
 * L2CAP_SEQ_LIST_CLEAR if the list is empty or @seq is not found.
 * Removing the head is O(1); removing an interior entry walks the
 * singly-linked chain to find its predecessor.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* The tail entry links to L2CAP_SEQ_LIST_TAIL, so an
		 * empty list is detected here and both ends reset.
		 */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
357
/* Pop and return the head of the list, or L2CAP_SEQ_LIST_CLEAR when
 * the list is empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
363
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
365 {
366 u16 i;
367
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 return;
370
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
373
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376 }
377
/* Append @seq to the tail of the list in O(1). A sequence number that
 * is already on the list is silently ignored (its slot is non-CLEAR).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	/* New tail always links to the TAIL sentinel */
	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
395
/* Delayed-work handler for the channel timer: close the channel with
 * an error that depends on the state it timed out in. Lock order is
 * conn->chan_lock before the channel lock; the channel must be
 * unlocked again before ops->close() is called.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Connected/configuring channels and security-pending connects
	 * report ECONNREFUSED; anything else is a plain timeout.
	 */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference taken when the timer was armed */
	l2cap_chan_put(chan);
}
425
/* Allocate and initialize a new channel, link it on the global channel
 * list and return it with an initial reference. Returns NULL on
 * allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* NOTE(review): GFP_ATOMIC here -- presumably because some
	 * caller runs in atomic context; confirm, otherwise GFP_KERNEL
	 * would be preferable.
	 */
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
453
/* kref release callback: unlink the channel from the global list and
 * free it. Invoked from l2cap_chan_put() when the last reference is
 * dropped; never call directly.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
466
/* Take an additional reference on @c. Paired with l2cap_chan_put(). */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
473
/* Drop a reference on @c; the last put frees the channel via
 * l2cap_chan_destroy().
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
480
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
482 {
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
489
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
491 }
492
/* Attach @chan to @conn: assign CIDs/MTUs according to the channel
 * type, set the default extended-flow-spec parameters, take a channel
 * reference and link it on the connection's channel list. Caller must
 * hold conn->chan_lock (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until something more specific is known */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort flow spec defaults for the local side */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference owned by the connection's channel list */
	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
548
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
555
/* Detach @chan from its connection and tear it down with error @err.
 * Drops the list reference taken in __l2cap_chan_add(), releases the
 * ACL (unless this is the fixed A2MP channel), disconnects any
 * high-speed logical link and, once configuration had completed,
 * purges the ERTM/streaming queues and timers.
 * Caller holds the channel lock (and conn->chan_lock where required).
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* The A2MP fixed channel does not own an ACL reference */
		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	chan->ops->teardown(chan, err);

	/* Nothing mode-specific was set up before config completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
615
/* Close @chan with error @reason, driving the per-state shutdown
 * protocol: connected ACL channels send a disconnect request, a
 * pending incoming connection (BT_CONNECT2) is answered with a
 * rejection first, and every other state is simply torn down.
 * Caller holds the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
	       sk);

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			/* Re-arm the timer so an unanswered disconnect
			 * request still ends in l2cap_chan_timeout().
			 */
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Deferred-setup channels were held back for
			 * authorization, so report a security block.
			 */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
672
/* Map the channel type, PSM and security level to an HCI
 * authentication requirement. Note the side effect: an SDP channel at
 * BT_SECURITY_LOW is upgraded in place to BT_SECURITY_SDP.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	if (chan->chan_type == L2CAP_CHAN_RAW) {
		/* Raw (signalling-only) channels use dedicated bonding */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
		if (chan->sec_level == BT_SECURITY_LOW)
			chan->sec_level = BT_SECURITY_SDP;

		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
	} else {
		/* Ordinary connection-oriented channels: general bonding */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	}
}
703
704 /* Service level security */
705 int l2cap_chan_check_security(struct l2cap_chan *chan)
706 {
707 struct l2cap_conn *conn = chan->conn;
708 __u8 auth_type;
709
710 auth_type = l2cap_get_auth_type(chan);
711
712 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
713 }
714
/* Return the next free signalling-command identifier for @conn,
 * cycling through 1..128 under conn->lock.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
736
/* Build and transmit an L2CAP signalling command on @conn's signalling
 * channel. Silently drops the command if the skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Prefer non-flushable packets when the controller supports them */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling always goes out at top priority, radio active */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
758
759 static bool __chan_is_moving(struct l2cap_chan *chan)
760 {
761 return chan->move_state != L2CAP_MOVE_STABLE &&
762 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
763 }
764
/* Transmit @skb on @chan, routing to the high-speed (AMP) link when
 * one is attached and no channel move is in progress, otherwise over
 * the BR/EDR ACL with flush semantics derived from the channel flags.
 * Consumes the skb in all paths.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		/* AMP link selected but logical channel gone: drop */
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
791
/* Decode a 16-bit enhanced control field into @control. The unused
 * half of the union (S-frame vs I-frame fields) is zeroed so stale
 * values never leak through.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
815
/* Decode a 32-bit extended control field into @control; mirror of
 * __unpack_enhanced_control() for the extended-window format.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
839
/* Parse the control field at the head of @skb into the skb's control
 * block, choosing the 32-bit extended or 16-bit enhanced layout by the
 * channel flags, and pull the field off the skb.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
853
/* Encode @control into a 32-bit extended control field; inverse of
 * __unpack_extended_control().
 */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
872
/* Encode @control into a 16-bit enhanced control field; inverse of
 * __unpack_enhanced_control().
 */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
891
/* Write @control into @skb immediately after the basic L2CAP header,
 * in the format (extended or enhanced) selected by the channel flags.
 * Assumes the skb already reserves room at data + L2CAP_HDR_SIZE.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
904
905 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
906 {
907 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
908 return L2CAP_EXT_HDR_SIZE;
909 else
910 return L2CAP_ENH_HDR_SIZE;
911 }
912
/* Build an S-frame PDU carrying the packed control field @control,
 * appending an FCS when the channel negotiated CRC16. Returns the skb
 * or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic header: payload length excludes the header itself */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything written so far */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
945
/* Transmit the supervisory frame described by @control, updating the
 * related ERTM bookkeeping (F-bit, RNR-sent flag, last acked sequence
 * and ack timer). No-op for I-frame controls or while an AMP channel
 * move is in progress.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is sent on the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Any frame but SREJ acknowledges reqseq, so the pending ack
	 * timer can be cancelled.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
986
/* Send an RR (receiver ready) S-frame acknowledging buffer_seq, or an
 * RNR (receiver not ready) when the local side is busy; @poll sets the
 * P-bit.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}
1005
/* True when no connect request is outstanding on this channel. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
1010
1011 static bool __amp_capable(struct l2cap_chan *chan)
1012 {
1013 struct l2cap_conn *conn = chan->conn;
1014
1015 if (enable_hs &&
1016 hci_amp_capable() &&
1017 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
1018 conn->fixed_chan_mask & L2CAP_FC_A2MP)
1019 return true;
1020 else
1021 return false;
1022 }
1023
/* Validate the channel's extended flow spec parameters.
 * NOTE(review): currently a stub that accepts everything -- the real
 * EFS validation is presumably still to be implemented.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1029
/* Send an L2CAP Connection Request for @chan, remembering the command
 * ident so the response can be matched, and marking the connect as
 * pending until the response arrives.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1044
/* Send an L2CAP Create Channel Request targeting AMP controller
 * @amp_id; the AMP counterpart of l2cap_send_conn_req().
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1057
/* Prepare an ERTM channel for an AMP channel move: stop all timers,
 * reset retransmission/SREJ state and park the TX side by flagging the
 * remote as busy. No-op for non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset the retry count of every already-transmitted frame to
	 * 1; the walk stops at the first never-transmitted frame
	 * (retries == 0), since all frames after it are untransmitted
	 * too.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Block transmission until the move completes */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1092
/* Finish an AMP channel move: return move state/role to their idle
 * values and, for ERTM channels, restart the poll handshake according
 * to the role this side played in the move.
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		/* Initiator polls and waits for the F-bit */
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		/* Responder waits for the initiator's P-bit */
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1114
/* Mark @chan fully connected: clear all configuration flags, stop the
 * channel timer and notify the owner via ops->ready().
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1125
/* Kick off the connection: an AMP-capable channel first discovers
 * remote AMP controllers, everything else sends the connect request
 * straight away.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (!__amp_capable(chan)) {
		l2cap_send_conn_req(chan);
		return;
	}

	BT_DBG("chan %p AMP capable: discover AMPs", chan);
	a2mp_discover_amp(chan);
}
1135
/* Advance channel setup on @chan's connection. LE links become ready
 * immediately; on BR/EDR the connect request is only sent once the
 * remote feature mask has been fetched (sending the info request first
 * if that exchange has not started yet) and security checks pass.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange in flight: wait for it to finish */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Guard against a remote that never answers */
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1166
1167 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1168 {
1169 u32 local_feat_mask = l2cap_feat_mask;
1170 if (!disable_ertm)
1171 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1172
1173 switch (mode) {
1174 case L2CAP_MODE_ERTM:
1175 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1176 case L2CAP_MODE_STREAMING:
1177 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1178 default:
1179 return 0x00;
1180 }
1181 }
1182
/* Send an L2CAP Disconnection Request for @chan and move it to
 * BT_DISCONN with @err recorded on the socket. ERTM timers are stopped
 * first; the fixed A2MP channel only changes state since it has no
 * disconnect handshake.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	/* State change and error are set under one socket lock hold */
	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1213
1214 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its connection state
 * machine: outgoing channels in BT_CONNECT are (re)started, and
 * incoming channels in BT_CONNECT2 get their pending Connect
 * Response sent, followed by our Configure Request on success.
 * Called after the feature-mask exchange completes or times out.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe variant: l2cap_chan_close() below may unlink the entry */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Channel pinned to a mode the remote doesn't
			 * support cannot proceed; close it.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must accept first */
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				/* Security procedure still pending */
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only follow a successful response with our conf
			 * request, and only once.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1295
1296 /* Find socket with cid and source/destination bdaddr.
1297 * Returns closest match, locked.
1298 */
/* Look up a channel in the global list by source CID and address
 * pair. An exact src/dst match returns immediately; otherwise the
 * closest wildcard (BDADDR_ANY) match found is returned, or NULL.
 * @state of 0 matches any channel state.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1338
/* Handle an incoming LE connection: find a listening channel on the
 * LE data CID, spawn a child channel from it, attach the child to
 * @conn and mark it ready. Silently returns when nobody listens.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	sk = chan->sk;

	/* Keep the ACL alive while the channel exists */
	hci_conn_hold(conn->hcon);
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	l2cap_chan_add(conn, chan);

	l2cap_chan_ready(chan);

clean:
	/* Success path falls through here too; only the parent lock
	 * needs releasing.
	 */
	release_sock(parent);
}
1375
/* Called when the underlying HCI link is established. Accepts any
 * pending incoming LE connection, elevates outgoing LE security,
 * then drives each attached channel forward: LE channels become
 * ready once SMP security passes, non-connection-oriented channels
 * go straight to BT_CONNECTED, and outgoing BR/EDR channels start
 * their setup via l2cap_do_start().
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (!hcon->out && hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP fixed channels manage their own state */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			if (smp_conn_security(hcon, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1420
/* Notify sockets that we cannot guarantee reliability anymore */
/* Report @err on every channel of @conn that requested reliable
 * delivery (FLAG_FORCE_RELIABLE); other channels are left alone.
 */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1437
/* Delayed-work handler for the information-request timeout. Marks
 * the feature-mask exchange as done (with whatever we learned) and
 * lets the pending channels proceed via l2cap_conn_start().
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1448
1449 /*
1450 * l2cap_user
1451 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1452 * callback is called during registration. The ->remove callback is called
1453 * during unregistration.
1454 * An l2cap_user object can either be explicitly unregistered or when the
1455 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1456 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1457 * External modules must own a reference to the l2cap_conn object if they intend
1458 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1459 * any time if they don't.
1460 */
1461
/* Register an l2cap_user on @conn, invoking its ->probe callback.
 * Returns 0 on success, -EINVAL if the user is already linked, or
 * -ENODEV if the connection has already been torn down. May also
 * return whatever ->probe returns on failure.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already registered */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1499
/* Unregister a previously registered l2cap_user and invoke its
 * ->remove callback. A user that is not linked (NULL list pointers,
 * e.g. already removed by l2cap_unregister_all_users()) is ignored.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (!user->list.next || !user->list.prev)
		goto out_unlock;

	list_del(&user->list);
	/* NULL the pointers so a repeated unregister is a no-op */
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1518
/* Remove every registered l2cap_user from @conn, calling each one's
 * ->remove callback. Entries are popped from the head one at a time
 * because ->remove may manipulate the list.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del(&user->list);
		user->list.next = NULL;
		user->list.prev = NULL;
		user->remove(conn, user);
	}
}
1531
/* Tear down the L2CAP connection attached to @hcon, closing every
 * channel with error @err, removing users, cancelling pending
 * timers/SMP state, and finally dropping the conn reference taken
 * at creation. Caller must hold the hci_conn/hci_dev locks (see
 * l2cap_register_user() for the locking rationale).
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so ops->close() can run after it is
		 * unlinked by l2cap_chan_del().
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1577
/* Delayed-work handler for the LE SMP security timeout: if the SMP
 * procedure is still pending, destroy its state and drop the whole
 * connection with ETIMEDOUT.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1590
/* Create (or return the existing) l2cap_conn for @hcon. Allocates
 * an HCI channel and the conn object, initializes locks, lists and
 * the per-link-type timer, and picks the MTU from the LE or ACL
 * buffer size as appropriate. Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	/* Reference dropped in l2cap_conn_free() */
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	/* security_timer and info_timer share a union; only one is
	 * valid depending on the link type.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1649
/* kref release callback: drop the hci_conn reference taken in
 * l2cap_conn_add() and free the conn object.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1657
/* Take a reference on @conn; release with l2cap_conn_put(). */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
1663
/* Drop a reference on @conn; frees it via l2cap_conn_free() when the
 * last reference goes away.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1669
1670 /* ---- Socket interface ---- */
1671
1672 /* Find socket with psm and source / destination bdaddr.
1673 * Returns closest match.
1674 */
/* Look up a channel in the global list by PSM and address pair.
 * Mirrors l2cap_global_chan_by_scid(): exact src/dst match wins,
 * otherwise the last wildcard (BDADDR_ANY) match is returned, or
 * NULL. @state of 0 matches any channel state.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1714
/* Connect @chan to @dst on PSM @psm or fixed CID @cid. Validates the
 * PSM format, channel mode and current state, creates (or reuses)
 * the underlying ACL/LE link, attaches the channel and begins L2CAP
 * setup if the link is already up. Returns 0 on success (including
 * "already connecting") or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* LE data channels go over an LE link; everything else ACL */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* Only a single LE data channel per link is supported */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_drop(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* l2cap_chan_add() takes conn->chan_lock; drop the channel lock
	 * to preserve lock ordering, then reacquire it.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1853
/* Sleep (interruptibly, in HZ/5 slices) until every outstanding ERTM
 * frame on the socket's channel has been acked or the connection goes
 * away. Returns 0, a signal errno, or a pending socket error. Caller
 * holds the socket lock; it is dropped around each sleep.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		/* Must re-mark INTERRUPTIBLE before re-testing the
		 * condition to avoid a lost wakeup.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1885
/* Delayed-work handler for the ERTM monitor timer: feeds a
 * MONITOR_TO event into the TX state machine, unless the channel
 * has already been detached from its connection. Drops the channel
 * reference taken when the timer was armed.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1906
/* Delayed-work handler for the ERTM retransmission timer: feeds a
 * RETRANS_TO event into the TX state machine, unless the channel has
 * already been detached. Drops the channel reference taken when the
 * timer was armed.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1926
/* Transmit all queued streaming-mode PDUs in @skbs: stamp each with
 * the next TX sequence number, append an FCS if configured, and send
 * immediately (no retransmission in streaming mode). Skipped while
 * an AMP channel move is in progress.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode never acks, so reqseq is always 0 */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1965
/* Transmit queued ERTM I-frames starting at tx_send_head, as long as
 * the remote TX window has room and the TX state machine is in XMIT.
 * Each frame gets the current reqseq/txseq, optional FCS, and the
 * retransmission timer is armed. A clone is sent so the original can
 * be retransmitted later. Returns the number of frames sent or a
 * negative errno.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Piggy-back an ack for everything received so far */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; NULL once the queue is drained */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2035
/* Retransmit every sequence number queued on chan->retrans_list.
 * Each frame's retry counter is bumped (disconnecting if max_tx is
 * exceeded), its control field is refreshed with the current reqseq
 * and F-bit, the FCS is recomputed, and a fresh copy/clone is sent.
 * Skipped while the remote is busy or a channel move is in progress.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx of 0 means unlimited retries */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2114
/* Retransmit the single frame identified by control->reqseq:
 * queue it on the retransmission list and flush.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2123
/* Retransmit every unacked frame from control->reqseq up to (but not
 * including) tx_send_head. A poll request from the peer forces the
 * F-bit on the next frame. No-op while the remote signals busy.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* First loop: skip frames already acked (before reqseq) */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Second loop: queue everything up to tx_send_head */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2157
/* Acknowledge received I-frames. If we are locally busy, send an RNR
 * immediately. Otherwise try to piggy-back the ack on outgoing data;
 * failing that, send an explicit RR once 3/4 of the ack window has
 * accumulated, or arm the ack timer to coalesce a later ack.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2207
/* Copy @len bytes of user data from @msg into @skb. The first @count
 * bytes go into skb's linear area (already sized by the caller); the
 * remainder is split into MTU-sized continuation fragments chained
 * on skb's frag_list. Returns the number of bytes copied or a
 * negative errno (allocation or copy failure).
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the parent skb's accounting in sync */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2252
/* Build a connectionless (G-frame) PDU from user data: L2CAP header
 * plus the 2-byte PSM, followed by the payload (fragmented via
 * l2cap_skbuff_fromiovec() if larger than the HCI MTU). Returns the
 * skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu priority %u", chan, len, priority);

	/* Linear part is capped by the HCI MTU; the rest goes into
	 * continuation fragments.
	 */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2286
/* Build a basic-mode (B-frame) PDU from user data: a plain L2CAP
 * header followed by the payload. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
					      u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2319
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed
 * control field (filled in at send time), an optional SDU length
 * field (@sdulen non-zero for the first segment of a segmented SDU),
 * the payload, and room for an FCS if configured. Returns the skb or
 * an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2373
/* Segment an SDU of @len bytes from @msg into I-frame PDUs appended
 * to @seg_queue. The PDU size is derived from the HCI MTU, capped
 * for BR/EDR, reduced by header/FCS overhead and by the remote MPS.
 * SAR markers (UNSEGMENTED / START / CONTINUE / END) are set on each
 * fragment. Returns 0 on success or a negative errno after purging
 * the queue.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		/* First segment carries the 2-byte SDU length field */
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Later segments have no SDU length field */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2443
/* Send the SDU in @msg on @chan.  Returns the number of bytes accepted
 * for transmission or a negative errno.
 *
 * Connectionless channels send a single PDU.  Basic mode sends one PDU
 * after an outgoing-MTU check.  ERTM and streaming modes segment the
 * SDU first, then hand the segments to the ERTM state machine or the
 * streaming-mode sender.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2523
/* Send an SREJ S-frame for every missing sequence number from the
 * expected TX sequence up to (but not including) @txseq, skipping
 * frames already buffered out-of-order in srej_q.  Each requested
 * sequence number is recorded in srej_list for tracking, and the
 * expected TX sequence is advanced past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Frames already held in srej_q need no request */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2546
2547 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2548 {
2549 struct l2cap_ctrl control;
2550
2551 BT_DBG("chan %p", chan);
2552
2553 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2554 return;
2555
2556 memset(&control, 0, sizeof(control));
2557 control.sframe = 1;
2558 control.super = L2CAP_SUPER_SREJ;
2559 control.reqseq = chan->srej_list.tail;
2560 l2cap_send_sframe(chan, &control);
2561 }
2562
/* Re-send SREJ S-frames for every outstanding entry in srej_list,
 * stopping early if @txseq is reached.  Entries are popped and then
 * re-appended, so the initial head is captured up front to guarantee
 * only a single pass over the list.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Still outstanding: put it back at the list tail */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2588
/* Process an acknowledgment up to @reqseq: free every acked frame
 * from the TX queue and update the unacked-frame accounting.  The
 * retransmission timer is stopped once nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing to do when all frames are acked already or this ack
	 * duplicates the current state.
	 */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		/* A frame may already be gone from tx_q, so check
		 * presence before unlinking.
		 */
		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2620
/* Abandon SREJ-based recovery: rewind the expected TX sequence to the
 * receive buffer position, drop all SREJ bookkeeping and buffered
 * out-of-order frames, and return the RX state machine to RECV.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2630
/* TX state machine handler for the XMIT state: data may be sent
 * immediately; poll and busy events transition the channel to WAIT_F
 * or into LOCAL_BUSY handling as required by ERTM.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR was sent while busy; poll the remote
			 * with an RR so transmission can resume.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Poll the remote and wait in WAIT_F for the F-bit reply */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2702
2703 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2704 struct l2cap_ctrl *control,
2705 struct sk_buff_head *skbs, u8 event)
2706 {
2707 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2708 event);
2709
2710 switch (event) {
2711 case L2CAP_EV_DATA_REQUEST:
2712 if (chan->tx_send_head == NULL)
2713 chan->tx_send_head = skb_peek(skbs);
2714 /* Queue data, but don't send. */
2715 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2716 break;
2717 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2718 BT_DBG("Enter LOCAL_BUSY");
2719 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2720
2721 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2722 /* The SREJ_SENT state must be aborted if we are to
2723 * enter the LOCAL_BUSY state.
2724 */
2725 l2cap_abort_rx_srej_sent(chan);
2726 }
2727
2728 l2cap_send_ack(chan);
2729
2730 break;
2731 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2732 BT_DBG("Exit LOCAL_BUSY");
2733 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2734
2735 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2736 struct l2cap_ctrl local_control;
2737 memset(&local_control, 0, sizeof(local_control));
2738 local_control.sframe = 1;
2739 local_control.super = L2CAP_SUPER_RR;
2740 local_control.poll = 1;
2741 local_control.reqseq = chan->buffer_seq;
2742 l2cap_send_sframe(chan, &local_control);
2743
2744 chan->retry_count = 1;
2745 __set_monitor_timer(chan);
2746 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2747 }
2748 break;
2749 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2750 l2cap_process_reqseq(chan, control->reqseq);
2751
2752 /* Fall through */
2753
2754 case L2CAP_EV_RECV_FBIT:
2755 if (control && control->final) {
2756 __clear_monitor_timer(chan);
2757 if (chan->unacked_frames > 0)
2758 __set_retrans_timer(chan);
2759 chan->retry_count = 0;
2760 chan->tx_state = L2CAP_TX_STATE_XMIT;
2761 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2762 }
2763 break;
2764 case L2CAP_EV_EXPLICIT_POLL:
2765 /* Ignore */
2766 break;
2767 case L2CAP_EV_MONITOR_TO:
2768 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2769 l2cap_send_rr_or_rnr(chan, 1);
2770 __set_monitor_timer(chan);
2771 chan->retry_count++;
2772 } else {
2773 l2cap_send_disconn_req(chan, ECONNABORTED);
2774 }
2775 break;
2776 default:
2777 break;
2778 }
2779 }
2780
2781 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2782 struct sk_buff_head *skbs, u8 event)
2783 {
2784 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2785 chan, control, skbs, event, chan->tx_state);
2786
2787 switch (chan->tx_state) {
2788 case L2CAP_TX_STATE_XMIT:
2789 l2cap_tx_state_xmit(chan, control, skbs, event);
2790 break;
2791 case L2CAP_TX_STATE_WAIT_F:
2792 l2cap_tx_state_wait_f(chan, control, skbs, event);
2793 break;
2794 default:
2795 /* Ignore event */
2796 break;
2797 }
2798 }
2799
/* Feed an incoming frame's reqseq and F-bit into the TX state machine
 * so acked frames can be released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2806
/* Feed only an incoming frame's F-bit into the TX state machine
 * (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2813
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* Each raw channel gets its own clone; on allocation
		 * failure skip this channel rather than aborting.
		 */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;

		/* recv takes ownership on success; free the clone if
		 * the channel refused it.
		 */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2842
2843 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header + command header +
 * @dlen bytes of @data.  Payload beyond the connection MTU is carried
 * in continuation fragments chained on frag_list (raw data, no extra
 * headers).  Returns NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* The signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Data capacity of the first skb excludes the headers
		 * already written above.
		 */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Releases any chained fragments along with the head skb */
	kfree_skb(skb);
	return NULL;
}
2906
/* Read one configuration option at *ptr and advance *ptr past it.
 * Options of length 1/2/4 are returned by value in *val; any other
 * length leaves a pointer to the option payload in *val instead.
 * Returns the total number of bytes consumed.
 *
 * NOTE(review): opt->len comes from remote-controlled data and is not
 * validated against the remaining buffer length here; callers must
 * guarantee the buffer holds L2CAP_CONF_OPT_SIZE + opt->len bytes —
 * verify against the config parsing loops that call this.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Non-scalar option: hand back a pointer to the payload */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2940
/* Append one configuration option to the buffer at *ptr and advance
 * *ptr past it.  For @len of 1/2/4 the option value is @val itself;
 * for any other length @val is treated as a pointer to @len bytes of
 * payload to copy.
 *
 * NOTE(review): no output-buffer bound is checked here; callers must
 * size their response buffers for the worst-case option sequence —
 * confirm the buffer sizes at each call site.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Non-scalar option: val is a pointer to the payload */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2970
2971 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2972 {
2973 struct l2cap_conf_efs efs;
2974
2975 switch (chan->mode) {
2976 case L2CAP_MODE_ERTM:
2977 efs.id = chan->local_id;
2978 efs.stype = chan->local_stype;
2979 efs.msdu = cpu_to_le16(chan->local_msdu);
2980 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2981 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2982 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2983 break;
2984
2985 case L2CAP_MODE_STREAMING:
2986 efs.id = 1;
2987 efs.stype = L2CAP_SERV_BESTEFFORT;
2988 efs.msdu = cpu_to_le16(chan->local_msdu);
2989 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2990 efs.acc_lat = 0;
2991 efs.flush_to = 0;
2992 break;
2993
2994 default:
2995 return;
2996 }
2997
2998 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2999 (unsigned long) &efs);
3000 }
3001
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * have not yet been acknowledged, send an RR (or RNR when locally
 * busy).  Drops the channel reference taken when the timer was armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Frames received since the last acknowledgment we sent */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3021
/* Reset per-channel sequencing and SDU-reassembly state and, for ERTM
 * mode, initialize the RX/TX state machines, timers, and the SREJ and
 * retransmission sequence lists.  Returns 0 on success or a negative
 * errno if a sequence list cannot be allocated.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP channel-move state starts out stable on controller 0 */
	chan->local_amp_id = 0;
	chan->move_id = 0;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak the first list on partial failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3066
3067 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3068 {
3069 switch (mode) {
3070 case L2CAP_MODE_STREAMING:
3071 case L2CAP_MODE_ERTM:
3072 if (l2cap_mode_supported(mode, remote_feat_mask))
3073 return mode;
3074 /* fall through */
3075 default:
3076 return L2CAP_MODE_BASIC;
3077 }
3078 }
3079
3080 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
3081 {
3082 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3083 }
3084
3085 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
3086 {
3087 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3088 }
3089
/* Choose the ERTM retransmission and monitor timeouts for @rfc.
 * AMP channels derive them from the controller's best-effort flush
 * timeout; BR/EDR channels use the spec default values.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout. The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement. ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC option fields */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3127
3128 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3129 {
3130 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3131 __l2cap_ews_supported(chan)) {
3132 /* use extended control field */
3133 set_bit(FLAG_EXT_CTRL, &chan->flags);
3134 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3135 } else {
3136 chan->tx_win = min_t(u16, chan->tx_win,
3137 L2CAP_DEFAULT_TX_WINDOW);
3138 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3139 }
3140 chan->ack_win = chan->tx_win;
3141 }
3142
/* Build a configuration request for @chan into @data, selecting the
 * channel mode first (only on the initial request) and then appending
 * MTU, RFC, EFS, EWS and FCS options as applicable.  Returns the
 * number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* State-2 devices keep their configured mode as-is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default receive MTU */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Omit the RFC option entirely when the remote supports
		 * neither ERTM nor streaming mode.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* MPS must fit within the HCI MTU minus worst-case
		 * L2CAP overhead.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		/* The RFC option field is limited to the enhanced
		 * window range; larger windows go in the EWS option.
		 */
		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		/* Request no FCS if the remote allows it */
		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3260
/* Parse the configuration request buffered in chan->conf_req and build
 * the response into @data.  Returns the response length, or a negative
 * errno when the request must be refused (incompatible mode/EFS).
 *
 * NOTE(review): options are appended to @data via l2cap_add_conf_opt
 * without an explicit output bound; callers must provide a buffer
 * sized for the worst-case response — confirm at the call sites.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect every option from the request */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended windows require high-speed support */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Echo unknown non-hint option types back */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode negotiation only happens on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State-2 devices cannot fall back to another mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Refuse outright after one failed negotiation round */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* Service types must match unless one side is
			 * no-traffic.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* An EWS option supersedes the RFC window size */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote's MPS to what fits in our
			 * HCI MTU after L2CAP overhead.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3474
/* Parse a configuration response in @rsp and build the follow-up
 * request into @data, adopting the values the remote proposed.
 * *result may be downgraded to UNACCEPT (e.g. too-small MTU).
 * Returns the request length or -ECONNREFUSED on an unacceptable
 * mode or EFS service type.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Enforce the spec minimum MTU */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* State-2 devices cannot change mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service types must be compatible */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs);
			break;

		case L2CAP_CONF_FCS:
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode cannot be renegotiated to anything else */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control, the RFC window
			 * bounds the ack window.
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3583
3584 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3585 u16 result, u16 flags)
3586 {
3587 struct l2cap_conf_rsp *rsp = data;
3588 void *ptr = rsp->data;
3589
3590 BT_DBG("chan %p", chan);
3591
3592 rsp->scid = cpu_to_le16(chan->dcid);
3593 rsp->result = cpu_to_le16(result);
3594 rsp->flags = cpu_to_le16(flags);
3595
3596 return ptr - data;
3597 }
3598
/* Send the deferred success response for an incoming connection
 * (CREATE CHAN RSP for AMP channels, CONN RSP otherwise) and, if one
 * has not been sent yet, follow it with our configuration request.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	/* AMP channels answer with a Create Channel Response instead */
	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Only send one configuration request */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3627
/* Extract the RFC (and extended window size) options from a configure
 * response and apply the negotiated ERTM/streaming parameters to the
 * channel.  No-op for modes other than ERTM/streaming.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Walk the option list; unknown options are skipped */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the window comes from the EWS
		 * option, otherwise from the RFC option itself.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3678
3679 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3680 struct l2cap_cmd_hdr *cmd, u8 *data)
3681 {
3682 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3683
3684 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3685 return 0;
3686
3687 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3688 cmd->ident == conn->info_ident) {
3689 cancel_delayed_work(&conn->info_timer);
3690
3691 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3692 conn->info_ident = 0;
3693
3694 l2cap_conn_start(conn);
3695 }
3696
3697 return 0;
3698 }
3699
/* Handle an incoming Connection Request (or Create Channel Request when
 * amp_id != 0).  On success a new channel is created and added to the
 * connection; a response with rsp_code is always sent, even when no
 * channel could be created.  Returns the new channel or NULL.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	/* Lock order: connection channel list first, then the parent sock */
	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* Keep the ACL alive for the lifetime of this channel */
	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			/* User-space deferred accept: report pending and
			 * let the owner decide later.
			 */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id) {
					__l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature discovery still in progress */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Kick off feature discovery if we have not started it yet */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* Connection accepted: send our initial configure request */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3830
/* Handle a BR/EDR Connection Request: notify the management interface
 * of the device connection (at most once per hci_conn) and then run
 * the common connect logic.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	hci_dev_lock(hdev);
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
				      hcon->dst_type, 0, NULL, 0,
				      hcon->dev_class);
	hci_dev_unlock(hdev);

	/* amp_id 0 means a plain BR/EDR channel */
	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
3848
/* Handle a Connection Response or Create Channel Response.  Looks up
 * the local channel by scid (or by command ident when scid is 0, i.e.
 * a rejection before a destination CID was assigned) and advances its
 * state, starting configuration on success.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		/* No scid in the response: match on the request ident */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Send the initial configure request only once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal: tear the channel down */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3917
3918 static inline void set_default_fcs(struct l2cap_chan *chan)
3919 {
3920 /* FCS is enabled only in ERTM or streaming mode, if one or both
3921 * sides request it.
3922 */
3923 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3924 chan->fcs = L2CAP_FCS_NONE;
3925 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3926 chan->fcs = L2CAP_FCS_CRC16;
3927 }
3928
3929 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3930 u8 ident, u16 flags)
3931 {
3932 struct l2cap_conn *conn = chan->conn;
3933
3934 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3935 flags);
3936
3937 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3938 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3939
3940 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3941 l2cap_build_conf_rsp(chan, data,
3942 L2CAP_CONF_SUCCESS, flags), data);
3943 }
3944
/* Handle a Configure Request.  Options may span several requests (the
 * continuation flag); they are accumulated in chan->conf_req until the
 * final fragment arrives, then parsed and answered.  When both sides
 * have finished configuring, the channel is made ready.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own configure request has not gone out yet; send it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4054
/* Handle a Configure Response.  Success applies the negotiated options;
 * UNACCEPT triggers a renegotiation (bounded by L2CAP_CONF_MAX_CONF_RSP);
 * anything else tears the channel down.  When both sides are done the
 * channel is made ready.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns with the channel locked; released at the done label */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* On AMP the response waits for the logical link */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many rejections: fall through to give up */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4162
/* Handle a Disconnection Request: acknowledge it, shut the socket down
 * and delete the channel.  An extra channel reference is held across
 * the unlock so the close callback can run safely.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Hold keeps the channel alive until after ops->close */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4209
/* Handle a Disconnection Response to our own disconnect request:
 * delete the channel with no error.  The same hold/del/close/put
 * sequence as the request path keeps the channel valid across the
 * unlock.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4244
4245 static inline int l2cap_information_req(struct l2cap_conn *conn,
4246 struct l2cap_cmd_hdr *cmd, u8 *data)
4247 {
4248 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4249 u16 type;
4250
4251 type = __le16_to_cpu(req->type);
4252
4253 BT_DBG("type 0x%4.4x", type);
4254
4255 if (type == L2CAP_IT_FEAT_MASK) {
4256 u8 buf[8];
4257 u32 feat_mask = l2cap_feat_mask;
4258 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4259 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4260 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4261 if (!disable_ertm)
4262 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4263 | L2CAP_FEAT_FCS;
4264 if (enable_hs)
4265 feat_mask |= L2CAP_FEAT_EXT_FLOW
4266 | L2CAP_FEAT_EXT_WINDOW;
4267
4268 put_unaligned_le32(feat_mask, rsp->data);
4269 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4270 buf);
4271 } else if (type == L2CAP_IT_FIXED_CHAN) {
4272 u8 buf[12];
4273 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4274
4275 if (enable_hs)
4276 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4277 else
4278 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4279
4280 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4281 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4282 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4283 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4284 buf);
4285 } else {
4286 struct l2cap_info_rsp rsp;
4287 rsp.type = cpu_to_le16(type);
4288 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4289 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4290 &rsp);
4291 }
4292
4293 return 0;
4294 }
4295
/* Handle an Information Response to our feature discovery.  A feature
 * mask that advertises fixed channels triggers a follow-up fixed-channel
 * query; otherwise discovery is finished and pending connections are
 * started.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Remote could not answer: finish discovery anyway */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Chain a fixed-channel query before finishing */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4354
/* Handle a Create Channel Request.  Controller id 0 degrades to a plain
 * BR/EDR connect; otherwise the AMP controller id is validated, the
 * channel is created and bound to the high-speed link.  Invalid
 * controllers get a BAD_AMP response.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!enable_hs)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == HCI_BREDR_ID) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			return -EFAULT;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		/* Bind the new channel to the high-speed link; FCS is
		 * not used on AMP links.
		 */
		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return -EFAULT;
}
4428
4429 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4430 {
4431 struct l2cap_move_chan_req req;
4432 u8 ident;
4433
4434 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4435
4436 ident = l2cap_get_ident(chan->conn);
4437 chan->ident = ident;
4438
4439 req.icid = cpu_to_le16(chan->scid);
4440 req.dest_amp_id = dest_amp_id;
4441
4442 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4443 &req);
4444
4445 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4446 }
4447
4448 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4449 {
4450 struct l2cap_move_chan_rsp rsp;
4451
4452 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4453
4454 rsp.icid = cpu_to_le16(chan->dcid);
4455 rsp.result = cpu_to_le16(result);
4456
4457 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4458 sizeof(rsp), &rsp);
4459 }
4460
4461 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4462 {
4463 struct l2cap_move_chan_cfm cfm;
4464
4465 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4466
4467 chan->ident = l2cap_get_ident(chan->conn);
4468
4469 cfm.icid = cpu_to_le16(chan->scid);
4470 cfm.result = cpu_to_le16(result);
4471
4472 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4473 sizeof(cfm), &cfm);
4474
4475 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4476 }
4477
4478 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4479 {
4480 struct l2cap_move_chan_cfm cfm;
4481
4482 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4483
4484 cfm.icid = cpu_to_le16(icid);
4485 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4486
4487 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4488 sizeof(cfm), &cfm);
4489 }
4490
4491 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4492 u16 icid)
4493 {
4494 struct l2cap_move_chan_cfm_rsp rsp;
4495
4496 BT_DBG("icid 0x%4.4x", icid);
4497
4498 rsp.icid = cpu_to_le16(icid);
4499 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4500 }
4501
4502 static void __release_logical_link(struct l2cap_chan *chan)
4503 {
4504 chan->hs_hchan = NULL;
4505 chan->hs_hcon = NULL;
4506
4507 /* Placeholder - release the logical link */
4508 }
4509
/* React to a failed logical link setup.  A channel that never reached
 * BT_CONNECTED is torn down; an established channel aborts its move
 * according to its role in the move procedure.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		/* Tell the initiator we cannot complete the move */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4540
/* Complete channel creation once the logical link is up: send the
 * deferred configure response and, if the peer already finished its
 * configuration, bring the channel up.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* chan->ident was saved when the configure response was deferred */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4563
/* Advance the channel-move state machine once the logical link for a
 * move has come up.  Unexpected states release the link and settle the
 * channel back to stable.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		/* If reassembly is still busy, defer until it drains */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4597
/* Call with chan locked */
/* Logical link confirmation callback: on failure the channel-specific
 * failure handling runs and the link references are dropped; on success
 * the pending create or move is completed.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4618
4619 void l2cap_move_start(struct l2cap_chan *chan)
4620 {
4621 BT_DBG("chan %p", chan);
4622
4623 if (chan->local_amp_id == HCI_BREDR_ID) {
4624 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4625 return;
4626 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4627 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4628 /* Placeholder - start physical link setup */
4629 } else {
4630 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4631 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4632 chan->move_id = 0;
4633 l2cap_move_setup(chan);
4634 l2cap_send_move_chan_req(chan, 0);
4635 }
4636 }
4637
/* Complete an AMP channel creation after the physical link result is
 * known.  Outgoing channels either send the create-channel request or
 * fall back to a BR/EDR connect; incoming channels answer the pending
 * create-channel request and start configuration on success.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is not used on AMP links */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Move straight to configuration */
			__l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4689
/* As move initiator: prepare the channel for the move, record the
 * target controller and send the move request to the remote.
 */
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}
4699
/* As move responder: answer the pending move request depending on the
 * state of the logical link.  The hci_chan lookup is still a
 * placeholder, so hchan is always NULL here and the NOT_ALLOWED path
 * is the one currently taken.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4724
4725 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4726 {
4727 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4728 u8 rsp_result;
4729 if (result == -EINVAL)
4730 rsp_result = L2CAP_MR_BAD_ID;
4731 else
4732 rsp_result = L2CAP_MR_NOT_ALLOWED;
4733
4734 l2cap_send_move_chan_rsp(chan, rsp_result);
4735 }
4736
4737 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4738 chan->move_state = L2CAP_MOVE_STABLE;
4739
4740 /* Restart data transmission */
4741 l2cap_ertm_send(chan);
4742 }
4743
/* Invoke with locked chan */
/* Physical link confirmation: dispatch the AMP result to channel
 * creation, move initiation/response, or move cancellation depending
 * on channel state and move role.
 * NOTE(review): the early-exit path below unlocks the channel even
 * though the caller took the lock — confirm callers expect the lock to
 * be dropped only on this path.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Channel still being set up: this is a create, not a move */
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4777
/* Handle an incoming L2CAP Move Channel Request (AMP channel move).
 *
 * Validates that the referenced channel exists and is eligible to be
 * moved (dynamic CID, not BR/EDR-only policy, ERTM or streaming mode),
 * and that any non-zero destination names a usable AMP controller,
 * then takes on the responder role or rejects the request.
 *
 * Returns 0 on success, -EPROTO on a malformed command, -EINVAL when
 * high-speed support is disabled.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!enable_hs)
		return -EINVAL;

	/* The initiator's ICID is our destination CID.  The channel is
	 * unlocked at the end of this function, so it is presumably
	 * returned locked — confirm against l2cap_get_chan_by_dcid().
	 */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Unknown channel: reply directly, nothing to unlock */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	/* Remember the request ident for the eventual response */
	chan->ident = cmd->ident;

	/* Only dynamically-allocated ERTM/streaming channels without a
	 * BR/EDR-only policy may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	/* Moving to the controller we are already on is meaningless */
	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* A non-zero destination must name a powered-up AMP controller */
	if (req->dest_amp_id) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (!req->dest_amp_id) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Cannot complete while locally busy */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		/* Moving to an AMP: physical link setup comes first */
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4875
/* Advance an in-progress channel move after a successful or pending
 * Move Channel Response, driving the per-channel move state machine.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* Unknown channel: confirm by ICID so the remote can
		 * finish its side of the handshake.
		 */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result restarts the (extended) response timer */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			/* Still waiting on the remote; keep state */
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
4965
4966 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
4967 u16 result)
4968 {
4969 struct l2cap_chan *chan;
4970
4971 chan = l2cap_get_chan_by_ident(conn, ident);
4972 if (!chan) {
4973 /* Could not locate channel, icid is best guess */
4974 l2cap_send_move_chan_cfm_icid(conn, icid);
4975 return;
4976 }
4977
4978 __clear_chan_timer(chan);
4979
4980 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4981 if (result == L2CAP_MR_COLLISION) {
4982 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4983 } else {
4984 /* Cleanup - cancel move */
4985 chan->move_id = chan->local_amp_id;
4986 l2cap_move_done(chan);
4987 }
4988 }
4989
4990 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4991
4992 l2cap_chan_unlock(chan);
4993 }
4994
4995 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4996 struct l2cap_cmd_hdr *cmd,
4997 u16 cmd_len, void *data)
4998 {
4999 struct l2cap_move_chan_rsp *rsp = data;
5000 u16 icid, result;
5001
5002 if (cmd_len != sizeof(*rsp))
5003 return -EPROTO;
5004
5005 icid = le16_to_cpu(rsp->icid);
5006 result = le16_to_cpu(rsp->result);
5007
5008 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5009
5010 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5011 l2cap_move_continue(conn, icid, result);
5012 else
5013 l2cap_move_fail(conn, cmd->ident, icid, result);
5014
5015 return 0;
5016 }
5017
/* Handle an incoming Move Channel Confirmation.
 *
 * Commits (or rolls back) the move when the channel was waiting for
 * this confirmation, and always answers with a Confirmation Response
 * as the spec requires — even for an unknown ICID.
 *
 * Returns 0 on success, -EPROTO on a malformed command.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Move confirmed: adopt the new controller id.
			 * Back on BR/EDR (id 0) the AMP logical link is
			 * no longer needed.
			 */
			chan->local_amp_id = chan->move_id;
			if (!chan->local_amp_id)
				__release_logical_link(chan);
		} else {
			/* Unconfirmed: stay on the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5059
5060 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5061 struct l2cap_cmd_hdr *cmd,
5062 u16 cmd_len, void *data)
5063 {
5064 struct l2cap_move_chan_cfm_rsp *rsp = data;
5065 struct l2cap_chan *chan;
5066 u16 icid;
5067
5068 if (cmd_len != sizeof(*rsp))
5069 return -EPROTO;
5070
5071 icid = le16_to_cpu(rsp->icid);
5072
5073 BT_DBG("icid 0x%4.4x", icid);
5074
5075 chan = l2cap_get_chan_by_scid(conn, icid);
5076 if (!chan)
5077 return 0;
5078
5079 __clear_chan_timer(chan);
5080
5081 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5082 chan->local_amp_id = chan->move_id;
5083
5084 if (!chan->local_amp_id && chan->hs_hchan)
5085 __release_logical_link(chan);
5086
5087 l2cap_move_done(chan);
5088 }
5089
5090 l2cap_chan_unlock(chan);
5091
5092 return 0;
5093 }
5094
5095 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5096 u16 to_multiplier)
5097 {
5098 u16 max_latency;
5099
5100 if (min > max || min < 6 || max > 3200)
5101 return -EINVAL;
5102
5103 if (to_multiplier < 10 || to_multiplier > 3200)
5104 return -EINVAL;
5105
5106 if (max >= to_multiplier * 8)
5107 return -EINVAL;
5108
5109 max_latency = (to_multiplier * 8 / max) - 1;
5110 if (latency > 499 || latency > max_latency)
5111 return -EINVAL;
5112
5113 return 0;
5114 }
5115
/* Handle an LE Connection Parameter Update Request from the peer.
 *
 * Only valid when the local device is master of the link.  The
 * requested parameters are range-checked; a response is sent first,
 * then, on acceptance, the new parameters are pushed to the
 * controller via hci_le_conn_update().
 *
 * Returns 0 on success, -EINVAL when not master, -EPROTO on a
 * malformed command.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the link master may apply new parameters */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	/* Respond first, then update the controller on acceptance */
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5158
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * Returns 0 on success or a negative error; a non-zero return causes
 * the caller (l2cap_sig_channel) to send a Command Reject.  Echo
 * requests are answered inline; echo responses are ignored.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		/* Connect and create-channel responses share a handler */
		err = l2cap_connect_create_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5238
5239 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5240 struct l2cap_cmd_hdr *cmd, u8 *data)
5241 {
5242 switch (cmd->code) {
5243 case L2CAP_COMMAND_REJ:
5244 return 0;
5245
5246 case L2CAP_CONN_PARAM_UPDATE_REQ:
5247 return l2cap_conn_param_update_req(conn, cmd, data);
5248
5249 case L2CAP_CONN_PARAM_UPDATE_RSP:
5250 return 0;
5251
5252 default:
5253 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5254 return -EINVAL;
5255 }
5256 }
5257
/* Process an skb received on the signaling channel.  The payload may
 * carry several concatenated commands; each is parsed and dispatched
 * to the LE or BR/EDR handler.  A handler error is answered with a
 * Command Reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Let raw sockets see the signaling traffic */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Length exceeding the remaining payload, or a zero
		 * ident (reserved), marks a corrupted packet — stop.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next command in the packet */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
5306
/* Verify the FCS (CRC-16) trailer of an ERTM/streaming frame.
 *
 * Returns 0 when the frame passes (or FCS is not in use on this
 * channel), -EBADMSG on a CRC mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* Extended control fields imply a larger L2CAP header */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off the skb, then read it from just past
		 * the new tail — the bytes are still in the buffer.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* The CRC covers the L2CAP header, which sits hdr_size
		 * bytes before skb->data at this point.
		 */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5327
/* Answer a poll (P-bit) by sending a final (F-bit) frame: an RNR when
 * locally busy, otherwise pending I-frames, falling back to an RR if
 * no frame carried the F-bit.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	/* Flag that an F-bit must go out; cleared by whichever frame
	 * actually carries it.
	 */
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		/* Locally busy: tell the remote to hold off */
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just came out of busy with frames still unacked:
	 * restart the retransmission timer.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5361
/* Append new_frag to skb's frag_list and update the aggregate length
 * and truesize accounting.  *last_frag tracks the tail of the chain
 * and is advanced to new_frag.
 *
 * NOTE(review): on the first fragment, *last_frag appears to point at
 * skb itself (the SAR_START path sets sdu_last_frag = skb), so the
 * (*last_frag)->next store is harmless there — confirm with callers.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5380
/* Reassemble an SDU from I-frames according to the frame's SAR bits
 * and deliver complete SDUs via chan->ops->recv().
 *
 * Ownership: on success the skb is either delivered or kept in
 * chan->sdu (skb is set to NULL to mark the transfer); on any error
 * both the skb and any partial SDU are freed and reassembly state is
 * reset.
 *
 * Returns 0 on success or a negative error (-EINVAL on SAR protocol
 * violations, -EMSGSIZE when the announced SDU exceeds the MTU, or an
 * error from ops->recv).
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly already in progress makes an unsegmented
		 * frame a protocol violation (err stays -EINVAL).
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A second START while reassembling is a violation */
		if (chan->sdu)
			break;

		/* The START frame carries the total SDU length first */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A START frame must not already hold the whole SDU
		 * (err stays -EINVAL).
		 */
		if (skb->len >= chan->sdu_len)
			break;

		/* Begin collecting fragments; take ownership of skb */
		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* CONTINUE without a START in progress is a violation */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Reaching the announced length before END is a
		 * violation (err stays -EINVAL).
		 */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* END without a START in progress is a violation */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The assembled length must match exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the frame and any partial SDU, reset state */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5462
/* Re-segment the transmit queue after a channel move changes the MTU.
 * Not implemented yet; currently always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5468
5469 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5470 {
5471 u8 event;
5472
5473 if (chan->mode != L2CAP_MODE_ERTM)
5474 return;
5475
5476 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5477 l2cap_tx(chan, NULL, NULL, event);
5478 }
5479
/* Drain the SREJ queue of frames that are now in sequence, feeding
 * each to l2cap_reassemble_sdu().  Stops at the first gap, on local
 * busy, or on a reassembly error.  When the queue empties, the RX
 * state machine returns to normal receive and an ack is sent.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found: the missing frame is still outstanding */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		/* Advance the in-order receive position */
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* All gaps filled: back to normal receive */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5513
/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame, observing the retry limit and the P/F-bit bookkeeping that
 * guards against retransmitting the same frame twice.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* An SREJ for the next, not-yet-sent sequence number is a
	 * protocol violation.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx of 0 means unlimited retries */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* Poll set: our retransmission must carry the F-bit */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			/* Remember this SREJ so a later F-bit copy of it
			 * is not retransmitted again.
			 */
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only when this F-bit SREJ
			 * matches the one already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5571
/* Handle a received REJ S-frame: retransmit everything from reqseq
 * onward, observing the retry limit and the REJ_ACT flag that avoids
 * a duplicate retransmission for the matching F-bit frame.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ for the next, not-yet-sent sequence number is a
	 * protocol violation.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx of 0 means unlimited retries */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Retransmit only if this F-bit REJ was not already
		 * acted upon.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5608
/* Classify the txseq of a received I-frame relative to the current
 * receive window and SREJ state.
 *
 * Returns one of the L2CAP_TXSEQ_* classifications that drive the RX
 * state machine: expected, duplicate, unexpected (gap), the SREJ
 * variants while in SREJ_SENT state, or invalid/ignorable.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		/* Outside the tx window while waiting on SREJs */
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		/* Head of the SREJ list: the retransmission we were
		 * waiting for next.
		 */
		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		/* Requested via SREJ, but not the next one expected */
		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* Behind the expected sequence number: already received */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5694
/* ERTM receive state machine, normal RECV state: handle incoming
 * I-frames and S-frames (RR/RNR/REJ/SREJ).
 *
 * Frame ownership: skb_in_use marks that the skb was consumed
 * (delivered or queued); otherwise it is freed before returning.
 *
 * Returns 0 or a negative error from SDU reassembly/delivery.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Let the TX side process the piggybacked ack */
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				/* F-bit answers our poll: retransmit
				 * unless this REJ was already acted on.
				 */
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only the ack info matters */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Do not retransmit while an AMP move is in
			 * progress, or when the REJ was already acted on.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote just left busy with unacked frames:
			 * restart the retransmission timer.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			/* Poll must be answered with an F-bit frame */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
5828
/* ERTM receive state machine, SREJ_SENT state: frames arriving while
 * one or more SREJs are outstanding are queued in srej_q until the
 * gaps are filled, at which point queued frames are reassembled.
 *
 * Frame ownership: skb_in_use marks that the skb was queued;
 * otherwise it is freed before returning.
 *
 * Returns 0 or a negative error from SDU reassembly/delivery.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The awaited retransmission arrived: drop it
			 * from the SREJ list and try to drain the queue.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Retransmit only if this F-bit frame was not
			 * already handled via CONN_REJ_ACT.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by re-sending the last SREJ */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* No poll: acknowledge with a plain RR */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
5971
5972 static int l2cap_finish_move(struct l2cap_chan *chan)
5973 {
5974 BT_DBG("chan %p", chan);
5975
5976 chan->rx_state = L2CAP_RX_STATE_RECV;
5977
5978 if (chan->hs_hcon)
5979 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
5980 else
5981 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
5982
5983 return l2cap_resegment(chan);
5984 }
5985
5986 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
5987 struct l2cap_ctrl *control,
5988 struct sk_buff *skb, u8 event)
5989 {
5990 int err;
5991
5992 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5993 event);
5994
5995 if (!control->poll)
5996 return -EPROTO;
5997
5998 l2cap_process_reqseq(chan, control->reqseq);
5999
6000 if (!skb_queue_empty(&chan->tx_q))
6001 chan->tx_send_head = skb_peek(&chan->tx_q);
6002 else
6003 chan->tx_send_head = NULL;
6004
6005 /* Rewind next_tx_seq to the point expected
6006 * by the receiver.
6007 */
6008 chan->next_tx_seq = control->reqseq;
6009 chan->unacked_frames = 0;
6010
6011 err = l2cap_finish_move(chan);
6012 if (err)
6013 return err;
6014
6015 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6016 l2cap_send_i_or_rr_or_rnr(chan);
6017
6018 if (event == L2CAP_EV_RECV_IFRAME)
6019 return -EPROTO;
6020
6021 return l2cap_rx_state_recv(chan, control, NULL, event);
6022 }
6023
6024 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6025 struct l2cap_ctrl *control,
6026 struct sk_buff *skb, u8 event)
6027 {
6028 int err;
6029
6030 if (!control->final)
6031 return -EPROTO;
6032
6033 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6034
6035 chan->rx_state = L2CAP_RX_STATE_RECV;
6036 l2cap_process_reqseq(chan, control->reqseq);
6037
6038 if (!skb_queue_empty(&chan->tx_q))
6039 chan->tx_send_head = skb_peek(&chan->tx_q);
6040 else
6041 chan->tx_send_head = NULL;
6042
6043 /* Rewind next_tx_seq to the point expected
6044 * by the receiver.
6045 */
6046 chan->next_tx_seq = control->reqseq;
6047 chan->unacked_frames = 0;
6048
6049 if (chan->hs_hcon)
6050 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6051 else
6052 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6053
6054 err = l2cap_resegment(chan);
6055
6056 if (!err)
6057 err = l2cap_rx_state_recv(chan, control, skb, event);
6058
6059 return err;
6060 }
6061
6062 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6063 {
6064 /* Make sure reqseq is for a packet that has been sent but not acked */
6065 u16 unacked;
6066
6067 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6068 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6069 }
6070
6071 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6072 struct sk_buff *skb, u8 event)
6073 {
6074 int err = 0;
6075
6076 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6077 control, skb, event, chan->rx_state);
6078
6079 if (__valid_reqseq(chan, control->reqseq)) {
6080 switch (chan->rx_state) {
6081 case L2CAP_RX_STATE_RECV:
6082 err = l2cap_rx_state_recv(chan, control, skb, event);
6083 break;
6084 case L2CAP_RX_STATE_SREJ_SENT:
6085 err = l2cap_rx_state_srej_sent(chan, control, skb,
6086 event);
6087 break;
6088 case L2CAP_RX_STATE_WAIT_P:
6089 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6090 break;
6091 case L2CAP_RX_STATE_WAIT_F:
6092 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6093 break;
6094 default:
6095 /* shut it down */
6096 break;
6097 }
6098 } else {
6099 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6100 control->reqseq, chan->next_tx_seq,
6101 chan->expected_ack_seq);
6102 l2cap_send_disconn_req(chan, ECONNRESET);
6103 }
6104
6105 return err;
6106 }
6107
6108 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6109 struct sk_buff *skb)
6110 {
6111 int err = 0;
6112
6113 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6114 chan->rx_state);
6115
6116 if (l2cap_classify_txseq(chan, control->txseq) ==
6117 L2CAP_TXSEQ_EXPECTED) {
6118 l2cap_pass_to_tx(chan, control);
6119
6120 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6121 __next_seq(chan, chan->buffer_seq));
6122
6123 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6124
6125 l2cap_reassemble_sdu(chan, skb, control);
6126 } else {
6127 if (chan->sdu) {
6128 kfree_skb(chan->sdu);
6129 chan->sdu = NULL;
6130 }
6131 chan->sdu_last_frag = NULL;
6132 chan->sdu_len = 0;
6133
6134 if (skb) {
6135 BT_DBG("Freeing %p", skb);
6136 kfree_skb(skb);
6137 }
6138 }
6139
6140 chan->last_acked_seq = control->txseq;
6141 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6142
6143 return err;
6144 }
6145
/* Receive path for ERTM and streaming-mode channels.
 *
 * Unpacks the control field, verifies the FCS and payload length
 * against the negotiated MPS, enforces the F/P bit rules, then hands
 * I-frames to l2cap_rx()/l2cap_stream_rx() and S-frames to l2cap_rx()
 * with the matching event code. The skb is always consumed (by the
 * called path or freed here). Always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* The SDU length field of a start fragment is not payload. */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	/* Neither is the trailing checksum, when FCS is in use. */
	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* Payload larger than the negotiated MPS is a protocol error. */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps control->super values to RX state-machine events. */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload at all. */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6233
/* Deliver a frame addressed to a dynamic (or A2MP) channel CID.
 *
 * Looks up the channel by source CID; for the A2MP fixed CID a channel
 * is created on demand. The skb is either consumed by the channel's
 * receive path or freed here. NOTE(review): the lookup path appears to
 * return the channel locked (the A2MP path locks explicitly and both
 * paths unlock at 'done') — confirm against l2cap_get_chan_by_scid().
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			/* Create the AMP manager channel on first use. */
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() always consumes the skb. */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6292
6293 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6294 struct sk_buff *skb)
6295 {
6296 struct l2cap_chan *chan;
6297
6298 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6299 if (!chan)
6300 goto drop;
6301
6302 BT_DBG("chan %p, len %d", chan, skb->len);
6303
6304 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6305 goto drop;
6306
6307 if (chan->imtu < skb->len)
6308 goto drop;
6309
6310 if (!chan->ops->recv(chan, skb))
6311 return;
6312
6313 drop:
6314 kfree_skb(skb);
6315 }
6316
6317 static void l2cap_att_channel(struct l2cap_conn *conn,
6318 struct sk_buff *skb)
6319 {
6320 struct l2cap_chan *chan;
6321
6322 chan = l2cap_global_chan_by_scid(0, L2CAP_CID_LE_DATA,
6323 conn->src, conn->dst);
6324 if (!chan)
6325 goto drop;
6326
6327 BT_DBG("chan %p, len %d", chan, skb->len);
6328
6329 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6330 goto drop;
6331
6332 if (chan->imtu < skb->len)
6333 goto drop;
6334
6335 if (!chan->ops->recv(chan, skb))
6336 return;
6337
6338 drop:
6339 kfree_skb(skb);
6340 }
6341
/* Demultiplex one complete L2CAP frame by channel ID.
 *
 * Validates the basic-header length against the actual payload, then
 * routes the frame to the signaling, connectionless, ATT, SMP or data
 * channel handler. Every path consumes the skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	/* skb_pull() only advances skb->data; lh still points at the
	 * (untouched) header bytes below.
	 */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the remaining payload exactly. */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry the PSM ahead of the data. */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, skb);
		break;

	case L2CAP_CID_SMP:
		/* A rejected SMP frame tears the whole link down. */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6385
6386 /* ---- L2CAP interface with lower layer (HCI) ---- */
6387
6388 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6389 {
6390 int exact = 0, lm1 = 0, lm2 = 0;
6391 struct l2cap_chan *c;
6392
6393 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6394
6395 /* Find listening sockets and check their link_mode */
6396 read_lock(&chan_list_lock);
6397 list_for_each_entry(c, &chan_list, global_l) {
6398 struct sock *sk = c->sk;
6399
6400 if (c->state != BT_LISTEN)
6401 continue;
6402
6403 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6404 lm1 |= HCI_LM_ACCEPT;
6405 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6406 lm1 |= HCI_LM_MASTER;
6407 exact++;
6408 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6409 lm2 |= HCI_LM_ACCEPT;
6410 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6411 lm2 |= HCI_LM_MASTER;
6412 }
6413 }
6414 read_unlock(&chan_list_lock);
6415
6416 return exact ? lm1 : lm2;
6417 }
6418
6419 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6420 {
6421 struct l2cap_conn *conn;
6422
6423 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6424
6425 if (!status) {
6426 conn = l2cap_conn_add(hcon);
6427 if (conn)
6428 l2cap_conn_ready(conn);
6429 } else {
6430 l2cap_conn_del(hcon, bt_to_errno(status));
6431 }
6432 }
6433
6434 int l2cap_disconn_ind(struct hci_conn *hcon)
6435 {
6436 struct l2cap_conn *conn = hcon->l2cap_data;
6437
6438 BT_DBG("hcon %p", hcon);
6439
6440 if (!conn)
6441 return HCI_ERROR_REMOTE_USER_TERM;
6442 return conn->disc_reason;
6443 }
6444
/* HCI callback: the ACL link disconnected. Tear down the L2CAP
 * connection, translating the HCI reason code into an errno.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
6451
6452 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6453 {
6454 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6455 return;
6456
6457 if (encrypt == 0x00) {
6458 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6459 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6460 } else if (chan->sec_level == BT_SECURITY_HIGH)
6461 l2cap_chan_close(chan, ECONNREFUSED);
6462 } else {
6463 if (chan->sec_level == BT_SECURITY_MEDIUM)
6464 __clear_chan_timer(chan);
6465 }
6466 }
6467
/* HCI callback: authentication/encryption state of the link changed.
 *
 * Walks every channel on the connection and advances its state
 * machine: LE data channels become ready on successful encryption,
 * channels in BT_CONNECT resume connection setup, and channels in
 * BT_CONNECT2 get their pending connect response sent with a result
 * derived from @status. Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* Encrypted LE link: start SMP key distribution and stop
		 * the pending security timeout.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channels are not affected by link security. */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Skip channels with a connect request still pending. */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Already-established channels: wake the socket and let
		 * l2cap_check_encryption() react to the new state.
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status) {
				l2cap_start_connection(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: schedule disconnect and
				 * reject the pending connect request.
				 */
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* First config request follows a successful
			 * response, unless one was sent already.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
6583
/* HCI callback: ACL data arrived on this link.
 *
 * Reassembles L2CAP frames from ACL fragments. A start fragment that
 * already carries the whole frame is dispatched directly; otherwise a
 * reassembly buffer (conn->rx_skb / conn->rx_len) collects
 * continuation fragments until the length announced in the basic
 * header is reached. The incoming skb is always consumed (copied and
 * freed, or handed off to l2cap_recv_frame()). Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while reassembly is in progress means the
		 * previous frame was truncated: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start is a stream error. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
		break;
	}

drop:
	/* Reached both on error and by falling out of the switch: the
	 * fragment's data (if valid) was already copied into rx_skb.
	 */
	kfree_skb(skb);
	return 0;
}
6684
/* seq_file show callback: print one line per registered channel with
 * addresses, state, PSM, CIDs, MTUs, security level and mode.
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &bt_sk(sk)->src, &bt_sk(sk)->dst,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}
6705
/* Single-shot seq_file open wrapper for the channel list dump. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}

static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* dentry for the "l2cap" debugfs file; removed in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
6719
6720 int __init l2cap_init(void)
6721 {
6722 int err;
6723
6724 err = l2cap_init_sockets();
6725 if (err < 0)
6726 return err;
6727
6728 if (bt_debugfs) {
6729 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6730 NULL, &l2cap_debugfs_fops);
6731 if (!l2cap_debugfs)
6732 BT_ERR("Failed to create L2CAP debug file");
6733 }
6734
6735 return 0;
6736 }
6737
/* Module teardown: remove the debugfs entry and unregister sockets. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
6743
/* Runtime knob (writable module parameter) to disable ERTM support. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
This page took 0.280837 seconds and 5 git commands to generate.