Bluetooth: Provide L2CAP ops callback for memcpy_fromiovec
net/bluetooth/l2cap_core.c
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27*/
28
29/* Bluetooth L2CAP core. */
30
31#include <linux/module.h>
32
33#include <linux/debugfs.h>
34#include <linux/crc16.h>
35
36#include <net/bluetooth/bluetooth.h>
37#include <net/bluetooth/hci_core.h>
38#include <net/bluetooth/l2cap.h>
39
40#include "smp.h"
41#include "a2mp.h"
42#include "amp.h"
43#include "6lowpan.h"
44
45#define LE_FLOWCTL_MAX_CREDITS 65535
46
47bool disable_ertm;
48
49static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
51
52static LIST_HEAD(chan_list);
53static DEFINE_RWLOCK(chan_list_lock);
54
55static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
56static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
57
58static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
59 u8 code, u8 ident, u16 dlen, void *data);
60static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
61 void *data);
62static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
63static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
64
65static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
66 struct sk_buff_head *skbs, u8 event);
67
68static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
69{
70 if (hcon->type == LE_LINK) {
71 if (type == ADDR_LE_DEV_PUBLIC)
72 return BDADDR_LE_PUBLIC;
73 else
74 return BDADDR_LE_RANDOM;
75 }
76
77 return BDADDR_BREDR;
78}
79
80/* ---- L2CAP channels ---- */
81
82static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
83 u16 cid)
84{
85 struct l2cap_chan *c;
86
87 list_for_each_entry(c, &conn->chan_l, list) {
88 if (c->dcid == cid)
89 return c;
90 }
91 return NULL;
92}
93
94static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
95 u16 cid)
96{
97 struct l2cap_chan *c;
98
99 list_for_each_entry(c, &conn->chan_l, list) {
100 if (c->scid == cid)
101 return c;
102 }
103 return NULL;
104}
105
106/* Find channel with given SCID.
107 * Returns locked channel. */
108static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
109 u16 cid)
110{
111 struct l2cap_chan *c;
112
113 mutex_lock(&conn->chan_lock);
114 c = __l2cap_get_chan_by_scid(conn, cid);
115 if (c)
116 l2cap_chan_lock(c);
117 mutex_unlock(&conn->chan_lock);
118
119 return c;
120}
121
122/* Find channel with given DCID.
123 * Returns locked channel.
124 */
125static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
126 u16 cid)
127{
128 struct l2cap_chan *c;
129
130 mutex_lock(&conn->chan_lock);
131 c = __l2cap_get_chan_by_dcid(conn, cid);
132 if (c)
133 l2cap_chan_lock(c);
134 mutex_unlock(&conn->chan_lock);
135
136 return c;
137}
138
139static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
140 u8 ident)
141{
142 struct l2cap_chan *c;
143
144 list_for_each_entry(c, &conn->chan_l, list) {
145 if (c->ident == ident)
146 return c;
147 }
148 return NULL;
149}
150
151static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
152 u8 ident)
153{
154 struct l2cap_chan *c;
155
156 mutex_lock(&conn->chan_lock);
157 c = __l2cap_get_chan_by_ident(conn, ident);
158 if (c)
159 l2cap_chan_lock(c);
160 mutex_unlock(&conn->chan_lock);
161
162 return c;
163}
164
165static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
166{
167 struct l2cap_chan *c;
168
169 list_for_each_entry(c, &chan_list, global_l) {
170 if (c->sport == psm && !bacmp(&c->src, src))
171 return c;
172 }
173 return NULL;
174}
175
176int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
177{
178 int err;
179
180 write_lock(&chan_list_lock);
181
182 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
183 err = -EADDRINUSE;
184 goto done;
185 }
186
187 if (psm) {
188 chan->psm = psm;
189 chan->sport = psm;
190 err = 0;
191 } else {
192 u16 p;
193
194 err = -EINVAL;
195 for (p = 0x1001; p < 0x1100; p += 2)
196 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
197 chan->psm = cpu_to_le16(p);
198 chan->sport = cpu_to_le16(p);
199 err = 0;
200 break;
201 }
202 }
203
204done:
205 write_unlock(&chan_list_lock);
206 return err;
207}
208
209int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
210{
211 write_lock(&chan_list_lock);
212
213 chan->scid = scid;
214
215 write_unlock(&chan_list_lock);
216
217 return 0;
218}
219
220static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
221{
222 u16 cid, dyn_end;
223
224 if (conn->hcon->type == LE_LINK)
225 dyn_end = L2CAP_CID_LE_DYN_END;
226 else
227 dyn_end = L2CAP_CID_DYN_END;
228
229 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
230 if (!__l2cap_get_chan_by_scid(conn, cid))
231 return cid;
232 }
233
234 return 0;
235}
236
237static void l2cap_state_change(struct l2cap_chan *chan, int state)
238{
239 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
240 state_to_string(state));
241
242 chan->state = state;
243 chan->ops->state_change(chan, state, 0);
244}
245
246static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
247 int state, int err)
248{
249 chan->state = state;
250 chan->ops->state_change(chan, chan->state, err);
251}
252
253static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
254{
255 chan->ops->state_change(chan, chan->state, err);
256}
257
258static void __set_retrans_timer(struct l2cap_chan *chan)
259{
260 if (!delayed_work_pending(&chan->monitor_timer) &&
261 chan->retrans_timeout) {
262 l2cap_set_timer(chan, &chan->retrans_timer,
263 msecs_to_jiffies(chan->retrans_timeout));
264 }
265}
266
267static void __set_monitor_timer(struct l2cap_chan *chan)
268{
269 __clear_retrans_timer(chan);
270 if (chan->monitor_timeout) {
271 l2cap_set_timer(chan, &chan->monitor_timer,
272 msecs_to_jiffies(chan->monitor_timeout));
273 }
274}
275
276static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
277 u16 seq)
278{
279 struct sk_buff *skb;
280
281 skb_queue_walk(head, skb) {
282 if (bt_cb(skb)->control.txseq == seq)
283 return skb;
284 }
285
286 return NULL;
287}
288
289/* ---- L2CAP sequence number lists ---- */
290
291/* For ERTM, ordered lists of sequence numbers must be tracked for
292 * SREJ requests that are received and for frames that are to be
293 * retransmitted. These seq_list functions implement a singly-linked
294 * list in an array, where membership in the list can also be checked
295 * in constant time. Items can also be added to the tail of the list
296 * and removed from the head in constant time, without further memory
297 * allocs or frees.
298 */
299
300static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
301{
302 size_t alloc_size, i;
303
304 /* Allocated size is a power of 2 to map sequence numbers
305 * (which may be up to 14 bits) into a smaller array that is
306 * sized for the negotiated ERTM transmit window.
307 */
308 alloc_size = roundup_pow_of_two(size);
309
310 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
311 if (!seq_list->list)
312 return -ENOMEM;
313
314 seq_list->mask = alloc_size - 1;
315 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
316 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
317 for (i = 0; i < alloc_size; i++)
318 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
319
320 return 0;
321}
322
323static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
324{
325 kfree(seq_list->list);
326}
327
328static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
329 u16 seq)
330{
331 /* Constant-time check for list membership */
332 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
333}
334
335static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
336{
337 u16 seq = seq_list->head;
338 u16 mask = seq_list->mask;
339
340 seq_list->head = seq_list->list[seq & mask];
341 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
342
343 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
344 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
345 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
346 }
347
348 return seq;
349}
350
351static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
352{
353 u16 i;
354
355 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
356 return;
357
358 for (i = 0; i <= seq_list->mask; i++)
359 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
360
361 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363}
364
365static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
366{
367 u16 mask = seq_list->mask;
368
369 /* All appends happen in constant time */
370
371 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
372 return;
373
374 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
375 seq_list->head = seq;
376 else
377 seq_list->list[seq_list->tail & mask] = seq;
378
379 seq_list->tail = seq;
380 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
381}
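
/* Illustrative sketch (an editorial addition, not part of the original file):
 * how the seq_list helpers above compose. The window size of 64 and the
 * sequence numbers used here are arbitrary example values.
 */
static void __maybe_unused l2cap_seq_list_example(void)
{
	struct l2cap_seq_list list;
	u16 seq;

	if (l2cap_seq_list_init(&list, 64))
		return;

	/* Record two missing sequence numbers, e.g. for SREJ handling */
	l2cap_seq_list_append(&list, 5);
	l2cap_seq_list_append(&list, 9);

	/* Membership is checked in constant time */
	WARN_ON(!l2cap_seq_list_contains(&list, 5));

	/* Entries come back out in FIFO order: 5 first, then 9 */
	seq = l2cap_seq_list_pop(&list);
	BT_DBG("popped seq %u", seq);

	l2cap_seq_list_free(&list);
}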
382
383static void l2cap_chan_timeout(struct work_struct *work)
384{
385 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
386 chan_timer.work);
387 struct l2cap_conn *conn = chan->conn;
388 int reason;
389
390 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
391
392 mutex_lock(&conn->chan_lock);
393 l2cap_chan_lock(chan);
394
395 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
396 reason = ECONNREFUSED;
397 else if (chan->state == BT_CONNECT &&
398 chan->sec_level != BT_SECURITY_SDP)
399 reason = ECONNREFUSED;
400 else
401 reason = ETIMEDOUT;
402
403 l2cap_chan_close(chan, reason);
404
405 l2cap_chan_unlock(chan);
406
407 chan->ops->close(chan);
408 mutex_unlock(&conn->chan_lock);
409
410 l2cap_chan_put(chan);
411}
412
413struct l2cap_chan *l2cap_chan_create(void)
414{
415 struct l2cap_chan *chan;
416
417 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
418 if (!chan)
419 return NULL;
420
421 mutex_init(&chan->lock);
422
423 write_lock(&chan_list_lock);
424 list_add(&chan->global_l, &chan_list);
425 write_unlock(&chan_list_lock);
426
427 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
428
429 chan->state = BT_OPEN;
430
431 kref_init(&chan->kref);
432
433 /* This flag is cleared in l2cap_chan_ready() */
434 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
435
436 BT_DBG("chan %p", chan);
437
438 return chan;
439}
440
441static void l2cap_chan_destroy(struct kref *kref)
442{
443 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
444
445 BT_DBG("chan %p", chan);
446
447 write_lock(&chan_list_lock);
448 list_del(&chan->global_l);
449 write_unlock(&chan_list_lock);
450
451 kfree(chan);
452}
453
454void l2cap_chan_hold(struct l2cap_chan *c)
455{
456 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
457
458 kref_get(&c->kref);
459}
460
461void l2cap_chan_put(struct l2cap_chan *c)
462{
463 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
464
465 kref_put(&c->kref, l2cap_chan_destroy);
466}
467
468void l2cap_chan_set_defaults(struct l2cap_chan *chan)
469{
470 chan->fcs = L2CAP_FCS_CRC16;
471 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
472 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
473 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
474 chan->remote_max_tx = chan->max_tx;
475 chan->remote_tx_win = chan->tx_win;
476 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
477 chan->sec_level = BT_SECURITY_LOW;
478 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
479 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
480 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
481 chan->conf_state = 0;
482
483 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
484}
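
/* Illustrative sketch (an editorial addition, not part of the original file):
 * the minimal setup a channel owner such as the socket front end performs
 * with the helpers above. The PSM value 0x1001 and ERTM mode are arbitrary
 * example choices.
 */
static struct l2cap_chan * __maybe_unused l2cap_example_chan_setup(void)
{
	struct l2cap_chan *chan;

	chan = l2cap_chan_create();
	if (!chan)
		return NULL;

	chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
	chan->mode = L2CAP_MODE_ERTM;
	l2cap_chan_set_defaults(chan);

	/* Bind to a dynamic PSM on any local controller address */
	if (l2cap_add_psm(chan, BDADDR_ANY, cpu_to_le16(0x1001)) < 0) {
		l2cap_chan_put(chan);
		return NULL;
	}

	return chan;
}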
485
486static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
487{
488 chan->sdu = NULL;
489 chan->sdu_last_frag = NULL;
490 chan->sdu_len = 0;
491 chan->tx_credits = 0;
492 chan->rx_credits = le_max_credits;
493 chan->mps = min_t(u16, chan->imtu, le_default_mps);
494
495 skb_queue_head_init(&chan->tx_q);
496}
497
498void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
499{
500 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
501 __le16_to_cpu(chan->psm), chan->dcid);
502
503 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
504
505 chan->conn = conn;
506
507 switch (chan->chan_type) {
508 case L2CAP_CHAN_CONN_ORIENTED:
509 /* Alloc CID for connection-oriented socket */
510 chan->scid = l2cap_alloc_cid(conn);
511 if (conn->hcon->type == ACL_LINK)
512 chan->omtu = L2CAP_DEFAULT_MTU;
513 break;
514
515 case L2CAP_CHAN_CONN_LESS:
516 /* Connectionless socket */
517 chan->scid = L2CAP_CID_CONN_LESS;
518 chan->dcid = L2CAP_CID_CONN_LESS;
519 chan->omtu = L2CAP_DEFAULT_MTU;
520 break;
521
522 case L2CAP_CHAN_FIXED:
523 /* Caller will set CID and CID specific MTU values */
524 break;
525
526 default:
527 /* Raw socket can send/recv signalling messages only */
528 chan->scid = L2CAP_CID_SIGNALING;
529 chan->dcid = L2CAP_CID_SIGNALING;
530 chan->omtu = L2CAP_DEFAULT_MTU;
531 }
532
533 chan->local_id = L2CAP_BESTEFFORT_ID;
534 chan->local_stype = L2CAP_SERV_BESTEFFORT;
535 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
536 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
537 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
538 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
539
540 l2cap_chan_hold(chan);
541
542 hci_conn_hold(conn->hcon);
543
544 list_add(&chan->list, &conn->chan_l);
545}
546
547void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
548{
549 mutex_lock(&conn->chan_lock);
550 __l2cap_chan_add(conn, chan);
551 mutex_unlock(&conn->chan_lock);
552}
553
554void l2cap_chan_del(struct l2cap_chan *chan, int err)
555{
556 struct l2cap_conn *conn = chan->conn;
557
558 __clear_chan_timer(chan);
559
560 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
561
562 if (conn) {
563 struct amp_mgr *mgr = conn->hcon->amp_mgr;
564 /* Delete from channel list */
565 list_del(&chan->list);
566
567 l2cap_chan_put(chan);
568
569 chan->conn = NULL;
570
571 if (chan->scid != L2CAP_CID_A2MP)
572 hci_conn_drop(conn->hcon);
573
574 if (mgr && mgr->bredr_chan == chan)
575 mgr->bredr_chan = NULL;
576 }
577
578 if (chan->hs_hchan) {
579 struct hci_chan *hs_hchan = chan->hs_hchan;
580
581 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
582 amp_disconnect_logical_link(hs_hchan);
583 }
584
585 chan->ops->teardown(chan, err);
586
587 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
588 return;
589
590 switch (chan->mode) {
591 case L2CAP_MODE_BASIC:
592 break;
593
594 case L2CAP_MODE_LE_FLOWCTL:
595 skb_queue_purge(&chan->tx_q);
596 break;
597
598 case L2CAP_MODE_ERTM:
599 __clear_retrans_timer(chan);
600 __clear_monitor_timer(chan);
601 __clear_ack_timer(chan);
602
603 skb_queue_purge(&chan->srej_q);
604
605 l2cap_seq_list_free(&chan->srej_list);
606 l2cap_seq_list_free(&chan->retrans_list);
607
608 /* fall through */
609
610 case L2CAP_MODE_STREAMING:
611 skb_queue_purge(&chan->tx_q);
612 break;
613 }
614
615 return;
616}
617
618void l2cap_conn_update_id_addr(struct hci_conn *hcon)
619{
620 struct l2cap_conn *conn = hcon->l2cap_data;
621 struct l2cap_chan *chan;
622
623 mutex_lock(&conn->chan_lock);
624
625 list_for_each_entry(chan, &conn->chan_l, list) {
626 l2cap_chan_lock(chan);
627 bacpy(&chan->dst, &hcon->dst);
628 chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
629 l2cap_chan_unlock(chan);
630 }
631
632 mutex_unlock(&conn->chan_lock);
633}
634
635static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
636{
637 struct l2cap_conn *conn = chan->conn;
638 struct l2cap_le_conn_rsp rsp;
639 u16 result;
640
641 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
642 result = L2CAP_CR_AUTHORIZATION;
643 else
644 result = L2CAP_CR_BAD_PSM;
645
646 l2cap_state_change(chan, BT_DISCONN);
647
648 rsp.dcid = cpu_to_le16(chan->scid);
649 rsp.mtu = cpu_to_le16(chan->imtu);
650 rsp.mps = cpu_to_le16(chan->mps);
651 rsp.credits = cpu_to_le16(chan->rx_credits);
652 rsp.result = cpu_to_le16(result);
653
654 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
655 &rsp);
656}
657
658static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
659{
660 struct l2cap_conn *conn = chan->conn;
661 struct l2cap_conn_rsp rsp;
662 u16 result;
663
664 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
665 result = L2CAP_CR_SEC_BLOCK;
666 else
667 result = L2CAP_CR_BAD_PSM;
668
669 l2cap_state_change(chan, BT_DISCONN);
670
671 rsp.scid = cpu_to_le16(chan->dcid);
672 rsp.dcid = cpu_to_le16(chan->scid);
673 rsp.result = cpu_to_le16(result);
674 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
675
676 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
677}
678
679void l2cap_chan_close(struct l2cap_chan *chan, int reason)
680{
681 struct l2cap_conn *conn = chan->conn;
682
683 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
684
685 switch (chan->state) {
686 case BT_LISTEN:
687 chan->ops->teardown(chan, 0);
688 break;
689
690 case BT_CONNECTED:
691 case BT_CONFIG:
692 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
693 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
694 l2cap_send_disconn_req(chan, reason);
695 } else
696 l2cap_chan_del(chan, reason);
697 break;
698
699 case BT_CONNECT2:
700 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
701 if (conn->hcon->type == ACL_LINK)
702 l2cap_chan_connect_reject(chan);
703 else if (conn->hcon->type == LE_LINK)
704 l2cap_chan_le_connect_reject(chan);
705 }
706
707 l2cap_chan_del(chan, reason);
708 break;
709
710 case BT_CONNECT:
711 case BT_DISCONN:
712 l2cap_chan_del(chan, reason);
713 break;
714
715 default:
716 chan->ops->teardown(chan, 0);
717 break;
718 }
719}
720
721static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
722{
723 switch (chan->chan_type) {
724 case L2CAP_CHAN_RAW:
725 switch (chan->sec_level) {
726 case BT_SECURITY_HIGH:
727 case BT_SECURITY_FIPS:
728 return HCI_AT_DEDICATED_BONDING_MITM;
729 case BT_SECURITY_MEDIUM:
730 return HCI_AT_DEDICATED_BONDING;
731 default:
732 return HCI_AT_NO_BONDING;
733 }
734 break;
735 case L2CAP_CHAN_CONN_LESS:
736 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
737 if (chan->sec_level == BT_SECURITY_LOW)
738 chan->sec_level = BT_SECURITY_SDP;
739 }
740 if (chan->sec_level == BT_SECURITY_HIGH ||
741 chan->sec_level == BT_SECURITY_FIPS)
742 return HCI_AT_NO_BONDING_MITM;
743 else
744 return HCI_AT_NO_BONDING;
745 break;
746 case L2CAP_CHAN_CONN_ORIENTED:
747 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
748 if (chan->sec_level == BT_SECURITY_LOW)
749 chan->sec_level = BT_SECURITY_SDP;
750
751 if (chan->sec_level == BT_SECURITY_HIGH ||
752 chan->sec_level == BT_SECURITY_FIPS)
753 return HCI_AT_NO_BONDING_MITM;
754 else
755 return HCI_AT_NO_BONDING;
756 }
757 /* fall through */
758 default:
759 switch (chan->sec_level) {
760 case BT_SECURITY_HIGH:
761 case BT_SECURITY_FIPS:
762 return HCI_AT_GENERAL_BONDING_MITM;
763 case BT_SECURITY_MEDIUM:
764 return HCI_AT_GENERAL_BONDING;
765 default:
766 return HCI_AT_NO_BONDING;
767 }
768 break;
769 }
770}
771
772/* Service level security */
773int l2cap_chan_check_security(struct l2cap_chan *chan)
774{
775 struct l2cap_conn *conn = chan->conn;
776 __u8 auth_type;
777
778 if (conn->hcon->type == LE_LINK)
779 return smp_conn_security(conn->hcon, chan->sec_level);
780
781 auth_type = l2cap_get_auth_type(chan);
782
783 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
784}
785
786static u8 l2cap_get_ident(struct l2cap_conn *conn)
787{
788 u8 id;
789
790 /* Get next available identifier.
791 * 1 - 128 are used by kernel.
792 * 129 - 199 are reserved.
793 * 200 - 254 are used by utilities like l2ping, etc.
794 */
795
796 spin_lock(&conn->lock);
797
798 if (++conn->tx_ident > 128)
799 conn->tx_ident = 1;
800
801 id = conn->tx_ident;
802
803 spin_unlock(&conn->lock);
804
805 return id;
806}
807
808static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
809 void *data)
810{
811 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
812 u8 flags;
813
814 BT_DBG("code 0x%2.2x", code);
815
816 if (!skb)
817 return;
818
819 if (lmp_no_flush_capable(conn->hcon->hdev))
820 flags = ACL_START_NO_FLUSH;
821 else
822 flags = ACL_START;
823
824 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
825 skb->priority = HCI_PRIO_MAX;
826
827 hci_send_acl(conn->hchan, skb, flags);
828}
829
830static bool __chan_is_moving(struct l2cap_chan *chan)
831{
832 return chan->move_state != L2CAP_MOVE_STABLE &&
833 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
834}
835
836static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
837{
838 struct hci_conn *hcon = chan->conn->hcon;
839 u16 flags;
840
841 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
842 skb->priority);
843
844 if (chan->hs_hcon && !__chan_is_moving(chan)) {
845 if (chan->hs_hchan)
846 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
847 else
848 kfree_skb(skb);
849
850 return;
851 }
852
853 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
854 lmp_no_flush_capable(hcon->hdev))
855 flags = ACL_START_NO_FLUSH;
856 else
857 flags = ACL_START;
858
859 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
860 hci_send_acl(chan->conn->hchan, skb, flags);
861}
862
863static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
864{
865 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
866 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
867
868 if (enh & L2CAP_CTRL_FRAME_TYPE) {
869 /* S-Frame */
870 control->sframe = 1;
871 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
872 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
873
874 control->sar = 0;
875 control->txseq = 0;
876 } else {
877 /* I-Frame */
878 control->sframe = 0;
879 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
880 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
881
882 control->poll = 0;
883 control->super = 0;
884 }
885}
886
887static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
888{
889 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
890 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
891
892 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
893 /* S-Frame */
894 control->sframe = 1;
895 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
896 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
897
898 control->sar = 0;
899 control->txseq = 0;
900 } else {
901 /* I-Frame */
902 control->sframe = 0;
903 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
904 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
905
906 control->poll = 0;
907 control->super = 0;
908 }
909}
910
911static inline void __unpack_control(struct l2cap_chan *chan,
912 struct sk_buff *skb)
913{
914 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
915 __unpack_extended_control(get_unaligned_le32(skb->data),
916 &bt_cb(skb)->control);
917 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
918 } else {
919 __unpack_enhanced_control(get_unaligned_le16(skb->data),
920 &bt_cb(skb)->control);
921 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
922 }
923}
924
925static u32 __pack_extended_control(struct l2cap_ctrl *control)
926{
927 u32 packed;
928
929 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
930 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
931
932 if (control->sframe) {
933 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
934 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
935 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
936 } else {
937 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
938 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
939 }
940
941 return packed;
942}
943
944static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
945{
946 u16 packed;
947
948 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
949 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
950
951 if (control->sframe) {
952 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
953 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
954 packed |= L2CAP_CTRL_FRAME_TYPE;
955 } else {
956 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
957 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
958 }
959
960 return packed;
961}
962
963static inline void __pack_control(struct l2cap_chan *chan,
964 struct l2cap_ctrl *control,
965 struct sk_buff *skb)
966{
967 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
968 put_unaligned_le32(__pack_extended_control(control),
969 skb->data + L2CAP_HDR_SIZE);
970 } else {
971 put_unaligned_le16(__pack_enhanced_control(control),
972 skb->data + L2CAP_HDR_SIZE);
973 }
974}
975
976static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
977{
978 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
979 return L2CAP_EXT_HDR_SIZE;
980 else
981 return L2CAP_ENH_HDR_SIZE;
982}
983
984static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
985 u32 control)
986{
987 struct sk_buff *skb;
988 struct l2cap_hdr *lh;
989 int hlen = __ertm_hdr_size(chan);
990
991 if (chan->fcs == L2CAP_FCS_CRC16)
992 hlen += L2CAP_FCS_SIZE;
993
994 skb = bt_skb_alloc(hlen, GFP_KERNEL);
995
996 if (!skb)
997 return ERR_PTR(-ENOMEM);
998
999 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1000 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1001 lh->cid = cpu_to_le16(chan->dcid);
1002
1003 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1004 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1005 else
1006 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1007
1008 if (chan->fcs == L2CAP_FCS_CRC16) {
1009 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1010 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1011 }
1012
1013 skb->priority = HCI_PRIO_MAX;
1014 return skb;
1015}
1016
1017static void l2cap_send_sframe(struct l2cap_chan *chan,
1018 struct l2cap_ctrl *control)
1019{
1020 struct sk_buff *skb;
1021 u32 control_field;
1022
1023 BT_DBG("chan %p, control %p", chan, control);
1024
1025 if (!control->sframe)
1026 return;
1027
1028 if (__chan_is_moving(chan))
1029 return;
1030
1031 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1032 !control->poll)
1033 control->final = 1;
1034
1035 if (control->super == L2CAP_SUPER_RR)
1036 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1037 else if (control->super == L2CAP_SUPER_RNR)
1038 set_bit(CONN_RNR_SENT, &chan->conn_state);
1039
1040 if (control->super != L2CAP_SUPER_SREJ) {
1041 chan->last_acked_seq = control->reqseq;
1042 __clear_ack_timer(chan);
1043 }
1044
1045 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1046 control->final, control->poll, control->super);
1047
1048 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1049 control_field = __pack_extended_control(control);
1050 else
1051 control_field = __pack_enhanced_control(control);
1052
1053 skb = l2cap_create_sframe_pdu(chan, control_field);
1054 if (!IS_ERR(skb))
1055 l2cap_do_send(chan, skb);
1056}
1057
1058static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1059{
1060 struct l2cap_ctrl control;
1061
1062 BT_DBG("chan %p, poll %d", chan, poll);
1063
1064 memset(&control, 0, sizeof(control));
1065 control.sframe = 1;
1066 control.poll = poll;
1067
1068 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1069 control.super = L2CAP_SUPER_RNR;
1070 else
1071 control.super = L2CAP_SUPER_RR;
1072
1073 control.reqseq = chan->buffer_seq;
1074 l2cap_send_sframe(chan, &control);
1075}
1076
1077static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1078{
1079 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1080}
1081
1082static bool __amp_capable(struct l2cap_chan *chan)
1083{
1084 struct l2cap_conn *conn = chan->conn;
1085 struct hci_dev *hdev;
1086 bool amp_available = false;
1087
1088 if (!conn->hs_enabled)
1089 return false;
1090
1091 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1092 return false;
1093
1094 read_lock(&hci_dev_list_lock);
1095 list_for_each_entry(hdev, &hci_dev_list, list) {
1096 if (hdev->amp_type != AMP_TYPE_BREDR &&
1097 test_bit(HCI_UP, &hdev->flags)) {
1098 amp_available = true;
1099 break;
1100 }
1101 }
1102 read_unlock(&hci_dev_list_lock);
1103
1104 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1105 return amp_available;
1106
1107 return false;
1108}
1109
1110static bool l2cap_check_efs(struct l2cap_chan *chan)
1111{
1112 /* Check EFS parameters */
1113 return true;
1114}
1115
1116void l2cap_send_conn_req(struct l2cap_chan *chan)
1117{
1118 struct l2cap_conn *conn = chan->conn;
1119 struct l2cap_conn_req req;
1120
1121 req.scid = cpu_to_le16(chan->scid);
1122 req.psm = chan->psm;
1123
1124 chan->ident = l2cap_get_ident(conn);
1125
1126 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1127
1128 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1129}
1130
1131static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1132{
1133 struct l2cap_create_chan_req req;
1134 req.scid = cpu_to_le16(chan->scid);
1135 req.psm = chan->psm;
1136 req.amp_id = amp_id;
1137
1138 chan->ident = l2cap_get_ident(chan->conn);
1139
1140 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1141 sizeof(req), &req);
1142}
1143
1144static void l2cap_move_setup(struct l2cap_chan *chan)
1145{
1146 struct sk_buff *skb;
1147
1148 BT_DBG("chan %p", chan);
1149
1150 if (chan->mode != L2CAP_MODE_ERTM)
1151 return;
1152
1153 __clear_retrans_timer(chan);
1154 __clear_monitor_timer(chan);
1155 __clear_ack_timer(chan);
1156
1157 chan->retry_count = 0;
1158 skb_queue_walk(&chan->tx_q, skb) {
1159 if (bt_cb(skb)->control.retries)
1160 bt_cb(skb)->control.retries = 1;
1161 else
1162 break;
1163 }
1164
1165 chan->expected_tx_seq = chan->buffer_seq;
1166
1167 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1168 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1169 l2cap_seq_list_clear(&chan->retrans_list);
1170 l2cap_seq_list_clear(&chan->srej_list);
1171 skb_queue_purge(&chan->srej_q);
1172
1173 chan->tx_state = L2CAP_TX_STATE_XMIT;
1174 chan->rx_state = L2CAP_RX_STATE_MOVE;
1175
1176 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1177}
1178
1179static void l2cap_move_done(struct l2cap_chan *chan)
1180{
1181 u8 move_role = chan->move_role;
1182 BT_DBG("chan %p", chan);
1183
1184 chan->move_state = L2CAP_MOVE_STABLE;
1185 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1186
1187 if (chan->mode != L2CAP_MODE_ERTM)
1188 return;
1189
1190 switch (move_role) {
1191 case L2CAP_MOVE_ROLE_INITIATOR:
1192 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1193 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1194 break;
1195 case L2CAP_MOVE_ROLE_RESPONDER:
1196 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1197 break;
1198 }
1199}
1200
1201static void l2cap_chan_ready(struct l2cap_chan *chan)
1202{
1203 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1204 chan->conf_state = 0;
1205 __clear_chan_timer(chan);
1206
1207 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1208 chan->ops->suspend(chan);
1209
1210 chan->state = BT_CONNECTED;
1211
1212 chan->ops->ready(chan);
1213}
1214
1215static void l2cap_le_connect(struct l2cap_chan *chan)
1216{
1217 struct l2cap_conn *conn = chan->conn;
1218 struct l2cap_le_conn_req req;
1219
1220 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1221 return;
1222
1223 req.psm = chan->psm;
1224 req.scid = cpu_to_le16(chan->scid);
1225 req.mtu = cpu_to_le16(chan->imtu);
1226 req.mps = cpu_to_le16(chan->mps);
1227 req.credits = cpu_to_le16(chan->rx_credits);
1228
1229 chan->ident = l2cap_get_ident(conn);
1230
1231 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1232 sizeof(req), &req);
1233}
1234
1235static void l2cap_le_start(struct l2cap_chan *chan)
1236{
1237 struct l2cap_conn *conn = chan->conn;
1238
1239 if (!smp_conn_security(conn->hcon, chan->sec_level))
1240 return;
1241
1242 if (!chan->psm) {
1243 l2cap_chan_ready(chan);
1244 return;
1245 }
1246
1247 if (chan->state == BT_CONNECT)
1248 l2cap_le_connect(chan);
1249}
1250
1251static void l2cap_start_connection(struct l2cap_chan *chan)
1252{
1253 if (__amp_capable(chan)) {
1254 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1255 a2mp_discover_amp(chan);
1256 } else if (chan->conn->hcon->type == LE_LINK) {
1257 l2cap_le_start(chan);
1258 } else {
1259 l2cap_send_conn_req(chan);
1260 }
1261}
1262
1263static void l2cap_do_start(struct l2cap_chan *chan)
1264{
1265 struct l2cap_conn *conn = chan->conn;
1266
1267 if (conn->hcon->type == LE_LINK) {
1268 l2cap_le_start(chan);
1269 return;
1270 }
1271
1272 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1273 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1274 return;
1275
1276 if (l2cap_chan_check_security(chan) &&
1277 __l2cap_no_conn_pending(chan)) {
1278 l2cap_start_connection(chan);
1279 }
1280 } else {
1281 struct l2cap_info_req req;
1282 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1283
1284 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1285 conn->info_ident = l2cap_get_ident(conn);
1286
1287 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1288
1289 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1290 sizeof(req), &req);
1291 }
1292}
1293
1294static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1295{
1296 u32 local_feat_mask = l2cap_feat_mask;
1297 if (!disable_ertm)
1298 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1299
1300 switch (mode) {
1301 case L2CAP_MODE_ERTM:
1302 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1303 case L2CAP_MODE_STREAMING:
1304 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1305 default:
1306 return 0x00;
1307 }
1308}
1309
1310static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1311{
1312 struct l2cap_conn *conn = chan->conn;
1313 struct l2cap_disconn_req req;
1314
1315 if (!conn)
1316 return;
1317
1318 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1319 __clear_retrans_timer(chan);
1320 __clear_monitor_timer(chan);
1321 __clear_ack_timer(chan);
1322 }
1323
1324 if (chan->scid == L2CAP_CID_A2MP) {
1325 l2cap_state_change(chan, BT_DISCONN);
1326 return;
1327 }
1328
1329 req.dcid = cpu_to_le16(chan->dcid);
1330 req.scid = cpu_to_le16(chan->scid);
1331 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1332 sizeof(req), &req);
1333
1334 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1335}
1336
1337/* ---- L2CAP connections ---- */
1338static void l2cap_conn_start(struct l2cap_conn *conn)
1339{
1340 struct l2cap_chan *chan, *tmp;
1341
1342 BT_DBG("conn %p", conn);
1343
1344 mutex_lock(&conn->chan_lock);
1345
1346 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1347 l2cap_chan_lock(chan);
1348
1349 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1350 l2cap_chan_unlock(chan);
1351 continue;
1352 }
1353
1354 if (chan->state == BT_CONNECT) {
1355 if (!l2cap_chan_check_security(chan) ||
1356 !__l2cap_no_conn_pending(chan)) {
1357 l2cap_chan_unlock(chan);
1358 continue;
1359 }
1360
1361 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1362 && test_bit(CONF_STATE2_DEVICE,
1363 &chan->conf_state)) {
1364 l2cap_chan_close(chan, ECONNRESET);
1365 l2cap_chan_unlock(chan);
1366 continue;
1367 }
1368
1369 l2cap_start_connection(chan);
1370
1371 } else if (chan->state == BT_CONNECT2) {
1372 struct l2cap_conn_rsp rsp;
1373 char buf[128];
1374 rsp.scid = cpu_to_le16(chan->dcid);
1375 rsp.dcid = cpu_to_le16(chan->scid);
1376
1377 if (l2cap_chan_check_security(chan)) {
1378 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1379 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1380 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1381 chan->ops->defer(chan);
1382
1383 } else {
1384 l2cap_state_change(chan, BT_CONFIG);
1385 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1386 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1387 }
1388 } else {
1389 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1390 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1391 }
1392
1393 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1394 sizeof(rsp), &rsp);
1395
1396 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1397 rsp.result != L2CAP_CR_SUCCESS) {
1398 l2cap_chan_unlock(chan);
1399 continue;
1400 }
1401
1402 set_bit(CONF_REQ_SENT, &chan->conf_state);
1403 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1404 l2cap_build_conf_req(chan, buf), buf);
1405 chan->num_conf_req++;
1406 }
1407
1408 l2cap_chan_unlock(chan);
1409 }
1410
1411 mutex_unlock(&conn->chan_lock);
1412}
1413
1414/* Find socket with cid and source/destination bdaddr.
1415 * Returns closest match, locked.
1416 */
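/* Editorial note (illustrative, not part of the original file): a channel
 * bound to the exact (src, dst) pair is returned immediately, while a
 * wildcard binding such as (BDADDR_ANY, dst) is only remembered as the
 * closest match and returned if no exact match exists.
 */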
1417static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1418 bdaddr_t *src,
1419 bdaddr_t *dst)
1420{
1421 struct l2cap_chan *c, *c1 = NULL;
1422
1423 read_lock(&chan_list_lock);
1424
1425 list_for_each_entry(c, &chan_list, global_l) {
1426 if (state && c->state != state)
1427 continue;
1428
1429 if (c->scid == cid) {
1430 int src_match, dst_match;
1431 int src_any, dst_any;
1432
1433 /* Exact match. */
1434 src_match = !bacmp(&c->src, src);
1435 dst_match = !bacmp(&c->dst, dst);
1436 if (src_match && dst_match) {
1437 read_unlock(&chan_list_lock);
1438 return c;
1439 }
1440
1441 /* Closest match */
1442 src_any = !bacmp(&c->src, BDADDR_ANY);
1443 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1444 if ((src_match && dst_any) || (src_any && dst_match) ||
1445 (src_any && dst_any))
1446 c1 = c;
1447 }
1448 }
1449
1450 read_unlock(&chan_list_lock);
1451
1452 return c1;
1453}
1454
1455static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1456{
1457 struct hci_conn *hcon = conn->hcon;
1458 struct l2cap_chan *chan, *pchan;
1459 u8 dst_type;
1460
1461 BT_DBG("");
1462
1463 bt_6lowpan_add_conn(conn);
1464
1465 /* Check if we have socket listening on cid */
1466 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1467 &hcon->src, &hcon->dst);
1468 if (!pchan)
1469 return;
1470
1471 /* Client ATT sockets should override the server one */
1472 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1473 return;
1474
1475 dst_type = bdaddr_type(hcon, hcon->dst_type);
1476
1477 /* If device is blocked, do not create a channel for it */
1478 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
1479 return;
1480
1481 l2cap_chan_lock(pchan);
1482
1483 chan = pchan->ops->new_connection(pchan);
1484 if (!chan)
1485 goto clean;
1486
1487 bacpy(&chan->src, &hcon->src);
1488 bacpy(&chan->dst, &hcon->dst);
1489 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1490 chan->dst_type = dst_type;
1491
1492 __l2cap_chan_add(conn, chan);
1493
1494clean:
1495 l2cap_chan_unlock(pchan);
1496}
1497
1498static void l2cap_conn_ready(struct l2cap_conn *conn)
1499{
1500 struct l2cap_chan *chan;
1501 struct hci_conn *hcon = conn->hcon;
1502
1503 BT_DBG("conn %p", conn);
1504
1505 /* For outgoing pairing which doesn't necessarily have an
1506 * associated socket (e.g. mgmt_pair_device).
1507 */
1508 if (hcon->out && hcon->type == LE_LINK)
1509 smp_conn_security(hcon, hcon->pending_sec_level);
1510
1511 mutex_lock(&conn->chan_lock);
1512
1513 if (hcon->type == LE_LINK)
1514 l2cap_le_conn_ready(conn);
1515
1516 list_for_each_entry(chan, &conn->chan_l, list) {
1517
1518 l2cap_chan_lock(chan);
1519
1520 if (chan->scid == L2CAP_CID_A2MP) {
1521 l2cap_chan_unlock(chan);
1522 continue;
1523 }
1524
1525 if (hcon->type == LE_LINK) {
1526 l2cap_le_start(chan);
1527 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1528 l2cap_chan_ready(chan);
1529
1530 } else if (chan->state == BT_CONNECT) {
1531 l2cap_do_start(chan);
1532 }
1533
1534 l2cap_chan_unlock(chan);
1535 }
1536
1537 mutex_unlock(&conn->chan_lock);
1538
1539 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1540}
1541
1542/* Notify sockets that we cannot guarantee reliability anymore */
1543static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1544{
1545 struct l2cap_chan *chan;
1546
1547 BT_DBG("conn %p", conn);
1548
1549 mutex_lock(&conn->chan_lock);
1550
1551 list_for_each_entry(chan, &conn->chan_l, list) {
1552 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1553 l2cap_chan_set_err(chan, err);
1554 }
1555
1556 mutex_unlock(&conn->chan_lock);
1557}
1558
1559static void l2cap_info_timeout(struct work_struct *work)
1560{
1561 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1562 info_timer.work);
1563
1564 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1565 conn->info_ident = 0;
1566
1567 l2cap_conn_start(conn);
1568}
1569
1570/*
1571 * l2cap_user
1572 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1573 * callback is called during registration. The ->remove callback is called
1574 * during unregistration.
1575 * An l2cap_user object can either be explicitly unregistered or when the
1576 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1577 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1578 * External modules must own a reference to the l2cap_conn object if they intend
1579 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1580 * any time if they don't.
1581 */
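
/* Illustrative sketch (an editorial addition, not part of the original file):
 * how an external module might hook into the l2cap_user interface described
 * above. The callback bodies are placeholders; only the registration pattern
 * and the reference-counting rule are shown.
 */
static int example_user_probe(struct l2cap_conn *conn, struct l2cap_user *user)
{
	BT_DBG("conn %p bound to example user", conn);
	return 0;
}

static void example_user_remove(struct l2cap_conn *conn,
				struct l2cap_user *user)
{
	BT_DBG("conn %p released by example user", conn);
}

static int __maybe_unused example_attach_user(struct l2cap_conn *conn,
					      struct l2cap_user *user)
{
	int err;

	/* The caller is assumed to pass a zero-initialized l2cap_user */
	user->probe = example_user_probe;
	user->remove = example_user_remove;

	/* Hold a reference so conn cannot vanish before we get the chance
	 * to call l2cap_unregister_user() later on.
	 */
	l2cap_conn_get(conn);

	err = l2cap_register_user(conn, user);
	if (err)
		l2cap_conn_put(conn);

	return err;
}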
1582
1583int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1584{
1585 struct hci_dev *hdev = conn->hcon->hdev;
1586 int ret;
1587
1588 /* We need to check whether l2cap_conn is registered. If it is not, we
1589 * must not register the l2cap_user. l2cap_conn_del() unregisters
1590 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1591 * relies on the parent hci_conn object to be locked. This itself relies
1592 * on the hci_dev object to be locked. So we must lock the hci device
1593 * here, too. */
1594
1595 hci_dev_lock(hdev);
1596
1597 if (user->list.next || user->list.prev) {
1598 ret = -EINVAL;
1599 goto out_unlock;
1600 }
1601
1602 /* conn->hchan is NULL after l2cap_conn_del() was called */
1603 if (!conn->hchan) {
1604 ret = -ENODEV;
1605 goto out_unlock;
1606 }
1607
1608 ret = user->probe(conn, user);
1609 if (ret)
1610 goto out_unlock;
1611
1612 list_add(&user->list, &conn->users);
1613 ret = 0;
1614
1615out_unlock:
1616 hci_dev_unlock(hdev);
1617 return ret;
1618}
1619EXPORT_SYMBOL(l2cap_register_user);
1620
1621void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1622{
1623 struct hci_dev *hdev = conn->hcon->hdev;
1624
1625 hci_dev_lock(hdev);
1626
1627 if (!user->list.next || !user->list.prev)
1628 goto out_unlock;
1629
1630 list_del(&user->list);
1631 user->list.next = NULL;
1632 user->list.prev = NULL;
1633 user->remove(conn, user);
1634
1635out_unlock:
1636 hci_dev_unlock(hdev);
1637}
1638EXPORT_SYMBOL(l2cap_unregister_user);
1639
1640static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1641{
1642 struct l2cap_user *user;
1643
1644 while (!list_empty(&conn->users)) {
1645 user = list_first_entry(&conn->users, struct l2cap_user, list);
1646 list_del(&user->list);
1647 user->list.next = NULL;
1648 user->list.prev = NULL;
1649 user->remove(conn, user);
1650 }
1651}
1652
1653static void l2cap_conn_del(struct hci_conn *hcon, int err)
1654{
1655 struct l2cap_conn *conn = hcon->l2cap_data;
1656 struct l2cap_chan *chan, *l;
1657
1658 if (!conn)
1659 return;
1660
1661 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1662
1663 kfree_skb(conn->rx_skb);
1664
1665 skb_queue_purge(&conn->pending_rx);
1666
1667 /* We cannot call flush_work(&conn->pending_rx_work) here since we
1668 * might block if we are running on a worker from the same workqueue
1669 * pending_rx_work is waiting on.
1670 */
1671 if (work_pending(&conn->pending_rx_work))
1672 cancel_work_sync(&conn->pending_rx_work);
1673
1674 l2cap_unregister_all_users(conn);
1675
1676 mutex_lock(&conn->chan_lock);
1677
1678 /* Kill channels */
1679 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1680 l2cap_chan_hold(chan);
1681 l2cap_chan_lock(chan);
1682
1683 l2cap_chan_del(chan, err);
1684
1685 l2cap_chan_unlock(chan);
1686
1687 chan->ops->close(chan);
1688 l2cap_chan_put(chan);
1689 }
1690
1691 mutex_unlock(&conn->chan_lock);
1692
1693 hci_chan_del(conn->hchan);
1694
1695 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1696 cancel_delayed_work_sync(&conn->info_timer);
1697
1698 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1699 cancel_delayed_work_sync(&conn->security_timer);
1700 smp_chan_destroy(conn);
1701 }
1702
1703 hcon->l2cap_data = NULL;
1704 conn->hchan = NULL;
1705 l2cap_conn_put(conn);
1706}
1707
1708static void security_timeout(struct work_struct *work)
1709{
1710 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1711 security_timer.work);
1712
1713 BT_DBG("conn %p", conn);
1714
1715 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1716 smp_chan_destroy(conn);
1717 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1718 }
1719}
1720
1721static void l2cap_conn_free(struct kref *ref)
1722{
1723 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1724
1725 hci_conn_put(conn->hcon);
1726 kfree(conn);
1727}
1728
1729void l2cap_conn_get(struct l2cap_conn *conn)
1730{
1731 kref_get(&conn->ref);
1732}
1733EXPORT_SYMBOL(l2cap_conn_get);
1734
1735void l2cap_conn_put(struct l2cap_conn *conn)
1736{
1737 kref_put(&conn->ref, l2cap_conn_free);
1738}
1739EXPORT_SYMBOL(l2cap_conn_put);
1740
1741/* ---- Socket interface ---- */
1742
1743/* Find socket with psm and source / destination bdaddr.
1744 * Returns closest match.
1745 */
1746static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1747 bdaddr_t *src,
1748 bdaddr_t *dst,
1749 u8 link_type)
1750{
1751 struct l2cap_chan *c, *c1 = NULL;
1752
1753 read_lock(&chan_list_lock);
1754
1755 list_for_each_entry(c, &chan_list, global_l) {
1756 if (state && c->state != state)
1757 continue;
1758
1759 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1760 continue;
1761
1762 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1763 continue;
1764
1765 if (c->psm == psm) {
1766 int src_match, dst_match;
1767 int src_any, dst_any;
1768
1769 /* Exact match. */
1770 src_match = !bacmp(&c->src, src);
1771 dst_match = !bacmp(&c->dst, dst);
1772 if (src_match && dst_match) {
1773 read_unlock(&chan_list_lock);
1774 return c;
1775 }
1776
1777 /* Closest match */
1778 src_any = !bacmp(&c->src, BDADDR_ANY);
1779 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1780 if ((src_match && dst_any) || (src_any && dst_match) ||
1781 (src_any && dst_any))
1782 c1 = c;
1783 }
1784 }
1785
1786 read_unlock(&chan_list_lock);
1787
1788 return c1;
1789}
1790
1791static void l2cap_monitor_timeout(struct work_struct *work)
1792{
1793 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1794 monitor_timer.work);
1795
1796 BT_DBG("chan %p", chan);
1797
1798 l2cap_chan_lock(chan);
1799
1800 if (!chan->conn) {
1801 l2cap_chan_unlock(chan);
1802 l2cap_chan_put(chan);
1803 return;
1804 }
1805
1806 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1807
1808 l2cap_chan_unlock(chan);
1809 l2cap_chan_put(chan);
1810}
1811
1812static void l2cap_retrans_timeout(struct work_struct *work)
1813{
1814 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1815 retrans_timer.work);
1816
1817 BT_DBG("chan %p", chan);
1818
1819 l2cap_chan_lock(chan);
1820
1821 if (!chan->conn) {
1822 l2cap_chan_unlock(chan);
1823 l2cap_chan_put(chan);
1824 return;
1825 }
1826
1827 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1828 l2cap_chan_unlock(chan);
1829 l2cap_chan_put(chan);
1830}
1831
1832static void l2cap_streaming_send(struct l2cap_chan *chan,
1833 struct sk_buff_head *skbs)
1834{
1835 struct sk_buff *skb;
1836 struct l2cap_ctrl *control;
1837
1838 BT_DBG("chan %p, skbs %p", chan, skbs);
1839
1840 if (__chan_is_moving(chan))
1841 return;
1842
1843 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1844
1845 while (!skb_queue_empty(&chan->tx_q)) {
1846
1847 skb = skb_dequeue(&chan->tx_q);
1848
1849 bt_cb(skb)->control.retries = 1;
1850 control = &bt_cb(skb)->control;
1851
1852 control->reqseq = 0;
1853 control->txseq = chan->next_tx_seq;
1854
1855 __pack_control(chan, control, skb);
1856
1857 if (chan->fcs == L2CAP_FCS_CRC16) {
1858 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1859 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1860 }
1861
1862 l2cap_do_send(chan, skb);
1863
1864 BT_DBG("Sent txseq %u", control->txseq);
1865
1866 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1867 chan->frames_sent++;
1868 }
1869}
1870
1871static int l2cap_ertm_send(struct l2cap_chan *chan)
1872{
1873 struct sk_buff *skb, *tx_skb;
1874 struct l2cap_ctrl *control;
1875 int sent = 0;
1876
1877 BT_DBG("chan %p", chan);
1878
1879 if (chan->state != BT_CONNECTED)
1880 return -ENOTCONN;
1881
1882 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1883 return 0;
1884
1885 if (__chan_is_moving(chan))
1886 return 0;
1887
1888 while (chan->tx_send_head &&
1889 chan->unacked_frames < chan->remote_tx_win &&
1890 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1891
1892 skb = chan->tx_send_head;
1893
1894 bt_cb(skb)->control.retries = 1;
1895 control = &bt_cb(skb)->control;
1896
1897 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1898 control->final = 1;
1899
1900 control->reqseq = chan->buffer_seq;
1901 chan->last_acked_seq = chan->buffer_seq;
1902 control->txseq = chan->next_tx_seq;
1903
1904 __pack_control(chan, control, skb);
1905
1906 if (chan->fcs == L2CAP_FCS_CRC16) {
1907 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1908 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1909 }
1910
1911 /* Clone after data has been modified. Data is assumed to be
1912 read-only (for locking purposes) on cloned sk_buffs.
1913 */
1914 tx_skb = skb_clone(skb, GFP_KERNEL);
1915
1916 if (!tx_skb)
1917 break;
1918
1919 __set_retrans_timer(chan);
1920
1921 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1922 chan->unacked_frames++;
1923 chan->frames_sent++;
1924 sent++;
1925
1926 if (skb_queue_is_last(&chan->tx_q, skb))
1927 chan->tx_send_head = NULL;
1928 else
1929 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1930
1931 l2cap_do_send(chan, tx_skb);
1932 BT_DBG("Sent txseq %u", control->txseq);
1933 }
1934
1935 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1936 chan->unacked_frames, skb_queue_len(&chan->tx_q));
1937
1938 return sent;
1939}
1940
1941static void l2cap_ertm_resend(struct l2cap_chan *chan)
1942{
1943 struct l2cap_ctrl control;
1944 struct sk_buff *skb;
1945 struct sk_buff *tx_skb;
1946 u16 seq;
1947
1948 BT_DBG("chan %p", chan);
1949
1950 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1951 return;
1952
1953 if (__chan_is_moving(chan))
1954 return;
1955
1956 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1957 seq = l2cap_seq_list_pop(&chan->retrans_list);
1958
1959 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1960 if (!skb) {
1961 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1962 seq);
1963 continue;
1964 }
1965
1966 bt_cb(skb)->control.retries++;
1967 control = bt_cb(skb)->control;
1968
1969 if (chan->max_tx != 0 &&
1970 bt_cb(skb)->control.retries > chan->max_tx) {
1971 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1972 l2cap_send_disconn_req(chan, ECONNRESET);
1973 l2cap_seq_list_clear(&chan->retrans_list);
1974 break;
1975 }
1976
1977 control.reqseq = chan->buffer_seq;
1978 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1979 control.final = 1;
1980 else
1981 control.final = 0;
1982
1983 if (skb_cloned(skb)) {
1984 /* Cloned sk_buffs are read-only, so we need a
1985 * writeable copy
1986 */
1987 tx_skb = skb_copy(skb, GFP_KERNEL);
1988 } else {
1989 tx_skb = skb_clone(skb, GFP_KERNEL);
1990 }
1991
1992 if (!tx_skb) {
1993 l2cap_seq_list_clear(&chan->retrans_list);
1994 break;
1995 }
1996
1997 /* Update skb contents */
1998 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1999 put_unaligned_le32(__pack_extended_control(&control),
2000 tx_skb->data + L2CAP_HDR_SIZE);
2001 } else {
2002 put_unaligned_le16(__pack_enhanced_control(&control),
2003 tx_skb->data + L2CAP_HDR_SIZE);
2004 }
2005
2006 if (chan->fcs == L2CAP_FCS_CRC16) {
2007 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2008 put_unaligned_le16(fcs, skb_put(tx_skb,
2009 L2CAP_FCS_SIZE));
2010 }
2011
2012 l2cap_do_send(chan, tx_skb);
2013
2014 BT_DBG("Resent txseq %d", control.txseq);
2015
2016 chan->last_acked_seq = chan->buffer_seq;
2017 }
2018}
2019
2020static void l2cap_retransmit(struct l2cap_chan *chan,
2021 struct l2cap_ctrl *control)
2022{
2023 BT_DBG("chan %p, control %p", chan, control);
2024
2025 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2026 l2cap_ertm_resend(chan);
2027}
2028
2029static void l2cap_retransmit_all(struct l2cap_chan *chan,
2030 struct l2cap_ctrl *control)
2031{
2032 struct sk_buff *skb;
2033
2034 BT_DBG("chan %p, control %p", chan, control);
2035
2036 if (control->poll)
2037 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2038
2039 l2cap_seq_list_clear(&chan->retrans_list);
2040
2041 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2042 return;
2043
2044 if (chan->unacked_frames) {
2045 skb_queue_walk(&chan->tx_q, skb) {
2046 if (bt_cb(skb)->control.txseq == control->reqseq ||
2047 skb == chan->tx_send_head)
2048 break;
2049 }
2050
2051 skb_queue_walk_from(&chan->tx_q, skb) {
2052 if (skb == chan->tx_send_head)
2053 break;
2054
2055 l2cap_seq_list_append(&chan->retrans_list,
2056 bt_cb(skb)->control.txseq);
2057 }
2058
2059 l2cap_ertm_resend(chan);
2060 }
2061}
2062
2063static void l2cap_send_ack(struct l2cap_chan *chan)
2064{
2065 struct l2cap_ctrl control;
2066 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2067 chan->last_acked_seq);
2068 int threshold;
2069
2070 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2071 chan, chan->last_acked_seq, chan->buffer_seq);
2072
2073 memset(&control, 0, sizeof(control));
2074 control.sframe = 1;
2075
2076 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2077 chan->rx_state == L2CAP_RX_STATE_RECV) {
2078 __clear_ack_timer(chan);
2079 control.super = L2CAP_SUPER_RNR;
2080 control.reqseq = chan->buffer_seq;
2081 l2cap_send_sframe(chan, &control);
2082 } else {
2083 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2084 l2cap_ertm_send(chan);
2085 /* If any i-frames were sent, they included an ack */
2086 if (chan->buffer_seq == chan->last_acked_seq)
2087 frames_to_ack = 0;
2088 }
2089
2090 /* Ack now if the window is 3/4ths full.
2091 * Calculate without mul or div
2092 */
2093 threshold = chan->ack_win;
2094 threshold += threshold << 1;
2095 threshold >>= 2;
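		/* Illustrative example (editorial addition): with the default
		 * ack_win of 63 (L2CAP_DEFAULT_TX_WINDOW), threshold becomes
		 * (63 + 126) >> 2 = 47, so an RR is forced once about three
		 * quarters of the window is awaiting acknowledgment.
		 */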
2096
2097 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2098 threshold);
2099
2100 if (frames_to_ack >= threshold) {
2101 __clear_ack_timer(chan);
2102 control.super = L2CAP_SUPER_RR;
2103 control.reqseq = chan->buffer_seq;
2104 l2cap_send_sframe(chan, &control);
2105 frames_to_ack = 0;
2106 }
2107
2108 if (frames_to_ack)
2109 __set_ack_timer(chan);
2110 }
2111}
2112
2113static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2114 struct msghdr *msg, int len,
2115 int count, struct sk_buff *skb)
2116{
2117 struct l2cap_conn *conn = chan->conn;
2118 struct sk_buff **frag;
2119 int sent = 0;
2120
2121 if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
2122 msg->msg_iov, count))
2123 return -EFAULT;
2124
2125 sent += count;
2126 len -= count;
2127
2128 /* Continuation fragments (no L2CAP header) */
2129 frag = &skb_shinfo(skb)->frag_list;
2130 while (len) {
2131 struct sk_buff *tmp;
2132
2133 count = min_t(unsigned int, conn->mtu, len);
2134
2135 tmp = chan->ops->alloc_skb(chan, 0, count,
2136 msg->msg_flags & MSG_DONTWAIT);
2137 if (IS_ERR(tmp))
2138 return PTR_ERR(tmp);
2139
2140 *frag = tmp;
2141
2142 if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
2143 msg->msg_iov, count))
2144 return -EFAULT;
2145
2146 sent += count;
2147 len -= count;
2148
2149 skb->len += (*frag)->len;
2150 skb->data_len += (*frag)->len;
2151
2152 frag = &(*frag)->next;
2153 }
2154
2155 return sent;
2156}
2157
2158static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2159 struct msghdr *msg, size_t len)
2160{
2161 struct l2cap_conn *conn = chan->conn;
2162 struct sk_buff *skb;
2163 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2164 struct l2cap_hdr *lh;
2165
2166 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2167 __le16_to_cpu(chan->psm), len);
2168
2169 count = min_t(unsigned int, (conn->mtu - hlen), len);
2170
2171 skb = chan->ops->alloc_skb(chan, hlen, count,
2172 msg->msg_flags & MSG_DONTWAIT);
2173 if (IS_ERR(skb))
2174 return skb;
2175
2176 /* Create L2CAP header */
2177 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2178 lh->cid = cpu_to_le16(chan->dcid);
2179 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2180 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2181
2182 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2183 if (unlikely(err < 0)) {
2184 kfree_skb(skb);
2185 return ERR_PTR(err);
2186 }
2187 return skb;
2188}
2189
2190static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2191 struct msghdr *msg, size_t len)
2192{
2193 struct l2cap_conn *conn = chan->conn;
2194 struct sk_buff *skb;
2195 int err, count;
2196 struct l2cap_hdr *lh;
2197
2198 BT_DBG("chan %p len %zu", chan, len);
2199
2200 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2201
2202 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2203 msg->msg_flags & MSG_DONTWAIT);
2204 if (IS_ERR(skb))
2205 return skb;
2206
2207 /* Create L2CAP header */
2208 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2209 lh->cid = cpu_to_le16(chan->dcid);
2210 lh->len = cpu_to_le16(len);
2211
2212 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2213 if (unlikely(err < 0)) {
2214 kfree_skb(skb);
2215 return ERR_PTR(err);
2216 }
2217 return skb;
2218}
2219
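/* Build a single ERTM/streaming I-frame: basic L2CAP header, a 16- or
 * 32-bit control field (zeroed here and populated later, as noted
 * below), an optional SDU length field for the first segment of a
 * segmented SDU, and the payload copied from the caller's iovec. The
 * FCS is accounted for in the PDU length here, but the checksum itself
 * is appended when the frame is actually transmitted.
 */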
2220static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2221 struct msghdr *msg, size_t len,
2222 u16 sdulen)
2223{
2224 struct l2cap_conn *conn = chan->conn;
2225 struct sk_buff *skb;
2226 int err, count, hlen;
2227 struct l2cap_hdr *lh;
2228
2229 BT_DBG("chan %p len %zu", chan, len);
2230
2231 if (!conn)
2232 return ERR_PTR(-ENOTCONN);
2233
2234 hlen = __ertm_hdr_size(chan);
2235
2236 if (sdulen)
2237 hlen += L2CAP_SDULEN_SIZE;
2238
2239 if (chan->fcs == L2CAP_FCS_CRC16)
2240 hlen += L2CAP_FCS_SIZE;
2241
2242 count = min_t(unsigned int, (conn->mtu - hlen), len);
2243
2244 skb = chan->ops->alloc_skb(chan, hlen, count,
2245 msg->msg_flags & MSG_DONTWAIT);
2246 if (IS_ERR(skb))
2247 return skb;
2248
2249 /* Create L2CAP header */
2250 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2251 lh->cid = cpu_to_le16(chan->dcid);
2252 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2253
2254 /* Control header is populated later */
2255 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2256 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2257 else
2258 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2259
2260 if (sdulen)
2261 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2262
2263 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2264 if (unlikely(err < 0)) {
2265 kfree_skb(skb);
2266 return ERR_PTR(err);
2267 }
2268
2269 bt_cb(skb)->control.fcs = chan->fcs;
2270 bt_cb(skb)->control.retries = 0;
2271 return skb;
2272}
2273
2274static int l2cap_segment_sdu(struct l2cap_chan *chan,
2275 struct sk_buff_head *seg_queue,
2276 struct msghdr *msg, size_t len)
2277{
2278 struct sk_buff *skb;
2279 u16 sdu_len;
2280 size_t pdu_len;
2281 u8 sar;
2282
2283 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2284
2285 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2286 * so fragmented skbs are not used. The HCI layer's handling
2287 * of fragmented skbs is not compatible with ERTM's queueing.
2288 */
2289
2290 /* PDU size is derived from the HCI MTU */
2291 pdu_len = chan->conn->mtu;
2292
2293 /* Constrain PDU size for BR/EDR connections */
2294 if (!chan->hs_hcon)
2295 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2296
2297 /* Adjust for largest possible L2CAP overhead. */
2298 if (chan->fcs)
2299 pdu_len -= L2CAP_FCS_SIZE;
2300
2301 pdu_len -= __ertm_hdr_size(chan);
2302
2303 /* Remote device may have requested smaller PDUs */
2304 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
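/* Summary of the arithmetic above (byte counts per the L2CAP spec): a
 * CRC16 FCS costs 2 bytes, and the ERTM header costs 6 bytes with the
 * enhanced control field (4-byte L2CAP header + 2-byte control) or
 * 8 bytes with the extended one. Each I-frame then carries at most
 * pdu_len payload bytes, so a len-byte SDU needs roughly
 * len / pdu_len segments.
 */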
2305
2306 if (len <= pdu_len) {
2307 sar = L2CAP_SAR_UNSEGMENTED;
2308 sdu_len = 0;
2309 pdu_len = len;
2310 } else {
2311 sar = L2CAP_SAR_START;
2312 sdu_len = len;
2313 pdu_len -= L2CAP_SDULEN_SIZE;
2314 }
2315
2316 while (len > 0) {
2317 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2318
2319 if (IS_ERR(skb)) {
2320 __skb_queue_purge(seg_queue);
2321 return PTR_ERR(skb);
2322 }
2323
2324 bt_cb(skb)->control.sar = sar;
2325 __skb_queue_tail(seg_queue, skb);
2326
2327 len -= pdu_len;
2328 if (sdu_len) {
2329 sdu_len = 0;
2330 pdu_len += L2CAP_SDULEN_SIZE;
2331 }
2332
2333 if (len <= pdu_len) {
2334 sar = L2CAP_SAR_END;
2335 pdu_len = len;
2336 } else {
2337 sar = L2CAP_SAR_CONTINUE;
2338 }
2339 }
2340
2341 return 0;
2342}
2343
2344static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2345 struct msghdr *msg,
2346 size_t len, u16 sdulen)
2347{
2348 struct l2cap_conn *conn = chan->conn;
2349 struct sk_buff *skb;
2350 int err, count, hlen;
2351 struct l2cap_hdr *lh;
2352
2353 BT_DBG("chan %p len %zu", chan, len);
2354
2355 if (!conn)
2356 return ERR_PTR(-ENOTCONN);
2357
2358 hlen = L2CAP_HDR_SIZE;
2359
2360 if (sdulen)
2361 hlen += L2CAP_SDULEN_SIZE;
2362
2363 count = min_t(unsigned int, (conn->mtu - hlen), len);
2364
2365 skb = chan->ops->alloc_skb(chan, hlen, count,
2366 msg->msg_flags & MSG_DONTWAIT);
2367 if (IS_ERR(skb))
2368 return skb;
2369
2370 /* Create L2CAP header */
2371 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2372 lh->cid = cpu_to_le16(chan->dcid);
2373 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2374
2375 if (sdulen)
2376 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2377
2378 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2379 if (unlikely(err < 0)) {
2380 kfree_skb(skb);
2381 return ERR_PTR(err);
2382 }
2383
2384 return skb;
2385}
2386
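/* Segment an SDU for LE credit-based flow control. Only the first PDU
 * carries the 2-byte SDU length field, so the loop below subtracts
 * L2CAP_SDULEN_SIZE from the usable payload of the first segment and
 * gives it back for all subsequent ones.
 */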
2387static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2388 struct sk_buff_head *seg_queue,
2389 struct msghdr *msg, size_t len)
2390{
2391 struct sk_buff *skb;
2392 size_t pdu_len;
2393 u16 sdu_len;
2394
2395 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2396
2397 pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
2398
2399 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2400
2401 sdu_len = len;
2402 pdu_len -= L2CAP_SDULEN_SIZE;
2403
2404 while (len > 0) {
2405 if (len <= pdu_len)
2406 pdu_len = len;
2407
2408 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2409 if (IS_ERR(skb)) {
2410 __skb_queue_purge(seg_queue);
2411 return PTR_ERR(skb);
2412 }
2413
2414 __skb_queue_tail(seg_queue, skb);
2415
2416 len -= pdu_len;
2417
2418 if (sdu_len) {
2419 sdu_len = 0;
2420 pdu_len += L2CAP_SDULEN_SIZE;
2421 }
2422 }
2423
2424 return 0;
2425}
2426
2427int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2428{
2429 struct sk_buff *skb;
2430 int err;
2431 struct sk_buff_head seg_queue;
2432
2433 if (!chan->conn)
2434 return -ENOTCONN;
2435
2436 /* Connectionless channel */
2437 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2438 skb = l2cap_create_connless_pdu(chan, msg, len);
2439 if (IS_ERR(skb))
2440 return PTR_ERR(skb);
2441
2442 /* The channel lock is released before requesting a new skb and then
2443 * reacquired, so we need to recheck the channel state.
2444 */
2445 if (chan->state != BT_CONNECTED) {
2446 kfree_skb(skb);
2447 return -ENOTCONN;
2448 }
2449
2450 l2cap_do_send(chan, skb);
2451 return len;
2452 }
2453
2454 switch (chan->mode) {
2455 case L2CAP_MODE_LE_FLOWCTL:
2456 /* Check outgoing MTU */
2457 if (len > chan->omtu)
2458 return -EMSGSIZE;
2459
2460 if (!chan->tx_credits)
2461 return -EAGAIN;
2462
2463 __skb_queue_head_init(&seg_queue);
2464
2465 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2466
2467 if (chan->state != BT_CONNECTED) {
2468 __skb_queue_purge(&seg_queue);
2469 err = -ENOTCONN;
2470 }
2471
2472 if (err)
2473 return err;
2474
2475 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2476
2477 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2478 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2479 chan->tx_credits--;
2480 }
2481
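/* Each PDU sent on an LE flow-control channel consumes one credit
 * granted by the peer. When the credits run out, the suspend callback
 * below asks the channel owner to stop submitting data until the
 * remote issues more credits; how that backpressure is applied is left
 * to the ops implementation.
 */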
2482 if (!chan->tx_credits)
2483 chan->ops->suspend(chan);
2484
2485 err = len;
2486
2487 break;
2488
2489 case L2CAP_MODE_BASIC:
2490 /* Check outgoing MTU */
2491 if (len > chan->omtu)
2492 return -EMSGSIZE;
2493
2494 /* Create a basic PDU */
2495 skb = l2cap_create_basic_pdu(chan, msg, len);
2496 if (IS_ERR(skb))
2497 return PTR_ERR(skb);
2498
2499 /* The channel lock is released before requesting a new skb and then
2500 * reacquired, so we need to recheck the channel state.
2501 */
2502 if (chan->state != BT_CONNECTED) {
2503 kfree_skb(skb);
2504 return -ENOTCONN;
2505 }
2506
2507 l2cap_do_send(chan, skb);
2508 err = len;
2509 break;
2510
2511 case L2CAP_MODE_ERTM:
2512 case L2CAP_MODE_STREAMING:
2513 /* Check outgoing MTU */
2514 if (len > chan->omtu) {
2515 err = -EMSGSIZE;
2516 break;
2517 }
2518
2519 __skb_queue_head_init(&seg_queue);
2520
2521 /* Do segmentation before calling in to the state machine,
2522 * since it's possible to block while waiting for memory
2523 * allocation.
2524 */
2525 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2526
2527 /* The channel could have been closed while segmenting,
2528 * check that it is still connected.
2529 */
2530 if (chan->state != BT_CONNECTED) {
2531 __skb_queue_purge(&seg_queue);
2532 err = -ENOTCONN;
2533 }
2534
2535 if (err)
2536 break;
2537
2538 if (chan->mode == L2CAP_MODE_ERTM)
2539 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2540 else
2541 l2cap_streaming_send(chan, &seg_queue);
2542
2543 err = len;
2544
2545 /* If the skbs were not queued for sending, they'll still be in
2546 * seg_queue and need to be purged.
2547 */
2548 __skb_queue_purge(&seg_queue);
2549 break;
2550
2551 default:
2552 BT_DBG("bad mode 0x%1.1x", chan->mode);
2553 err = -EBADFD;
2554 }
2555
2556 return err;
2557}
2558
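/* A txseq gap was detected on receive: send one SREJ S-frame for every
 * missing sequence number between expected_tx_seq and the frame that
 * just arrived (skipping sequence numbers whose frames are already
 * buffered in srej_q), remember each request in srej_list, and advance
 * expected_tx_seq past txseq.
 */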
2559static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2560{
2561 struct l2cap_ctrl control;
2562 u16 seq;
2563
2564 BT_DBG("chan %p, txseq %u", chan, txseq);
2565
2566 memset(&control, 0, sizeof(control));
2567 control.sframe = 1;
2568 control.super = L2CAP_SUPER_SREJ;
2569
2570 for (seq = chan->expected_tx_seq; seq != txseq;
2571 seq = __next_seq(chan, seq)) {
2572 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2573 control.reqseq = seq;
2574 l2cap_send_sframe(chan, &control);
2575 l2cap_seq_list_append(&chan->srej_list, seq);
2576 }
2577 }
2578
2579 chan->expected_tx_seq = __next_seq(chan, txseq);
2580}
2581
2582static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2583{
2584 struct l2cap_ctrl control;
2585
2586 BT_DBG("chan %p", chan);
2587
2588 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2589 return;
2590
2591 memset(&control, 0, sizeof(control));
2592 control.sframe = 1;
2593 control.super = L2CAP_SUPER_SREJ;
2594 control.reqseq = chan->srej_list.tail;
2595 l2cap_send_sframe(chan, &control);
2596}
2597
2598static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2599{
2600 struct l2cap_ctrl control;
2601 u16 initial_head;
2602 u16 seq;
2603
2604 BT_DBG("chan %p, txseq %u", chan, txseq);
2605
2606 memset(&control, 0, sizeof(control));
2607 control.sframe = 1;
2608 control.super = L2CAP_SUPER_SREJ;
2609
2610 /* Capture initial list head to allow only one pass through the list. */
2611 initial_head = chan->srej_list.head;
2612
2613 do {
2614 seq = l2cap_seq_list_pop(&chan->srej_list);
2615 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2616 break;
2617
2618 control.reqseq = seq;
2619 l2cap_send_sframe(chan, &control);
2620 l2cap_seq_list_append(&chan->srej_list, seq);
2621 } while (chan->srej_list.head != initial_head);
2622}
2623
2624static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2625{
2626 struct sk_buff *acked_skb;
2627 u16 ackseq;
2628
2629 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2630
2631 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2632 return;
2633
2634 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2635 chan->expected_ack_seq, chan->unacked_frames);
2636
2637 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2638 ackseq = __next_seq(chan, ackseq)) {
2639
2640 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2641 if (acked_skb) {
2642 skb_unlink(acked_skb, &chan->tx_q);
2643 kfree_skb(acked_skb);
2644 chan->unacked_frames--;
2645 }
2646 }
2647
2648 chan->expected_ack_seq = reqseq;
2649
2650 if (chan->unacked_frames == 0)
2651 __clear_retrans_timer(chan);
2652
2653 BT_DBG("unacked_frames %u", chan->unacked_frames);
2654}
2655
2656static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2657{
2658 BT_DBG("chan %p", chan);
2659
2660 chan->expected_tx_seq = chan->buffer_seq;
2661 l2cap_seq_list_clear(&chan->srej_list);
2662 skb_queue_purge(&chan->srej_q);
2663 chan->rx_state = L2CAP_RX_STATE_RECV;
2664}
2665
2666static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2667 struct l2cap_ctrl *control,
2668 struct sk_buff_head *skbs, u8 event)
2669{
2670 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2671 event);
2672
2673 switch (event) {
2674 case L2CAP_EV_DATA_REQUEST:
2675 if (chan->tx_send_head == NULL)
2676 chan->tx_send_head = skb_peek(skbs);
2677
2678 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2679 l2cap_ertm_send(chan);
2680 break;
2681 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2682 BT_DBG("Enter LOCAL_BUSY");
2683 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2684
2685 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2686 /* The SREJ_SENT state must be aborted if we are to
2687 * enter the LOCAL_BUSY state.
2688 */
2689 l2cap_abort_rx_srej_sent(chan);
2690 }
2691
2692 l2cap_send_ack(chan);
2693
2694 break;
2695 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2696 BT_DBG("Exit LOCAL_BUSY");
2697 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2698
2699 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2700 struct l2cap_ctrl local_control;
2701
2702 memset(&local_control, 0, sizeof(local_control));
2703 local_control.sframe = 1;
2704 local_control.super = L2CAP_SUPER_RR;
2705 local_control.poll = 1;
2706 local_control.reqseq = chan->buffer_seq;
2707 l2cap_send_sframe(chan, &local_control);
2708
2709 chan->retry_count = 1;
2710 __set_monitor_timer(chan);
2711 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2712 }
2713 break;
2714 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2715 l2cap_process_reqseq(chan, control->reqseq);
2716 break;
2717 case L2CAP_EV_EXPLICIT_POLL:
2718 l2cap_send_rr_or_rnr(chan, 1);
2719 chan->retry_count = 1;
2720 __set_monitor_timer(chan);
2721 __clear_ack_timer(chan);
2722 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2723 break;
2724 case L2CAP_EV_RETRANS_TO:
2725 l2cap_send_rr_or_rnr(chan, 1);
2726 chan->retry_count = 1;
2727 __set_monitor_timer(chan);
2728 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2729 break;
2730 case L2CAP_EV_RECV_FBIT:
2731 /* Nothing to process */
2732 break;
2733 default:
2734 break;
2735 }
2736}
2737
2738static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2739 struct l2cap_ctrl *control,
2740 struct sk_buff_head *skbs, u8 event)
2741{
2742 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2743 event);
2744
2745 switch (event) {
2746 case L2CAP_EV_DATA_REQUEST:
2747 if (chan->tx_send_head == NULL)
2748 chan->tx_send_head = skb_peek(skbs);
2749 /* Queue data, but don't send. */
2750 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2751 break;
2752 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2753 BT_DBG("Enter LOCAL_BUSY");
2754 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2755
2756 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2757 /* The SREJ_SENT state must be aborted if we are to
2758 * enter the LOCAL_BUSY state.
2759 */
2760 l2cap_abort_rx_srej_sent(chan);
2761 }
2762
2763 l2cap_send_ack(chan);
2764
2765 break;
2766 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2767 BT_DBG("Exit LOCAL_BUSY");
2768 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2769
2770 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2771 struct l2cap_ctrl local_control;
2772 memset(&local_control, 0, sizeof(local_control));
2773 local_control.sframe = 1;
2774 local_control.super = L2CAP_SUPER_RR;
2775 local_control.poll = 1;
2776 local_control.reqseq = chan->buffer_seq;
2777 l2cap_send_sframe(chan, &local_control);
2778
2779 chan->retry_count = 1;
2780 __set_monitor_timer(chan);
2781 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2782 }
2783 break;
2784 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2785 l2cap_process_reqseq(chan, control->reqseq);
2786
2787 /* Fall through */
2788
2789 case L2CAP_EV_RECV_FBIT:
2790 if (control && control->final) {
2791 __clear_monitor_timer(chan);
2792 if (chan->unacked_frames > 0)
2793 __set_retrans_timer(chan);
2794 chan->retry_count = 0;
2795 chan->tx_state = L2CAP_TX_STATE_XMIT;
2796 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2797 }
2798 break;
2799 case L2CAP_EV_EXPLICIT_POLL:
2800 /* Ignore */
2801 break;
2802 case L2CAP_EV_MONITOR_TO:
2803 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2804 l2cap_send_rr_or_rnr(chan, 1);
2805 __set_monitor_timer(chan);
2806 chan->retry_count++;
2807 } else {
2808 l2cap_send_disconn_req(chan, ECONNABORTED);
2809 }
2810 break;
2811 default:
2812 break;
2813 }
2814}
2815
2816static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2817 struct sk_buff_head *skbs, u8 event)
2818{
2819 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2820 chan, control, skbs, event, chan->tx_state);
2821
2822 switch (chan->tx_state) {
2823 case L2CAP_TX_STATE_XMIT:
2824 l2cap_tx_state_xmit(chan, control, skbs, event);
2825 break;
2826 case L2CAP_TX_STATE_WAIT_F:
2827 l2cap_tx_state_wait_f(chan, control, skbs, event);
2828 break;
2829 default:
2830 /* Ignore event */
2831 break;
2832 }
2833}
2834
2835static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2836 struct l2cap_ctrl *control)
2837{
2838 BT_DBG("chan %p, control %p", chan, control);
2839 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2840}
2841
2842static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2843 struct l2cap_ctrl *control)
2844{
2845 BT_DBG("chan %p, control %p", chan, control);
2846 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2847}
2848
2849/* Copy frame to all raw sockets on that connection */
2850static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2851{
2852 struct sk_buff *nskb;
2853 struct l2cap_chan *chan;
2854
2855 BT_DBG("conn %p", conn);
2856
2857 mutex_lock(&conn->chan_lock);
2858
2859 list_for_each_entry(chan, &conn->chan_l, list) {
2860 if (chan->chan_type != L2CAP_CHAN_RAW)
2861 continue;
2862
2863 /* Don't send frame to the channel it came from */
2864 if (bt_cb(skb)->chan == chan)
2865 continue;
2866
2867 nskb = skb_clone(skb, GFP_KERNEL);
2868 if (!nskb)
2869 continue;
2870 if (chan->ops->recv(chan, nskb))
2871 kfree_skb(nskb);
2872 }
2873
2874 mutex_unlock(&conn->chan_lock);
2875}
2876
2877/* ---- L2CAP signalling commands ---- */
2878static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2879 u8 ident, u16 dlen, void *data)
2880{
2881 struct sk_buff *skb, **frag;
2882 struct l2cap_cmd_hdr *cmd;
2883 struct l2cap_hdr *lh;
2884 int len, count;
2885
2886 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2887 conn, code, ident, dlen);
2888
2889 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2890 return NULL;
2891
2892 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2893 count = min_t(unsigned int, conn->mtu, len);
2894
2895 skb = bt_skb_alloc(count, GFP_KERNEL);
2896 if (!skb)
2897 return NULL;
2898
2899 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2900 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2901
2902 if (conn->hcon->type == LE_LINK)
2903 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2904 else
2905 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2906
2907 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2908 cmd->code = code;
2909 cmd->ident = ident;
2910 cmd->len = cpu_to_le16(dlen);
2911
2912 if (dlen) {
2913 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2914 memcpy(skb_put(skb, count), data, count);
2915 data += count;
2916 }
2917
2918 len -= skb->len;
2919
2920 /* Continuation fragments (no L2CAP header) */
2921 frag = &skb_shinfo(skb)->frag_list;
2922 while (len) {
2923 count = min_t(unsigned int, conn->mtu, len);
2924
2925 *frag = bt_skb_alloc(count, GFP_KERNEL);
2926 if (!*frag)
2927 goto fail;
2928
2929 memcpy(skb_put(*frag, count), data, count);
2930
2931 len -= count;
2932 data += count;
2933
2934 frag = &(*frag)->next;
2935 }
2936
2937 return skb;
2938
2939fail:
2940 kfree_skb(skb);
2941 return NULL;
2942}
2943
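/* Configuration options are packed as type/length/value triples: a
 * 1-byte type (with the most significant bit marking a hint the peer
 * may ignore), a 1-byte length, and 'length' bytes of value. The
 * helpers below walk and build that encoding; 1-, 2- and 4-byte values
 * are returned inline, anything larger as a pointer into the option
 * buffer.
 */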
2944static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2945 unsigned long *val)
2946{
2947 struct l2cap_conf_opt *opt = *ptr;
2948 int len;
2949
2950 len = L2CAP_CONF_OPT_SIZE + opt->len;
2951 *ptr += len;
2952
2953 *type = opt->type;
2954 *olen = opt->len;
2955
2956 switch (opt->len) {
2957 case 1:
2958 *val = *((u8 *) opt->val);
2959 break;
2960
2961 case 2:
2962 *val = get_unaligned_le16(opt->val);
2963 break;
2964
2965 case 4:
2966 *val = get_unaligned_le32(opt->val);
2967 break;
2968
2969 default:
2970 *val = (unsigned long) opt->val;
2971 break;
2972 }
2973
2974 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2975 return len;
2976}
2977
2978static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2979{
2980 struct l2cap_conf_opt *opt = *ptr;
2981
2982 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2983
2984 opt->type = type;
2985 opt->len = len;
2986
2987 switch (len) {
2988 case 1:
2989 *((u8 *) opt->val) = val;
2990 break;
2991
2992 case 2:
2993 put_unaligned_le16(val, opt->val);
2994 break;
2995
2996 case 4:
2997 put_unaligned_le32(val, opt->val);
2998 break;
2999
3000 default:
3001 memcpy(opt->val, (void *) val, len);
3002 break;
3003 }
3004
3005 *ptr += L2CAP_CONF_OPT_SIZE + len;
3006}
3007
3008static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3009{
3010 struct l2cap_conf_efs efs;
3011
3012 switch (chan->mode) {
3013 case L2CAP_MODE_ERTM:
3014 efs.id = chan->local_id;
3015 efs.stype = chan->local_stype;
3016 efs.msdu = cpu_to_le16(chan->local_msdu);
3017 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3018 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3019 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3020 break;
3021
3022 case L2CAP_MODE_STREAMING:
3023 efs.id = 1;
3024 efs.stype = L2CAP_SERV_BESTEFFORT;
3025 efs.msdu = cpu_to_le16(chan->local_msdu);
3026 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3027 efs.acc_lat = 0;
3028 efs.flush_to = 0;
3029 break;
3030
3031 default:
3032 return;
3033 }
3034
3035 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3036 (unsigned long) &efs);
3037}
3038
3039static void l2cap_ack_timeout(struct work_struct *work)
3040{
3041 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3042 ack_timer.work);
3043 u16 frames_to_ack;
3044
3045 BT_DBG("chan %p", chan);
3046
3047 l2cap_chan_lock(chan);
3048
3049 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3050 chan->last_acked_seq);
3051
3052 if (frames_to_ack)
3053 l2cap_send_rr_or_rnr(chan, 0);
3054
3055 l2cap_chan_unlock(chan);
3056 l2cap_chan_put(chan);
3057}
3058
3059int l2cap_ertm_init(struct l2cap_chan *chan)
3060{
3061 int err;
3062
3063 chan->next_tx_seq = 0;
3064 chan->expected_tx_seq = 0;
3065 chan->expected_ack_seq = 0;
3066 chan->unacked_frames = 0;
3067 chan->buffer_seq = 0;
3068 chan->frames_sent = 0;
3069 chan->last_acked_seq = 0;
3070 chan->sdu = NULL;
3071 chan->sdu_last_frag = NULL;
3072 chan->sdu_len = 0;
3073
3074 skb_queue_head_init(&chan->tx_q);
3075
3076 chan->local_amp_id = AMP_ID_BREDR;
3077 chan->move_id = AMP_ID_BREDR;
3078 chan->move_state = L2CAP_MOVE_STABLE;
3079 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3080
3081 if (chan->mode != L2CAP_MODE_ERTM)
3082 return 0;
3083
3084 chan->rx_state = L2CAP_RX_STATE_RECV;
3085 chan->tx_state = L2CAP_TX_STATE_XMIT;
3086
3087 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3088 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3089 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3090
3091 skb_queue_head_init(&chan->srej_q);
3092
3093 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3094 if (err < 0)
3095 return err;
3096
3097 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3098 if (err < 0)
3099 l2cap_seq_list_free(&chan->srej_list);
3100
3101 return err;
3102}
3103
3104static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3105{
3106 switch (mode) {
3107 case L2CAP_MODE_STREAMING:
3108 case L2CAP_MODE_ERTM:
3109 if (l2cap_mode_supported(mode, remote_feat_mask))
3110 return mode;
3111 /* fall through */
3112 default:
3113 return L2CAP_MODE_BASIC;
3114 }
3115}
3116
3117static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3118{
3119 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3120}
3121
3122static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3123{
3124 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3125}
3126
3127static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3128 struct l2cap_conf_rfc *rfc)
3129{
3130 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3131 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3132
3133 /* Class 1 devices must have ERTM timeouts
3134 * exceeding the Link Supervision Timeout. The
3135 * default Link Supervision Timeout for AMP
3136 * controllers is 10 seconds.
3137 *
3138 * Class 1 devices use 0xffffffff for their
3139 * best-effort flush timeout, so the clamping logic
3140 * will result in a timeout that meets the above
3141 * requirement. ERTM timeouts are 16-bit values, so
3142 * the maximum timeout is 65.535 seconds.
3143 */
3144
3145 /* Convert timeout to milliseconds and round */
3146 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3147
3148 /* This is the recommended formula for class 2 devices
3149 * that start ERTM timers when packets are sent to the
3150 * controller.
3151 */
3152 ertm_to = 3 * ertm_to + 500;
3153
3154 if (ertm_to > 0xffff)
3155 ertm_to = 0xffff;
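/* Example from the comment above: a class 1 device reporting a
 * best-effort flush timeout of 0xffffffff ends up far above 0xffff
 * after the 3 * t + 500 formula, so it is clamped here to the maximum
 * encodable ERTM timeout of 65535 ms (65.535 seconds).
 */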
3156
3157 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3158 rfc->monitor_timeout = rfc->retrans_timeout;
3159 } else {
3160 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3161 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3162 }
3163}
3164
3165static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3166{
3167 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3168 __l2cap_ews_supported(chan->conn)) {
3169 /* use extended control field */
3170 set_bit(FLAG_EXT_CTRL, &chan->flags);
3171 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3172 } else {
3173 chan->tx_win = min_t(u16, chan->tx_win,
3174 L2CAP_DEFAULT_TX_WINDOW);
3175 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3176 }
3177 chan->ack_win = chan->tx_win;
3178}
3179
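/* Build the local Configuration Request: an MTU option when the
 * inbound MTU differs from the default, then a mode-specific RFC
 * option (carrying timeouts, window size and max PDU size for ERTM),
 * optional EFS and extended-window-size options, and an FCS option
 * when we are willing to drop the CRC16.
 */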
3180static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3181{
3182 struct l2cap_conf_req *req = data;
3183 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3184 void *ptr = req->data;
3185 u16 size;
3186
3187 BT_DBG("chan %p", chan);
3188
3189 if (chan->num_conf_req || chan->num_conf_rsp)
3190 goto done;
3191
3192 switch (chan->mode) {
3193 case L2CAP_MODE_STREAMING:
3194 case L2CAP_MODE_ERTM:
3195 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3196 break;
3197
3198 if (__l2cap_efs_supported(chan->conn))
3199 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3200
3201 /* fall through */
3202 default:
3203 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3204 break;
3205 }
3206
3207done:
3208 if (chan->imtu != L2CAP_DEFAULT_MTU)
3209 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3210
3211 switch (chan->mode) {
3212 case L2CAP_MODE_BASIC:
3213 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3214 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3215 break;
3216
3217 rfc.mode = L2CAP_MODE_BASIC;
3218 rfc.txwin_size = 0;
3219 rfc.max_transmit = 0;
3220 rfc.retrans_timeout = 0;
3221 rfc.monitor_timeout = 0;
3222 rfc.max_pdu_size = 0;
3223
3224 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3225 (unsigned long) &rfc);
3226 break;
3227
3228 case L2CAP_MODE_ERTM:
3229 rfc.mode = L2CAP_MODE_ERTM;
3230 rfc.max_transmit = chan->max_tx;
3231
3232 __l2cap_set_ertm_timeouts(chan, &rfc);
3233
3234 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3235 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3236 L2CAP_FCS_SIZE);
3237 rfc.max_pdu_size = cpu_to_le16(size);
3238
3239 l2cap_txwin_setup(chan);
3240
3241 rfc.txwin_size = min_t(u16, chan->tx_win,
3242 L2CAP_DEFAULT_TX_WINDOW);
3243
3244 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3245 (unsigned long) &rfc);
3246
3247 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3248 l2cap_add_opt_efs(&ptr, chan);
3249
3250 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3251 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3252 chan->tx_win);
3253
3254 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3255 if (chan->fcs == L2CAP_FCS_NONE ||
3256 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3257 chan->fcs = L2CAP_FCS_NONE;
3258 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3259 chan->fcs);
3260 }
3261 break;
3262
3263 case L2CAP_MODE_STREAMING:
3264 l2cap_txwin_setup(chan);
3265 rfc.mode = L2CAP_MODE_STREAMING;
3266 rfc.txwin_size = 0;
3267 rfc.max_transmit = 0;
3268 rfc.retrans_timeout = 0;
3269 rfc.monitor_timeout = 0;
3270
3271 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3272 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3273 L2CAP_FCS_SIZE);
3274 rfc.max_pdu_size = cpu_to_le16(size);
3275
3276 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3277 (unsigned long) &rfc);
3278
3279 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3280 l2cap_add_opt_efs(&ptr, chan);
3281
3282 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3283 if (chan->fcs == L2CAP_FCS_NONE ||
3284 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3285 chan->fcs = L2CAP_FCS_NONE;
3286 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3287 chan->fcs);
3288 }
3289 break;
3290 }
3291
3292 req->dcid = cpu_to_le16(chan->dcid);
3293 req->flags = cpu_to_le16(0);
3294
3295 return ptr - data;
3296}
3297
3298static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3299{
3300 struct l2cap_conf_rsp *rsp = data;
3301 void *ptr = rsp->data;
3302 void *req = chan->conf_req;
3303 int len = chan->conf_len;
3304 int type, hint, olen;
3305 unsigned long val;
3306 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3307 struct l2cap_conf_efs efs;
3308 u8 remote_efs = 0;
3309 u16 mtu = L2CAP_DEFAULT_MTU;
3310 u16 result = L2CAP_CONF_SUCCESS;
3311 u16 size;
3312
3313 BT_DBG("chan %p", chan);
3314
3315 while (len >= L2CAP_CONF_OPT_SIZE) {
3316 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3317
3318 hint = type & L2CAP_CONF_HINT;
3319 type &= L2CAP_CONF_MASK;
3320
3321 switch (type) {
3322 case L2CAP_CONF_MTU:
3323 mtu = val;
3324 break;
3325
3326 case L2CAP_CONF_FLUSH_TO:
3327 chan->flush_to = val;
3328 break;
3329
3330 case L2CAP_CONF_QOS:
3331 break;
3332
3333 case L2CAP_CONF_RFC:
3334 if (olen == sizeof(rfc))
3335 memcpy(&rfc, (void *) val, olen);
3336 break;
3337
3338 case L2CAP_CONF_FCS:
3339 if (val == L2CAP_FCS_NONE)
3340 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3341 break;
3342
3343 case L2CAP_CONF_EFS:
3344 remote_efs = 1;
3345 if (olen == sizeof(efs))
3346 memcpy(&efs, (void *) val, olen);
3347 break;
3348
3349 case L2CAP_CONF_EWS:
3350 if (!chan->conn->hs_enabled)
3351 return -ECONNREFUSED;
3352
3353 set_bit(FLAG_EXT_CTRL, &chan->flags);
3354 set_bit(CONF_EWS_RECV, &chan->conf_state);
3355 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3356 chan->remote_tx_win = val;
3357 break;
3358
3359 default:
3360 if (hint)
3361 break;
3362
3363 result = L2CAP_CONF_UNKNOWN;
3364 *((u8 *) ptr++) = type;
3365 break;
3366 }
3367 }
3368
3369 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3370 goto done;
3371
3372 switch (chan->mode) {
3373 case L2CAP_MODE_STREAMING:
3374 case L2CAP_MODE_ERTM:
3375 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3376 chan->mode = l2cap_select_mode(rfc.mode,
3377 chan->conn->feat_mask);
3378 break;
3379 }
3380
3381 if (remote_efs) {
3382 if (__l2cap_efs_supported(chan->conn))
3383 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3384 else
3385 return -ECONNREFUSED;
3386 }
3387
3388 if (chan->mode != rfc.mode)
3389 return -ECONNREFUSED;
3390
3391 break;
3392 }
3393
3394done:
3395 if (chan->mode != rfc.mode) {
3396 result = L2CAP_CONF_UNACCEPT;
3397 rfc.mode = chan->mode;
3398
3399 if (chan->num_conf_rsp == 1)
3400 return -ECONNREFUSED;
3401
3402 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3403 (unsigned long) &rfc);
3404 }
3405
3406 if (result == L2CAP_CONF_SUCCESS) {
3407 /* Configure output options and let the other side know
3408 * which ones we don't like. */
3409
3410 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3411 result = L2CAP_CONF_UNACCEPT;
3412 else {
3413 chan->omtu = mtu;
3414 set_bit(CONF_MTU_DONE, &chan->conf_state);
3415 }
3416 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3417
3418 if (remote_efs) {
3419 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3420 efs.stype != L2CAP_SERV_NOTRAFIC &&
3421 efs.stype != chan->local_stype) {
3422
3423 result = L2CAP_CONF_UNACCEPT;
3424
3425 if (chan->num_conf_req >= 1)
3426 return -ECONNREFUSED;
3427
3428 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3429 sizeof(efs),
3430 (unsigned long) &efs);
3431 } else {
3432 /* Send PENDING Conf Rsp */
3433 result = L2CAP_CONF_PENDING;
3434 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3435 }
3436 }
3437
3438 switch (rfc.mode) {
3439 case L2CAP_MODE_BASIC:
3440 chan->fcs = L2CAP_FCS_NONE;
3441 set_bit(CONF_MODE_DONE, &chan->conf_state);
3442 break;
3443
3444 case L2CAP_MODE_ERTM:
3445 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3446 chan->remote_tx_win = rfc.txwin_size;
3447 else
3448 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3449
3450 chan->remote_max_tx = rfc.max_transmit;
3451
3452 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3453 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3454 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3455 rfc.max_pdu_size = cpu_to_le16(size);
3456 chan->remote_mps = size;
3457
3458 __l2cap_set_ertm_timeouts(chan, &rfc);
3459
3460 set_bit(CONF_MODE_DONE, &chan->conf_state);
3461
3462 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3463 sizeof(rfc), (unsigned long) &rfc);
3464
3465 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3466 chan->remote_id = efs.id;
3467 chan->remote_stype = efs.stype;
3468 chan->remote_msdu = le16_to_cpu(efs.msdu);
3469 chan->remote_flush_to =
3470 le32_to_cpu(efs.flush_to);
3471 chan->remote_acc_lat =
3472 le32_to_cpu(efs.acc_lat);
3473 chan->remote_sdu_itime =
3474 le32_to_cpu(efs.sdu_itime);
3475 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3476 sizeof(efs),
3477 (unsigned long) &efs);
3478 }
3479 break;
3480
3481 case L2CAP_MODE_STREAMING:
3482 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3483 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3484 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3485 rfc.max_pdu_size = cpu_to_le16(size);
3486 chan->remote_mps = size;
3487
3488 set_bit(CONF_MODE_DONE, &chan->conf_state);
3489
3490 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3491 (unsigned long) &rfc);
3492
3493 break;
3494
3495 default:
3496 result = L2CAP_CONF_UNACCEPT;
3497
3498 memset(&rfc, 0, sizeof(rfc));
3499 rfc.mode = chan->mode;
3500 }
3501
3502 if (result == L2CAP_CONF_SUCCESS)
3503 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3504 }
3505 rsp->scid = cpu_to_le16(chan->dcid);
3506 rsp->result = cpu_to_le16(result);
3507 rsp->flags = cpu_to_le16(0);
3508
3509 return ptr - data;
3510}
3511
3512static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3513 void *data, u16 *result)
3514{
3515 struct l2cap_conf_req *req = data;
3516 void *ptr = req->data;
3517 int type, olen;
3518 unsigned long val;
3519 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3520 struct l2cap_conf_efs efs;
3521
3522 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3523
3524 while (len >= L2CAP_CONF_OPT_SIZE) {
3525 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3526
3527 switch (type) {
3528 case L2CAP_CONF_MTU:
3529 if (val < L2CAP_DEFAULT_MIN_MTU) {
3530 *result = L2CAP_CONF_UNACCEPT;
3531 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3532 } else
3533 chan->imtu = val;
3534 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3535 break;
3536
3537 case L2CAP_CONF_FLUSH_TO:
3538 chan->flush_to = val;
3539 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3540 2, chan->flush_to);
3541 break;
3542
3543 case L2CAP_CONF_RFC:
3544 if (olen == sizeof(rfc))
3545 memcpy(&rfc, (void *)val, olen);
3546
3547 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3548 rfc.mode != chan->mode)
3549 return -ECONNREFUSED;
3550
3551 chan->fcs = 0;
3552
3553 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3554 sizeof(rfc), (unsigned long) &rfc);
3555 break;
3556
3557 case L2CAP_CONF_EWS:
3558 chan->ack_win = min_t(u16, val, chan->ack_win);
3559 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3560 chan->tx_win);
3561 break;
3562
3563 case L2CAP_CONF_EFS:
3564 if (olen == sizeof(efs))
3565 memcpy(&efs, (void *)val, olen);
3566
3567 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3568 efs.stype != L2CAP_SERV_NOTRAFIC &&
3569 efs.stype != chan->local_stype)
3570 return -ECONNREFUSED;
3571
3572 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3573 (unsigned long) &efs);
3574 break;
3575
3576 case L2CAP_CONF_FCS:
3577 if (*result == L2CAP_CONF_PENDING)
3578 if (val == L2CAP_FCS_NONE)
3579 set_bit(CONF_RECV_NO_FCS,
3580 &chan->conf_state);
3581 break;
3582 }
3583 }
3584
3585 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3586 return -ECONNREFUSED;
3587
3588 chan->mode = rfc.mode;
3589
3590 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3591 switch (rfc.mode) {
3592 case L2CAP_MODE_ERTM:
3593 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3594 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3595 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3596 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3597 chan->ack_win = min_t(u16, chan->ack_win,
3598 rfc.txwin_size);
3599
3600 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3601 chan->local_msdu = le16_to_cpu(efs.msdu);
3602 chan->local_sdu_itime =
3603 le32_to_cpu(efs.sdu_itime);
3604 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3605 chan->local_flush_to =
3606 le32_to_cpu(efs.flush_to);
3607 }
3608 break;
3609
3610 case L2CAP_MODE_STREAMING:
3611 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3612 }
3613 }
3614
3615 req->dcid = cpu_to_le16(chan->dcid);
3616 req->flags = cpu_to_le16(0);
3617
3618 return ptr - data;
3619}
3620
3621static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3622 u16 result, u16 flags)
3623{
3624 struct l2cap_conf_rsp *rsp = data;
3625 void *ptr = rsp->data;
3626
3627 BT_DBG("chan %p", chan);
3628
3629 rsp->scid = cpu_to_le16(chan->dcid);
3630 rsp->result = cpu_to_le16(result);
3631 rsp->flags = cpu_to_le16(flags);
3632
3633 return ptr - data;
3634}
3635
3636void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3637{
3638 struct l2cap_le_conn_rsp rsp;
3639 struct l2cap_conn *conn = chan->conn;
3640
3641 BT_DBG("chan %p", chan);
3642
3643 rsp.dcid = cpu_to_le16(chan->scid);
3644 rsp.mtu = cpu_to_le16(chan->imtu);
3645 rsp.mps = cpu_to_le16(chan->mps);
3646 rsp.credits = cpu_to_le16(chan->rx_credits);
3647 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3648
3649 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3650 &rsp);
3651}
3652
3653void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3654{
3655 struct l2cap_conn_rsp rsp;
3656 struct l2cap_conn *conn = chan->conn;
3657 u8 buf[128];
3658 u8 rsp_code;
3659
3660 rsp.scid = cpu_to_le16(chan->dcid);
3661 rsp.dcid = cpu_to_le16(chan->scid);
3662 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3663 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3664
3665 if (chan->hs_hcon)
3666 rsp_code = L2CAP_CREATE_CHAN_RSP;
3667 else
3668 rsp_code = L2CAP_CONN_RSP;
3669
3670 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3671
3672 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3673
3674 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3675 return;
3676
3677 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3678 l2cap_build_conf_req(chan, buf), buf);
3679 chan->num_conf_req++;
3680}
3681
3682static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3683{
3684 int type, olen;
3685 unsigned long val;
3686 /* Use sane default values in case a misbehaving remote device
3687 * did not send an RFC or extended window size option.
3688 */
3689 u16 txwin_ext = chan->ack_win;
3690 struct l2cap_conf_rfc rfc = {
3691 .mode = chan->mode,
3692 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3693 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3694 .max_pdu_size = cpu_to_le16(chan->imtu),
3695 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3696 };
3697
3698 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3699
3700 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3701 return;
3702
3703 while (len >= L2CAP_CONF_OPT_SIZE) {
3704 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3705
3706 switch (type) {
3707 case L2CAP_CONF_RFC:
3708 if (olen == sizeof(rfc))
3709 memcpy(&rfc, (void *)val, olen);
3710 break;
3711 case L2CAP_CONF_EWS:
3712 txwin_ext = val;
3713 break;
3714 }
3715 }
3716
3717 switch (rfc.mode) {
3718 case L2CAP_MODE_ERTM:
3719 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3720 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3721 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3722 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3723 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3724 else
3725 chan->ack_win = min_t(u16, chan->ack_win,
3726 rfc.txwin_size);
3727 break;
3728 case L2CAP_MODE_STREAMING:
3729 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3730 }
3731}
3732
3733static inline int l2cap_command_rej(struct l2cap_conn *conn,
3734 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3735 u8 *data)
3736{
3737 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3738
3739 if (cmd_len < sizeof(*rej))
3740 return -EPROTO;
3741
3742 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3743 return 0;
3744
3745 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3746 cmd->ident == conn->info_ident) {
3747 cancel_delayed_work(&conn->info_timer);
3748
3749 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3750 conn->info_ident = 0;
3751
3752 l2cap_conn_start(conn);
3753 }
3754
3755 return 0;
3756}
3757
3758static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3759 struct l2cap_cmd_hdr *cmd,
3760 u8 *data, u8 rsp_code, u8 amp_id)
3761{
3762 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3763 struct l2cap_conn_rsp rsp;
3764 struct l2cap_chan *chan = NULL, *pchan;
3765 int result, status = L2CAP_CS_NO_INFO;
3766
3767 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3768 __le16 psm = req->psm;
3769
3770 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3771
3772 /* Check if we have a socket listening on this PSM */
3773 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3774 &conn->hcon->dst, ACL_LINK);
3775 if (!pchan) {
3776 result = L2CAP_CR_BAD_PSM;
3777 goto sendresp;
3778 }
3779
3780 mutex_lock(&conn->chan_lock);
3781 l2cap_chan_lock(pchan);
3782
3783 /* Check if the ACL is secure enough (if not SDP) */
3784 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3785 !hci_conn_check_link_mode(conn->hcon)) {
3786 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3787 result = L2CAP_CR_SEC_BLOCK;
3788 goto response;
3789 }
3790
3791 result = L2CAP_CR_NO_MEM;
3792
3793 /* Check if we already have a channel with that DCID */
3794 if (__l2cap_get_chan_by_dcid(conn, scid))
3795 goto response;
3796
3797 chan = pchan->ops->new_connection(pchan);
3798 if (!chan)
3799 goto response;
3800
3801 /* For certain devices (ex: HID mouse), support for authentication,
3802 * pairing and bonding is optional. For such devices, in order to avoid
3803 * keeping the ACL alive for too long after L2CAP disconnection, reset
3804 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3805 */
3806 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3807
3808 bacpy(&chan->src, &conn->hcon->src);
3809 bacpy(&chan->dst, &conn->hcon->dst);
3810 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3811 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3812 chan->psm = psm;
3813 chan->dcid = scid;
3814 chan->local_amp_id = amp_id;
3815
3816 __l2cap_chan_add(conn, chan);
3817
3818 dcid = chan->scid;
3819
3820 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3821
3822 chan->ident = cmd->ident;
3823
3824 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3825 if (l2cap_chan_check_security(chan)) {
3826 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3827 l2cap_state_change(chan, BT_CONNECT2);
3828 result = L2CAP_CR_PEND;
3829 status = L2CAP_CS_AUTHOR_PEND;
3830 chan->ops->defer(chan);
3831 } else {
3832 /* Force pending result for AMP controllers.
3833 * The connection will succeed after the
3834 * physical link is up.
3835 */
3836 if (amp_id == AMP_ID_BREDR) {
3837 l2cap_state_change(chan, BT_CONFIG);
3838 result = L2CAP_CR_SUCCESS;
3839 } else {
3840 l2cap_state_change(chan, BT_CONNECT2);
3841 result = L2CAP_CR_PEND;
3842 }
3843 status = L2CAP_CS_NO_INFO;
3844 }
3845 } else {
3846 l2cap_state_change(chan, BT_CONNECT2);
3847 result = L2CAP_CR_PEND;
3848 status = L2CAP_CS_AUTHEN_PEND;
3849 }
3850 } else {
3851 l2cap_state_change(chan, BT_CONNECT2);
3852 result = L2CAP_CR_PEND;
3853 status = L2CAP_CS_NO_INFO;
3854 }
3855
3856response:
3857 l2cap_chan_unlock(pchan);
3858 mutex_unlock(&conn->chan_lock);
3859
3860sendresp:
3861 rsp.scid = cpu_to_le16(scid);
3862 rsp.dcid = cpu_to_le16(dcid);
3863 rsp.result = cpu_to_le16(result);
3864 rsp.status = cpu_to_le16(status);
3865 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3866
3867 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3868 struct l2cap_info_req info;
3869 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3870
3871 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3872 conn->info_ident = l2cap_get_ident(conn);
3873
3874 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3875
3876 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3877 sizeof(info), &info);
3878 }
3879
3880 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3881 result == L2CAP_CR_SUCCESS) {
3882 u8 buf[128];
3883 set_bit(CONF_REQ_SENT, &chan->conf_state);
3884 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3885 l2cap_build_conf_req(chan, buf), buf);
3886 chan->num_conf_req++;
3887 }
3888
3889 return chan;
3890}
3891
3892static int l2cap_connect_req(struct l2cap_conn *conn,
3893 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3894{
3895 struct hci_dev *hdev = conn->hcon->hdev;
3896 struct hci_conn *hcon = conn->hcon;
3897
3898 if (cmd_len < sizeof(struct l2cap_conn_req))
3899 return -EPROTO;
3900
3901 hci_dev_lock(hdev);
3902 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3903 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3904 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3905 hcon->dst_type, 0, NULL, 0,
3906 hcon->dev_class);
3907 hci_dev_unlock(hdev);
3908
3909 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3910 return 0;
3911}
3912
3913static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3914 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3915 u8 *data)
3916{
3917 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3918 u16 scid, dcid, result, status;
3919 struct l2cap_chan *chan;
3920 u8 req[128];
3921 int err;
3922
3923 if (cmd_len < sizeof(*rsp))
3924 return -EPROTO;
3925
3926 scid = __le16_to_cpu(rsp->scid);
3927 dcid = __le16_to_cpu(rsp->dcid);
3928 result = __le16_to_cpu(rsp->result);
3929 status = __le16_to_cpu(rsp->status);
3930
3931 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3932 dcid, scid, result, status);
3933
3934 mutex_lock(&conn->chan_lock);
3935
3936 if (scid) {
3937 chan = __l2cap_get_chan_by_scid(conn, scid);
3938 if (!chan) {
3939 err = -EBADSLT;
3940 goto unlock;
3941 }
3942 } else {
3943 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3944 if (!chan) {
3945 err = -EBADSLT;
3946 goto unlock;
3947 }
3948 }
3949
3950 err = 0;
3951
3952 l2cap_chan_lock(chan);
3953
3954 switch (result) {
3955 case L2CAP_CR_SUCCESS:
3956 l2cap_state_change(chan, BT_CONFIG);
3957 chan->ident = 0;
3958 chan->dcid = dcid;
3959 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3960
3961 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3962 break;
3963
3964 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3965 l2cap_build_conf_req(chan, req), req);
3966 chan->num_conf_req++;
3967 break;
3968
3969 case L2CAP_CR_PEND:
3970 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3971 break;
3972
3973 default:
3974 l2cap_chan_del(chan, ECONNREFUSED);
3975 break;
3976 }
3977
3978 l2cap_chan_unlock(chan);
3979
3980unlock:
3981 mutex_unlock(&conn->chan_lock);
3982
3983 return err;
3984}
3985
3986static inline void set_default_fcs(struct l2cap_chan *chan)
3987{
3988 /* FCS is enabled only in ERTM or streaming mode, if one or both
3989 * sides request it.
3990 */
3991 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3992 chan->fcs = L2CAP_FCS_NONE;
3993 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3994 chan->fcs = L2CAP_FCS_CRC16;
3995}
3996
3997static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3998 u8 ident, u16 flags)
3999{
4000 struct l2cap_conn *conn = chan->conn;
4001
4002 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4003 flags);
4004
4005 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4006 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4007
4008 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4009 l2cap_build_conf_rsp(chan, data,
4010 L2CAP_CONF_SUCCESS, flags), data);
4011}
4012
4013static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4014 u16 scid, u16 dcid)
4015{
4016 struct l2cap_cmd_rej_cid rej;
4017
4018 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4019 rej.scid = cpu_to_le16(scid);
4020 rej.dcid = cpu_to_le16(dcid);
4021
4022 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4023}
4024
4025static inline int l2cap_config_req(struct l2cap_conn *conn,
4026 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4027 u8 *data)
4028{
4029 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4030 u16 dcid, flags;
4031 u8 rsp[64];
4032 struct l2cap_chan *chan;
4033 int len, err = 0;
4034
4035 if (cmd_len < sizeof(*req))
4036 return -EPROTO;
4037
4038 dcid = __le16_to_cpu(req->dcid);
4039 flags = __le16_to_cpu(req->flags);
4040
4041 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4042
4043 chan = l2cap_get_chan_by_scid(conn, dcid);
4044 if (!chan) {
4045 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4046 return 0;
4047 }
4048
4049 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4050 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4051 chan->dcid);
4052 goto unlock;
4053 }
4054
4055 /* Reject if config buffer is too small. */
4056 len = cmd_len - sizeof(*req);
4057 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4058 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4059 l2cap_build_conf_rsp(chan, rsp,
4060 L2CAP_CONF_REJECT, flags), rsp);
4061 goto unlock;
4062 }
4063
4064 /* Store config. */
4065 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4066 chan->conf_len += len;
4067
4068 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4069 /* Incomplete config. Send empty response. */
4070 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4071 l2cap_build_conf_rsp(chan, rsp,
4072 L2CAP_CONF_SUCCESS, flags), rsp);
4073 goto unlock;
4074 }
4075
4076 /* Complete config. */
4077 len = l2cap_parse_conf_req(chan, rsp);
4078 if (len < 0) {
4079 l2cap_send_disconn_req(chan, ECONNRESET);
4080 goto unlock;
4081 }
4082
4083 chan->ident = cmd->ident;
4084 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4085 chan->num_conf_rsp++;
4086
4087 /* Reset config buffer. */
4088 chan->conf_len = 0;
4089
4090 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4091 goto unlock;
4092
4093 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4094 set_default_fcs(chan);
4095
4096 if (chan->mode == L2CAP_MODE_ERTM ||
4097 chan->mode == L2CAP_MODE_STREAMING)
4098 err = l2cap_ertm_init(chan);
4099
4100 if (err < 0)
4101 l2cap_send_disconn_req(chan, -err);
4102 else
4103 l2cap_chan_ready(chan);
4104
4105 goto unlock;
4106 }
4107
4108 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4109 u8 buf[64];
4110 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4111 l2cap_build_conf_req(chan, buf), buf);
4112 chan->num_conf_req++;
4113 }
4114
4115 /* Got Conf Rsp PENDING from remote side and assume we sent
4116 * Conf Rsp PENDING in the code above */
4117 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4118 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4119
4120 /* check compatibility */
4121
4122 /* Send rsp for BR/EDR channel */
4123 if (!chan->hs_hcon)
4124 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4125 else
4126 chan->ident = cmd->ident;
4127 }
4128
4129unlock:
4130 l2cap_chan_unlock(chan);
4131 return err;
4132}
4133
4134static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4135 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4136 u8 *data)
4137{
4138 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4139 u16 scid, flags, result;
4140 struct l2cap_chan *chan;
4141 int len = cmd_len - sizeof(*rsp);
4142 int err = 0;
4143
4144 if (cmd_len < sizeof(*rsp))
4145 return -EPROTO;
4146
4147 scid = __le16_to_cpu(rsp->scid);
4148 flags = __le16_to_cpu(rsp->flags);
4149 result = __le16_to_cpu(rsp->result);
4150
4151 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4152 result, len);
4153
4154 chan = l2cap_get_chan_by_scid(conn, scid);
4155 if (!chan)
4156 return 0;
4157
4158 switch (result) {
4159 case L2CAP_CONF_SUCCESS:
4160 l2cap_conf_rfc_get(chan, rsp->data, len);
4161 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4162 break;
4163
4164 case L2CAP_CONF_PENDING:
4165 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4166
4167 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4168 char buf[64];
4169
4170 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4171 buf, &result);
4172 if (len < 0) {
4173 l2cap_send_disconn_req(chan, ECONNRESET);
4174 goto done;
4175 }
4176
4177 if (!chan->hs_hcon) {
4178 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4179 0);
4180 } else {
4181 if (l2cap_check_efs(chan)) {
4182 amp_create_logical_link(chan);
4183 chan->ident = cmd->ident;
4184 }
4185 }
4186 }
4187 goto done;
4188
4189 case L2CAP_CONF_UNACCEPT:
4190 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4191 char req[64];
4192
4193 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4194 l2cap_send_disconn_req(chan, ECONNRESET);
4195 goto done;
4196 }
4197
4198 /* throw out any old stored conf requests */
4199 result = L2CAP_CONF_SUCCESS;
4200 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4201 req, &result);
4202 if (len < 0) {
4203 l2cap_send_disconn_req(chan, ECONNRESET);
4204 goto done;
4205 }
4206
4207 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4208 L2CAP_CONF_REQ, len, req);
4209 chan->num_conf_req++;
4210 if (result != L2CAP_CONF_SUCCESS)
4211 goto done;
4212 break;
4213 }
4214
4215 default:
4216 l2cap_chan_set_err(chan, ECONNRESET);
4217
4218 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4219 l2cap_send_disconn_req(chan, ECONNRESET);
4220 goto done;
4221 }
4222
4223 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4224 goto done;
4225
4226 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4227
4228 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4229 set_default_fcs(chan);
4230
4231 if (chan->mode == L2CAP_MODE_ERTM ||
4232 chan->mode == L2CAP_MODE_STREAMING)
4233 err = l2cap_ertm_init(chan);
4234
4235 if (err < 0)
4236 l2cap_send_disconn_req(chan, -err);
4237 else
4238 l2cap_chan_ready(chan);
4239 }
4240
4241done:
4242 l2cap_chan_unlock(chan);
4243 return err;
4244}
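/* Configuration completes only once both directions are done: locally we
 * must have answered the peer's request (CONF_OUTPUT_DONE, set when the
 * response is sent from l2cap_config_req()) and have received acceptance
 * of our own request (CONF_INPUT_DONE, set here).  Whichever side of the
 * handshake finishes second initialises ERTM/streaming state and marks
 * the channel ready.
 */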
4245
4246static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4247 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4248 u8 *data)
4249{
4250 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4251 struct l2cap_disconn_rsp rsp;
4252 u16 dcid, scid;
4253 struct l2cap_chan *chan;
4254
4255 if (cmd_len != sizeof(*req))
4256 return -EPROTO;
4257
4258 scid = __le16_to_cpu(req->scid);
4259 dcid = __le16_to_cpu(req->dcid);
4260
4261 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4262
4263 mutex_lock(&conn->chan_lock);
4264
4265 chan = __l2cap_get_chan_by_scid(conn, dcid);
4266 if (!chan) {
4267 mutex_unlock(&conn->chan_lock);
4268 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4269 return 0;
4270 }
4271
4272 l2cap_chan_lock(chan);
4273
4274 rsp.dcid = cpu_to_le16(chan->scid);
4275 rsp.scid = cpu_to_le16(chan->dcid);
4276 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4277
4278 chan->ops->set_shutdown(chan);
4279
4280 l2cap_chan_hold(chan);
4281 l2cap_chan_del(chan, ECONNRESET);
4282
4283 l2cap_chan_unlock(chan);
4284
4285 chan->ops->close(chan);
4286 l2cap_chan_put(chan);
4287
4288 mutex_unlock(&conn->chan_lock);
4289
4290 return 0;
4291}
4292
4293static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4294 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4295 u8 *data)
4296{
4297 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4298 u16 dcid, scid;
4299 struct l2cap_chan *chan;
4300
4301 if (cmd_len != sizeof(*rsp))
4302 return -EPROTO;
4303
4304 scid = __le16_to_cpu(rsp->scid);
4305 dcid = __le16_to_cpu(rsp->dcid);
4306
4307 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4308
4309 mutex_lock(&conn->chan_lock);
4310
4311 chan = __l2cap_get_chan_by_scid(conn, scid);
4312 if (!chan) {
4313 mutex_unlock(&conn->chan_lock);
4314 return 0;
4315 }
4316
4317 l2cap_chan_lock(chan);
4318
4319 l2cap_chan_hold(chan);
4320 l2cap_chan_del(chan, 0);
4321
4322 l2cap_chan_unlock(chan);
4323
4324 chan->ops->close(chan);
4325 l2cap_chan_put(chan);
4326
4327 mutex_unlock(&conn->chan_lock);
4328
4329 return 0;
4330}
4331
4332static inline int l2cap_information_req(struct l2cap_conn *conn,
4333 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4334 u8 *data)
4335{
4336 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4337 u16 type;
4338
4339 if (cmd_len != sizeof(*req))
4340 return -EPROTO;
4341
4342 type = __le16_to_cpu(req->type);
4343
4344 BT_DBG("type 0x%4.4x", type);
4345
4346 if (type == L2CAP_IT_FEAT_MASK) {
4347 u8 buf[8];
4348 u32 feat_mask = l2cap_feat_mask;
4349 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4350 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4351 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4352 if (!disable_ertm)
4353 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4354 | L2CAP_FEAT_FCS;
4355 if (conn->hs_enabled)
4356 feat_mask |= L2CAP_FEAT_EXT_FLOW
4357 | L2CAP_FEAT_EXT_WINDOW;
4358
4359 put_unaligned_le32(feat_mask, rsp->data);
4360 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4361 buf);
4362 } else if (type == L2CAP_IT_FIXED_CHAN) {
4363 u8 buf[12];
4364 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4365
4366 if (conn->hs_enabled)
4367 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4368 else
4369 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4370
4371 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4372 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4373 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4374 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4375 buf);
4376 } else {
4377 struct l2cap_info_rsp rsp;
4378 rsp.type = cpu_to_le16(type);
4379 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4380 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4381 &rsp);
4382 }
4383
4384 return 0;
4385}
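/* The feature-mask reply built above is 8 bytes on the wire: a 2-byte
 * type (L2CAP_IT_FEAT_MASK), a 2-byte result and a 4-byte little-endian
 * bitmask.  With ERTM enabled and high speed disabled, the mask carries
 * the defaults from l2cap_feat_mask plus L2CAP_FEAT_ERTM,
 * L2CAP_FEAT_STREAMING and L2CAP_FEAT_FCS.
 */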
4386
4387static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4388 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4389 u8 *data)
4390{
4391 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4392 u16 type, result;
4393
4394 if (cmd_len < sizeof(*rsp))
4395 return -EPROTO;
4396
4397 type = __le16_to_cpu(rsp->type);
4398 result = __le16_to_cpu(rsp->result);
4399
4400 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4401
4402 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4403 if (cmd->ident != conn->info_ident ||
4404 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4405 return 0;
4406
4407 cancel_delayed_work(&conn->info_timer);
4408
4409 if (result != L2CAP_IR_SUCCESS) {
4410 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4411 conn->info_ident = 0;
4412
4413 l2cap_conn_start(conn);
4414
4415 return 0;
4416 }
4417
4418 switch (type) {
4419 case L2CAP_IT_FEAT_MASK:
4420 conn->feat_mask = get_unaligned_le32(rsp->data);
4421
4422 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4423 struct l2cap_info_req req;
4424 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4425
4426 conn->info_ident = l2cap_get_ident(conn);
4427
4428 l2cap_send_cmd(conn, conn->info_ident,
4429 L2CAP_INFO_REQ, sizeof(req), &req);
4430 } else {
4431 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4432 conn->info_ident = 0;
4433
4434 l2cap_conn_start(conn);
4435 }
4436 break;
4437
4438 case L2CAP_IT_FIXED_CHAN:
4439 conn->fixed_chan_mask = rsp->data[0];
4440 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4441 conn->info_ident = 0;
4442
4443 l2cap_conn_start(conn);
4444 break;
4445 }
4446
4447 return 0;
4448}
4449
4450static int l2cap_create_channel_req(struct l2cap_conn *conn,
4451 struct l2cap_cmd_hdr *cmd,
4452 u16 cmd_len, void *data)
4453{
4454 struct l2cap_create_chan_req *req = data;
4455 struct l2cap_create_chan_rsp rsp;
4456 struct l2cap_chan *chan;
4457 struct hci_dev *hdev;
4458 u16 psm, scid;
4459
4460 if (cmd_len != sizeof(*req))
4461 return -EPROTO;
4462
4463 if (!conn->hs_enabled)
4464 return -EINVAL;
4465
4466 psm = le16_to_cpu(req->psm);
4467 scid = le16_to_cpu(req->scid);
4468
4469 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4470
4471 /* For controller id 0 make BR/EDR connection */
4472 if (req->amp_id == AMP_ID_BREDR) {
4473 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4474 req->amp_id);
4475 return 0;
4476 }
4477
4478 /* Validate AMP controller id */
4479 hdev = hci_dev_get(req->amp_id);
4480 if (!hdev)
4481 goto error;
4482
4483 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4484 hci_dev_put(hdev);
4485 goto error;
4486 }
4487
4488 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4489 req->amp_id);
4490 if (chan) {
4491 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4492 struct hci_conn *hs_hcon;
4493
4494 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4495 &conn->hcon->dst);
4496 if (!hs_hcon) {
4497 hci_dev_put(hdev);
4498 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4499 chan->dcid);
4500 return 0;
4501 }
4502
4503 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4504
4505 mgr->bredr_chan = chan;
4506 chan->hs_hcon = hs_hcon;
4507 chan->fcs = L2CAP_FCS_NONE;
4508 conn->mtu = hdev->block_mtu;
4509 }
4510
4511 hci_dev_put(hdev);
4512
4513 return 0;
4514
4515error:
4516 rsp.dcid = 0;
4517 rsp.scid = cpu_to_le16(scid);
4518 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4519 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4520
4521 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4522 sizeof(rsp), &rsp);
4523
4524 return 0;
4525}
4526
4527static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4528{
4529 struct l2cap_move_chan_req req;
4530 u8 ident;
4531
4532 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4533
4534 ident = l2cap_get_ident(chan->conn);
4535 chan->ident = ident;
4536
4537 req.icid = cpu_to_le16(chan->scid);
4538 req.dest_amp_id = dest_amp_id;
4539
4540 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4541 &req);
4542
4543 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4544}
4545
4546static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4547{
4548 struct l2cap_move_chan_rsp rsp;
4549
4550 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4551
4552 rsp.icid = cpu_to_le16(chan->dcid);
4553 rsp.result = cpu_to_le16(result);
4554
4555 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4556 sizeof(rsp), &rsp);
4557}
4558
4559static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4560{
4561 struct l2cap_move_chan_cfm cfm;
4562
4563 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4564
4565 chan->ident = l2cap_get_ident(chan->conn);
4566
4567 cfm.icid = cpu_to_le16(chan->scid);
4568 cfm.result = cpu_to_le16(result);
4569
4570 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4571 sizeof(cfm), &cfm);
4572
4573 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4574}
4575
4576static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4577{
4578 struct l2cap_move_chan_cfm cfm;
4579
4580 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4581
4582 cfm.icid = cpu_to_le16(icid);
4583 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4584
4585 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4586 sizeof(cfm), &cfm);
4587}
4588
4589static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4590 u16 icid)
4591{
4592 struct l2cap_move_chan_cfm_rsp rsp;
4593
4594 BT_DBG("icid 0x%4.4x", icid);
4595
4596 rsp.icid = cpu_to_le16(icid);
4597 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4598}
4599
4600static void __release_logical_link(struct l2cap_chan *chan)
4601{
4602 chan->hs_hchan = NULL;
4603 chan->hs_hcon = NULL;
4604
4605 /* Placeholder - release the logical link */
4606}
4607
4608static void l2cap_logical_fail(struct l2cap_chan *chan)
4609{
4610 /* Logical link setup failed */
4611 if (chan->state != BT_CONNECTED) {
4612 /* Create channel failure, disconnect */
4613 l2cap_send_disconn_req(chan, ECONNRESET);
4614 return;
4615 }
4616
4617 switch (chan->move_role) {
4618 case L2CAP_MOVE_ROLE_RESPONDER:
4619 l2cap_move_done(chan);
4620 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4621 break;
4622 case L2CAP_MOVE_ROLE_INITIATOR:
4623 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4624 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4625 /* Remote has only sent pending or
4626 * success responses, clean up
4627 */
4628 l2cap_move_done(chan);
4629 }
4630
4631 /* Other amp move states imply that the move
4632 * has already aborted
4633 */
4634 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4635 break;
4636 }
4637}
4638
4639static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4640 struct hci_chan *hchan)
4641{
4642 struct l2cap_conf_rsp rsp;
4643
4644 chan->hs_hchan = hchan;
4645 chan->hs_hcon->l2cap_data = chan->conn;
4646
4647 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4648
4649 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4650 int err;
4651
4652 set_default_fcs(chan);
4653
4654 err = l2cap_ertm_init(chan);
4655 if (err < 0)
4656 l2cap_send_disconn_req(chan, -err);
4657 else
4658 l2cap_chan_ready(chan);
4659 }
4660}
4661
4662static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4663 struct hci_chan *hchan)
4664{
4665 chan->hs_hcon = hchan->conn;
4666 chan->hs_hcon->l2cap_data = chan->conn;
4667
4668 BT_DBG("move_state %d", chan->move_state);
4669
4670 switch (chan->move_state) {
4671 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4672 /* Move confirm will be sent after a success
4673 * response is received
4674 */
4675 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4676 break;
4677 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4678 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4679 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4680 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4681 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4682 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4683 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4684 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4685 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4686 }
4687 break;
4688 default:
4689 /* Move was not in expected state, free the channel */
4690 __release_logical_link(chan);
4691
4692 chan->move_state = L2CAP_MOVE_STABLE;
4693 }
4694}
4695
4696/* Call with chan locked */
4697void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4698 u8 status)
4699{
4700 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4701
4702 if (status) {
4703 l2cap_logical_fail(chan);
4704 __release_logical_link(chan);
4705 return;
4706 }
4707
4708 if (chan->state != BT_CONNECTED) {
4709 /* Ignore logical link if channel is on BR/EDR */
4710 if (chan->local_amp_id != AMP_ID_BREDR)
4711 l2cap_logical_finish_create(chan, hchan);
4712 } else {
4713 l2cap_logical_finish_move(chan, hchan);
4714 }
4715}
4716
4717void l2cap_move_start(struct l2cap_chan *chan)
4718{
4719 BT_DBG("chan %p", chan);
4720
4721 if (chan->local_amp_id == AMP_ID_BREDR) {
4722 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4723 return;
4724 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4725 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4726 /* Placeholder - start physical link setup */
4727 } else {
4728 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4729 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4730 chan->move_id = 0;
4731 l2cap_move_setup(chan);
4732 l2cap_send_move_chan_req(chan, 0);
4733 }
4734}
4735
4736static void l2cap_do_create(struct l2cap_chan *chan, int result,
4737 u8 local_amp_id, u8 remote_amp_id)
4738{
4739 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4740 local_amp_id, remote_amp_id);
4741
4742 chan->fcs = L2CAP_FCS_NONE;
4743
4744 /* Outgoing channel on AMP */
4745 if (chan->state == BT_CONNECT) {
4746 if (result == L2CAP_CR_SUCCESS) {
4747 chan->local_amp_id = local_amp_id;
4748 l2cap_send_create_chan_req(chan, remote_amp_id);
4749 } else {
4750 /* Revert to BR/EDR connect */
4751 l2cap_send_conn_req(chan);
4752 }
4753
4754 return;
4755 }
4756
4757 /* Incoming channel on AMP */
4758 if (__l2cap_no_conn_pending(chan)) {
4759 struct l2cap_conn_rsp rsp;
4760 char buf[128];
4761 rsp.scid = cpu_to_le16(chan->dcid);
4762 rsp.dcid = cpu_to_le16(chan->scid);
4763
4764 if (result == L2CAP_CR_SUCCESS) {
4765 /* Send successful response */
4766 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4767 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4768 } else {
4769 /* Send negative response */
4770 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4771 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4772 }
4773
4774 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4775 sizeof(rsp), &rsp);
4776
4777 if (result == L2CAP_CR_SUCCESS) {
4778 l2cap_state_change(chan, BT_CONFIG);
4779 set_bit(CONF_REQ_SENT, &chan->conf_state);
4780 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4781 L2CAP_CONF_REQ,
4782 l2cap_build_conf_req(chan, buf), buf);
4783 chan->num_conf_req++;
4784 }
4785 }
4786}
4787
4788static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4789 u8 remote_amp_id)
4790{
4791 l2cap_move_setup(chan);
4792 chan->move_id = local_amp_id;
4793 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4794
4795 l2cap_send_move_chan_req(chan, remote_amp_id);
4796}
4797
4798static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4799{
4800 struct hci_chan *hchan = NULL;
4801
4802 /* Placeholder - get hci_chan for logical link */
4803
4804 if (hchan) {
4805 if (hchan->state == BT_CONNECTED) {
4806 /* Logical link is ready to go */
4807 chan->hs_hcon = hchan->conn;
4808 chan->hs_hcon->l2cap_data = chan->conn;
4809 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4810 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4811
4812 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4813 } else {
4814 /* Wait for logical link to be ready */
4815 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4816 }
4817 } else {
4818 /* Logical link not available */
4819 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4820 }
4821}
4822
4823static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4824{
4825 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4826 u8 rsp_result;
4827 if (result == -EINVAL)
4828 rsp_result = L2CAP_MR_BAD_ID;
4829 else
4830 rsp_result = L2CAP_MR_NOT_ALLOWED;
4831
4832 l2cap_send_move_chan_rsp(chan, rsp_result);
4833 }
4834
4835 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4836 chan->move_state = L2CAP_MOVE_STABLE;
4837
4838 /* Restart data transmission */
4839 l2cap_ertm_send(chan);
4840}
4841
4842/* Invoke with locked chan */
4843void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4844{
4845 u8 local_amp_id = chan->local_amp_id;
4846 u8 remote_amp_id = chan->remote_amp_id;
4847
4848 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4849 chan, result, local_amp_id, remote_amp_id);
4850
4851 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4852 l2cap_chan_unlock(chan);
4853 return;
4854 }
4855
4856 if (chan->state != BT_CONNECTED) {
4857 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4858 } else if (result != L2CAP_MR_SUCCESS) {
4859 l2cap_do_move_cancel(chan, result);
4860 } else {
4861 switch (chan->move_role) {
4862 case L2CAP_MOVE_ROLE_INITIATOR:
4863 l2cap_do_move_initiate(chan, local_amp_id,
4864 remote_amp_id);
4865 break;
4866 case L2CAP_MOVE_ROLE_RESPONDER:
4867 l2cap_do_move_respond(chan, result);
4868 break;
4869 default:
4870 l2cap_do_move_cancel(chan, result);
4871 break;
4872 }
4873 }
4874}
4875
4876static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4877 struct l2cap_cmd_hdr *cmd,
4878 u16 cmd_len, void *data)
4879{
4880 struct l2cap_move_chan_req *req = data;
4881 struct l2cap_move_chan_rsp rsp;
4882 struct l2cap_chan *chan;
4883 u16 icid = 0;
4884 u16 result = L2CAP_MR_NOT_ALLOWED;
4885
4886 if (cmd_len != sizeof(*req))
4887 return -EPROTO;
4888
4889 icid = le16_to_cpu(req->icid);
4890
4891 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4892
4893 if (!conn->hs_enabled)
4894 return -EINVAL;
4895
4896 chan = l2cap_get_chan_by_dcid(conn, icid);
4897 if (!chan) {
4898 rsp.icid = cpu_to_le16(icid);
4899 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4900 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4901 sizeof(rsp), &rsp);
4902 return 0;
4903 }
4904
4905 chan->ident = cmd->ident;
4906
4907 if (chan->scid < L2CAP_CID_DYN_START ||
4908 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4909 (chan->mode != L2CAP_MODE_ERTM &&
4910 chan->mode != L2CAP_MODE_STREAMING)) {
4911 result = L2CAP_MR_NOT_ALLOWED;
4912 goto send_move_response;
4913 }
4914
4915 if (chan->local_amp_id == req->dest_amp_id) {
4916 result = L2CAP_MR_SAME_ID;
4917 goto send_move_response;
4918 }
4919
4920 if (req->dest_amp_id != AMP_ID_BREDR) {
4921 struct hci_dev *hdev;
4922 hdev = hci_dev_get(req->dest_amp_id);
4923 if (!hdev || hdev->dev_type != HCI_AMP ||
4924 !test_bit(HCI_UP, &hdev->flags)) {
4925 if (hdev)
4926 hci_dev_put(hdev);
4927
4928 result = L2CAP_MR_BAD_ID;
4929 goto send_move_response;
4930 }
4931 hci_dev_put(hdev);
4932 }
4933
4934 /* Detect a move collision. Only send a collision response
4935 * if this side has "lost", otherwise proceed with the move.
4936 * The winner has the larger bd_addr.
4937 */
4938 if ((__chan_is_moving(chan) ||
4939 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4940 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4941 result = L2CAP_MR_COLLISION;
4942 goto send_move_response;
4943 }
4944
4945 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4946 l2cap_move_setup(chan);
4947 chan->move_id = req->dest_amp_id;
4948 icid = chan->dcid;
4949
4950 if (req->dest_amp_id == AMP_ID_BREDR) {
4951 /* Moving to BR/EDR */
4952 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4953 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4954 result = L2CAP_MR_PEND;
4955 } else {
4956 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4957 result = L2CAP_MR_SUCCESS;
4958 }
4959 } else {
4960 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4961 /* Placeholder - uncomment when amp functions are available */
4962 /*amp_accept_physical(chan, req->dest_amp_id);*/
4963 result = L2CAP_MR_PEND;
4964 }
4965
4966send_move_response:
4967 l2cap_send_move_chan_rsp(chan, result);
4968
4969 l2cap_chan_unlock(chan);
4970
4971 return 0;
4972}
4973
4974static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4975{
4976 struct l2cap_chan *chan;
4977 struct hci_chan *hchan = NULL;
4978
4979 chan = l2cap_get_chan_by_scid(conn, icid);
4980 if (!chan) {
4981 l2cap_send_move_chan_cfm_icid(conn, icid);
4982 return;
4983 }
4984
4985 __clear_chan_timer(chan);
4986 if (result == L2CAP_MR_PEND)
4987 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4988
4989 switch (chan->move_state) {
4990 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4991 /* Move confirm will be sent when logical link
4992 * is complete.
4993 */
4994 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4995 break;
4996 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4997 if (result == L2CAP_MR_PEND) {
4998 break;
4999 } else if (test_bit(CONN_LOCAL_BUSY,
5000 &chan->conn_state)) {
5001 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5002 } else {
5003 /* Logical link is up or moving to BR/EDR,
5004 * proceed with move
5005 */
5006 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5007 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5008 }
5009 break;
5010 case L2CAP_MOVE_WAIT_RSP:
5011 /* Moving to AMP */
5012 if (result == L2CAP_MR_SUCCESS) {
5013 /* Remote is ready, send confirm immediately
5014 * after logical link is ready
5015 */
5016 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5017 } else {
5018 /* Both logical link and move success
5019 * are required to confirm
5020 */
5021 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5022 }
5023
5024 /* Placeholder - get hci_chan for logical link */
5025 if (!hchan) {
5026 /* Logical link not available */
5027 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5028 break;
5029 }
5030
5031 /* If the logical link is not yet connected, do not
5032 * send confirmation.
5033 */
5034 if (hchan->state != BT_CONNECTED)
5035 break;
5036
5037 /* Logical link is already ready to go */
5038
5039 chan->hs_hcon = hchan->conn;
5040 chan->hs_hcon->l2cap_data = chan->conn;
5041
5042 if (result == L2CAP_MR_SUCCESS) {
5043 /* Can confirm now */
5044 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5045 } else {
5046 /* Now only need move success
5047 * to confirm
5048 */
5049 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5050 }
5051
5052 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5053 break;
5054 default:
5055 /* Any other amp move state means the move failed. */
5056 chan->move_id = chan->local_amp_id;
5057 l2cap_move_done(chan);
5058 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5059 }
5060
5061 l2cap_chan_unlock(chan);
5062}
5063
5064static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5065 u16 result)
5066{
5067 struct l2cap_chan *chan;
5068
5069 chan = l2cap_get_chan_by_ident(conn, ident);
5070 if (!chan) {
5071 /* Could not locate channel, icid is best guess */
5072 l2cap_send_move_chan_cfm_icid(conn, icid);
5073 return;
5074 }
5075
5076 __clear_chan_timer(chan);
5077
5078 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5079 if (result == L2CAP_MR_COLLISION) {
5080 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5081 } else {
5082 /* Cleanup - cancel move */
5083 chan->move_id = chan->local_amp_id;
5084 l2cap_move_done(chan);
5085 }
5086 }
5087
5088 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5089
5090 l2cap_chan_unlock(chan);
5091}
5092
5093static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5094 struct l2cap_cmd_hdr *cmd,
5095 u16 cmd_len, void *data)
5096{
5097 struct l2cap_move_chan_rsp *rsp = data;
5098 u16 icid, result;
5099
5100 if (cmd_len != sizeof(*rsp))
5101 return -EPROTO;
5102
5103 icid = le16_to_cpu(rsp->icid);
5104 result = le16_to_cpu(rsp->result);
5105
5106 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5107
5108 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5109 l2cap_move_continue(conn, icid, result);
5110 else
5111 l2cap_move_fail(conn, cmd->ident, icid, result);
5112
5113 return 0;
5114}
5115
5116static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5117 struct l2cap_cmd_hdr *cmd,
5118 u16 cmd_len, void *data)
5119{
5120 struct l2cap_move_chan_cfm *cfm = data;
5121 struct l2cap_chan *chan;
5122 u16 icid, result;
5123
5124 if (cmd_len != sizeof(*cfm))
5125 return -EPROTO;
5126
5127 icid = le16_to_cpu(cfm->icid);
5128 result = le16_to_cpu(cfm->result);
5129
5130 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5131
5132 chan = l2cap_get_chan_by_dcid(conn, icid);
5133 if (!chan) {
5134 /* Spec requires a response even if the icid was not found */
5135 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5136 return 0;
5137 }
5138
5139 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5140 if (result == L2CAP_MC_CONFIRMED) {
5141 chan->local_amp_id = chan->move_id;
5142 if (chan->local_amp_id == AMP_ID_BREDR)
5143 __release_logical_link(chan);
5144 } else {
5145 chan->move_id = chan->local_amp_id;
5146 }
5147
5148 l2cap_move_done(chan);
5149 }
5150
5151 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5152
5153 l2cap_chan_unlock(chan);
5154
5155 return 0;
5156}
5157
5158static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5159 struct l2cap_cmd_hdr *cmd,
5160 u16 cmd_len, void *data)
5161{
5162 struct l2cap_move_chan_cfm_rsp *rsp = data;
5163 struct l2cap_chan *chan;
5164 u16 icid;
5165
5166 if (cmd_len != sizeof(*rsp))
5167 return -EPROTO;
5168
5169 icid = le16_to_cpu(rsp->icid);
5170
5171 BT_DBG("icid 0x%4.4x", icid);
5172
5173 chan = l2cap_get_chan_by_scid(conn, icid);
5174 if (!chan)
5175 return 0;
5176
5177 __clear_chan_timer(chan);
5178
5179 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5180 chan->local_amp_id = chan->move_id;
5181
5182 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5183 __release_logical_link(chan);
5184
5185 l2cap_move_done(chan);
5186 }
5187
5188 l2cap_chan_unlock(chan);
5189
5190 return 0;
5191}
5192
5193static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5194 u16 to_multiplier)
5195{
5196 u16 max_latency;
5197
5198 if (min > max || min < 6 || max > 3200)
5199 return -EINVAL;
5200
5201 if (to_multiplier < 10 || to_multiplier > 3200)
5202 return -EINVAL;
5203
5204 if (max >= to_multiplier * 8)
5205 return -EINVAL;
5206
5207 max_latency = (to_multiplier * 8 / max) - 1;
5208 if (latency > 499 || latency > max_latency)
5209 return -EINVAL;
5210
5211 return 0;
5212}
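/* Worked example (illustrative values): a request with min = 24,
 * max = 40, latency = 0 and to_multiplier = 500 passes, since
 * 40 < 500 * 8 and max_latency = (500 * 8 / 40) - 1 = 99.  The same
 * request with latency = 200 is rejected because 200 > 99, even though
 * it is below the absolute limit of 499.
 */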
5213
5214static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5215 struct l2cap_cmd_hdr *cmd,
5216 u16 cmd_len, u8 *data)
5217{
5218 struct hci_conn *hcon = conn->hcon;
5219 struct l2cap_conn_param_update_req *req;
5220 struct l2cap_conn_param_update_rsp rsp;
5221 u16 min, max, latency, to_multiplier;
5222 int err;
5223
5224 if (!(hcon->link_mode & HCI_LM_MASTER))
5225 return -EINVAL;
5226
5227 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5228 return -EPROTO;
5229
5230 req = (struct l2cap_conn_param_update_req *) data;
5231 min = __le16_to_cpu(req->min);
5232 max = __le16_to_cpu(req->max);
5233 latency = __le16_to_cpu(req->latency);
5234 to_multiplier = __le16_to_cpu(req->to_multiplier);
5235
5236 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5237 min, max, latency, to_multiplier);
5238
5239 memset(&rsp, 0, sizeof(rsp));
5240
5241 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5242 if (err)
5243 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5244 else
5245 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5246
5247 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5248 sizeof(rsp), &rsp);
5249
5250 if (!err)
5251 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
5252
5253 return 0;
5254}
5255
5256static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5257 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5258 u8 *data)
5259{
5260 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5261 u16 dcid, mtu, mps, credits, result;
5262 struct l2cap_chan *chan;
5263 int err;
5264
5265 if (cmd_len < sizeof(*rsp))
5266 return -EPROTO;
5267
5268 dcid = __le16_to_cpu(rsp->dcid);
5269 mtu = __le16_to_cpu(rsp->mtu);
5270 mps = __le16_to_cpu(rsp->mps);
5271 credits = __le16_to_cpu(rsp->credits);
5272 result = __le16_to_cpu(rsp->result);
5273
5274 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5275 return -EPROTO;
5276
5277 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5278 dcid, mtu, mps, credits, result);
5279
5280 mutex_lock(&conn->chan_lock);
5281
5282 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5283 if (!chan) {
5284 err = -EBADSLT;
5285 goto unlock;
5286 }
5287
5288 err = 0;
5289
5290 l2cap_chan_lock(chan);
5291
5292 switch (result) {
5293 case L2CAP_CR_SUCCESS:
5294 chan->ident = 0;
5295 chan->dcid = dcid;
5296 chan->omtu = mtu;
5297 chan->remote_mps = mps;
5298 chan->tx_credits = credits;
5299 l2cap_chan_ready(chan);
5300 break;
5301
5302 default:
5303 l2cap_chan_del(chan, ECONNREFUSED);
5304 break;
5305 }
5306
5307 l2cap_chan_unlock(chan);
5308
5309unlock:
5310 mutex_unlock(&conn->chan_lock);
5311
5312 return err;
5313}
5314
5315static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5316 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5317 u8 *data)
5318{
5319 int err = 0;
5320
5321 switch (cmd->code) {
5322 case L2CAP_COMMAND_REJ:
5323 l2cap_command_rej(conn, cmd, cmd_len, data);
5324 break;
5325
5326 case L2CAP_CONN_REQ:
5327 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5328 break;
5329
5330 case L2CAP_CONN_RSP:
5331 case L2CAP_CREATE_CHAN_RSP:
5332 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5333 break;
5334
5335 case L2CAP_CONF_REQ:
5336 err = l2cap_config_req(conn, cmd, cmd_len, data);
5337 break;
5338
5339 case L2CAP_CONF_RSP:
5340 l2cap_config_rsp(conn, cmd, cmd_len, data);
5341 break;
5342
5343 case L2CAP_DISCONN_REQ:
5344 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5345 break;
5346
5347 case L2CAP_DISCONN_RSP:
5348 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5349 break;
5350
5351 case L2CAP_ECHO_REQ:
5352 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5353 break;
5354
5355 case L2CAP_ECHO_RSP:
5356 break;
5357
5358 case L2CAP_INFO_REQ:
5359 err = l2cap_information_req(conn, cmd, cmd_len, data);
5360 break;
5361
5362 case L2CAP_INFO_RSP:
5363 l2cap_information_rsp(conn, cmd, cmd_len, data);
5364 break;
5365
5366 case L2CAP_CREATE_CHAN_REQ:
5367 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5368 break;
5369
5370 case L2CAP_MOVE_CHAN_REQ:
5371 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5372 break;
5373
5374 case L2CAP_MOVE_CHAN_RSP:
5375 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5376 break;
5377
5378 case L2CAP_MOVE_CHAN_CFM:
5379 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5380 break;
5381
5382 case L2CAP_MOVE_CHAN_CFM_RSP:
5383 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5384 break;
5385
5386 default:
5387 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5388 err = -EINVAL;
5389 break;
5390 }
5391
5392 return err;
5393}
5394
5395static int l2cap_le_connect_req(struct l2cap_conn *conn,
5396 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5397 u8 *data)
5398{
5399 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5400 struct l2cap_le_conn_rsp rsp;
5401 struct l2cap_chan *chan, *pchan;
5402 u16 dcid, scid, credits, mtu, mps;
5403 __le16 psm;
5404 u8 result;
5405
5406 if (cmd_len != sizeof(*req))
5407 return -EPROTO;
5408
5409 scid = __le16_to_cpu(req->scid);
5410 mtu = __le16_to_cpu(req->mtu);
5411 mps = __le16_to_cpu(req->mps);
5412 psm = req->psm;
5413 dcid = 0;
5414 credits = 0;
5415
5416 if (mtu < 23 || mps < 23)
5417 return -EPROTO;
5418
5419 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5420 scid, mtu, mps);
5421
5422 /* Check if we have socket listening on psm */
5423 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5424 &conn->hcon->dst, LE_LINK);
5425 if (!pchan) {
5426 result = L2CAP_CR_BAD_PSM;
5427 chan = NULL;
5428 goto response;
5429 }
5430
5431 mutex_lock(&conn->chan_lock);
5432 l2cap_chan_lock(pchan);
5433
5434 if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
5435 result = L2CAP_CR_AUTHENTICATION;
5436 chan = NULL;
5437 goto response_unlock;
5438 }
5439
5440 /* Check if we already have channel with that dcid */
5441 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5442 result = L2CAP_CR_NO_MEM;
5443 chan = NULL;
5444 goto response_unlock;
5445 }
5446
5447 chan = pchan->ops->new_connection(pchan);
5448 if (!chan) {
5449 result = L2CAP_CR_NO_MEM;
5450 goto response_unlock;
5451 }
5452
5453 l2cap_le_flowctl_init(chan);
5454
5455 bacpy(&chan->src, &conn->hcon->src);
5456 bacpy(&chan->dst, &conn->hcon->dst);
5457 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5458 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5459 chan->psm = psm;
5460 chan->dcid = scid;
5461 chan->omtu = mtu;
5462 chan->remote_mps = mps;
5463 chan->tx_credits = __le16_to_cpu(req->credits);
5464
5465 __l2cap_chan_add(conn, chan);
5466 dcid = chan->scid;
5467 credits = chan->rx_credits;
5468
5469 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5470
5471 chan->ident = cmd->ident;
5472
5473 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5474 l2cap_state_change(chan, BT_CONNECT2);
5475 result = L2CAP_CR_PEND;
5476 chan->ops->defer(chan);
5477 } else {
5478 l2cap_chan_ready(chan);
5479 result = L2CAP_CR_SUCCESS;
5480 }
5481
5482response_unlock:
5483 l2cap_chan_unlock(pchan);
5484 mutex_unlock(&conn->chan_lock);
5485
5486 if (result == L2CAP_CR_PEND)
5487 return 0;
5488
5489response:
5490 if (chan) {
5491 rsp.mtu = cpu_to_le16(chan->imtu);
5492 rsp.mps = cpu_to_le16(chan->mps);
5493 } else {
5494 rsp.mtu = 0;
5495 rsp.mps = 0;
5496 }
5497
5498 rsp.dcid = cpu_to_le16(dcid);
5499 rsp.credits = cpu_to_le16(credits);
5500 rsp.result = cpu_to_le16(result);
5501
5502 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5503
5504 return 0;
5505}
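/* LE credit-based connect example (illustrative values): a request with
 * mtu = 23 and mps = 23 is the smallest this code accepts.  On success
 * the response carries the local channel's imtu and mps, the freshly
 * allocated scid (sent as the peer's dcid) and the initial rx_credits;
 * on failure everything except the error result is reported as zero.
 */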
5506
5507static inline int l2cap_le_credits(struct l2cap_conn *conn,
5508 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5509 u8 *data)
5510{
5511 struct l2cap_le_credits *pkt;
5512 struct l2cap_chan *chan;
5513 u16 cid, credits, max_credits;
5514
5515 if (cmd_len != sizeof(*pkt))
5516 return -EPROTO;
5517
5518 pkt = (struct l2cap_le_credits *) data;
5519 cid = __le16_to_cpu(pkt->cid);
5520 credits = __le16_to_cpu(pkt->credits);
5521
5522 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5523
5524 chan = l2cap_get_chan_by_dcid(conn, cid);
5525 if (!chan)
5526 return -EBADSLT;
5527
5528 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5529 if (credits > max_credits) {
5530 BT_ERR("LE credits overflow");
5531 l2cap_send_disconn_req(chan, ECONNRESET);
5532
5533 /* Return 0 so that we don't trigger an unnecessary
5534 * command reject packet.
5535 */
5536 return 0;
5537 }
5538
5539 chan->tx_credits += credits;
5540
5541 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5542 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5543 chan->tx_credits--;
5544 }
5545
5546 if (chan->tx_credits)
5547 chan->ops->resume(chan);
5548
5549 l2cap_chan_unlock(chan);
5550
5551 return 0;
5552}
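/* Credit accounting example (illustrative numbers): with tx_credits = 10
 * still outstanding, the peer may grant at most 65535 - 10 = 65525 more
 * credits in one LE Flow Control Credit packet.  A larger grant would
 * overflow LE_FLOWCTL_MAX_CREDITS and is treated as a protocol error,
 * disconnecting the channel.
 */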
5553
5554static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5555 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5556 u8 *data)
5557{
5558 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5559 struct l2cap_chan *chan;
5560
5561 if (cmd_len < sizeof(*rej))
5562 return -EPROTO;
5563
5564 mutex_lock(&conn->chan_lock);
5565
5566 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5567 if (!chan)
5568 goto done;
5569
5570 l2cap_chan_lock(chan);
5571 l2cap_chan_del(chan, ECONNREFUSED);
5572 l2cap_chan_unlock(chan);
5573
5574done:
5575 mutex_unlock(&conn->chan_lock);
5576 return 0;
5577}
5578
5579static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5580 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5581 u8 *data)
5582{
5583 int err = 0;
5584
5585 switch (cmd->code) {
5586 case L2CAP_COMMAND_REJ:
5587 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5588 break;
5589
5590 case L2CAP_CONN_PARAM_UPDATE_REQ:
5591 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5592 break;
5593
5594 case L2CAP_CONN_PARAM_UPDATE_RSP:
5595 break;
5596
5597 case L2CAP_LE_CONN_RSP:
5598 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5599 break;
5600
5601 case L2CAP_LE_CONN_REQ:
5602 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5603 break;
5604
5605 case L2CAP_LE_CREDITS:
5606 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5607 break;
5608
5609 case L2CAP_DISCONN_REQ:
5610 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5611 break;
5612
5613 case L2CAP_DISCONN_RSP:
5614 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5615 break;
5616
5617 default:
5618 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5619 err = -EINVAL;
5620 break;
5621 }
5622
5623 return err;
5624}
5625
5626static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5627 struct sk_buff *skb)
5628{
5629 struct hci_conn *hcon = conn->hcon;
5630 struct l2cap_cmd_hdr *cmd;
5631 u16 len;
5632 int err;
5633
5634 if (hcon->type != LE_LINK)
5635 goto drop;
5636
5637 if (skb->len < L2CAP_CMD_HDR_SIZE)
5638 goto drop;
5639
5640 cmd = (void *) skb->data;
5641 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5642
5643 len = le16_to_cpu(cmd->len);
5644
5645 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5646
5647 if (len != skb->len || !cmd->ident) {
5648 BT_DBG("corrupted command");
5649 goto drop;
5650 }
5651
5652 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5653 if (err) {
5654 struct l2cap_cmd_rej_unk rej;
5655
5656 BT_ERR("Wrong link type (%d)", err);
5657
5658 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5659 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5660 sizeof(rej), &rej);
5661 }
5662
5663drop:
5664 kfree_skb(skb);
5665}
5666
5667static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5668 struct sk_buff *skb)
5669{
5670 struct hci_conn *hcon = conn->hcon;
5671 u8 *data = skb->data;
5672 int len = skb->len;
5673 struct l2cap_cmd_hdr cmd;
5674 int err;
5675
5676 l2cap_raw_recv(conn, skb);
5677
5678 if (hcon->type != ACL_LINK)
5679 goto drop;
5680
5681 while (len >= L2CAP_CMD_HDR_SIZE) {
5682 u16 cmd_len;
5683 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5684 data += L2CAP_CMD_HDR_SIZE;
5685 len -= L2CAP_CMD_HDR_SIZE;
5686
5687 cmd_len = le16_to_cpu(cmd.len);
5688
5689 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5690 cmd.ident);
5691
5692 if (cmd_len > len || !cmd.ident) {
5693 BT_DBG("corrupted command");
5694 break;
5695 }
5696
5697 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5698 if (err) {
5699 struct l2cap_cmd_rej_unk rej;
5700
5701 BT_ERR("Wrong link type (%d)", err);
5702
5703 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5704 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5705 sizeof(rej), &rej);
5706 }
5707
5708 data += cmd_len;
5709 len -= cmd_len;
5710 }
5711
5712drop:
5713 kfree_skb(skb);
5714}
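/* A single ACL frame may carry several signaling commands back to back;
 * the loop above walks [4-byte command header][payload] units until
 * fewer than L2CAP_CMD_HDR_SIZE bytes remain, and any handler failure
 * is answered with a Command Reject (reason "not understood").
 */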
5715
5716static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5717{
5718 u16 our_fcs, rcv_fcs;
5719 int hdr_size;
5720
5721 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5722 hdr_size = L2CAP_EXT_HDR_SIZE;
5723 else
5724 hdr_size = L2CAP_ENH_HDR_SIZE;
5725
5726 if (chan->fcs == L2CAP_FCS_CRC16) {
5727 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5728 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5729 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5730
5731 if (our_fcs != rcv_fcs)
5732 return -EBADMSG;
5733 }
5734 return 0;
5735}
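/* When CRC16 FCS is in use, the transmitter appends a 2-byte FCS
 * computed over the L2CAP header, control field and payload.  The check
 * above therefore trims the trailing FCS, recomputes the CRC starting
 * hdr_size bytes before skb->data (the header and control field are
 * assumed to have been pulled, not trimmed, by the caller) and compares
 * the two values.
 */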
5736
5737static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5738{
5739 struct l2cap_ctrl control;
5740
5741 BT_DBG("chan %p", chan);
5742
5743 memset(&control, 0, sizeof(control));
5744 control.sframe = 1;
5745 control.final = 1;
5746 control.reqseq = chan->buffer_seq;
5747 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5748
5749 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5750 control.super = L2CAP_SUPER_RNR;
5751 l2cap_send_sframe(chan, &control);
5752 }
5753
5754 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5755 chan->unacked_frames > 0)
5756 __set_retrans_timer(chan);
5757
5758 /* Send pending iframes */
5759 l2cap_ertm_send(chan);
5760
5761 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5762 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5763 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5764 * send it now.
5765 */
5766 control.super = L2CAP_SUPER_RR;
5767 l2cap_send_sframe(chan, &control);
5768 }
5769}
5770
5771static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5772 struct sk_buff **last_frag)
5773{
5774 /* skb->len reflects data in skb as well as all fragments
5775 * skb->data_len reflects only data in fragments
5776 */
5777 if (!skb_has_frag_list(skb))
5778 skb_shinfo(skb)->frag_list = new_frag;
5779
5780 new_frag->next = NULL;
5781
5782 (*last_frag)->next = new_frag;
5783 *last_frag = new_frag;
5784
5785 skb->len += new_frag->len;
5786 skb->data_len += new_frag->len;
5787 skb->truesize += new_frag->truesize;
5788}
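/* Bookkeeping example (illustrative sizes): appending a 100-byte
 * fragment to an SDU head that already holds 200 bytes of linear data
 * plus one 50-byte fragment leaves skb->len = 350 and
 * skb->data_len = 150.  Only the fragment list grows; the head skb's
 * linear buffer is never copied or reallocated.
 */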
5789
5790static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5791 struct l2cap_ctrl *control)
5792{
5793 int err = -EINVAL;
5794
5795 switch (control->sar) {
5796 case L2CAP_SAR_UNSEGMENTED:
5797 if (chan->sdu)
5798 break;
5799
5800 err = chan->ops->recv(chan, skb);
5801 break;
5802
5803 case L2CAP_SAR_START:
5804 if (chan->sdu)
5805 break;
5806
5807 chan->sdu_len = get_unaligned_le16(skb->data);
5808 skb_pull(skb, L2CAP_SDULEN_SIZE);
5809
5810 if (chan->sdu_len > chan->imtu) {
5811 err = -EMSGSIZE;
5812 break;
5813 }
5814
5815 if (skb->len >= chan->sdu_len)
5816 break;
5817
5818 chan->sdu = skb;
5819 chan->sdu_last_frag = skb;
5820
5821 skb = NULL;
5822 err = 0;
5823 break;
5824
5825 case L2CAP_SAR_CONTINUE:
5826 if (!chan->sdu)
5827 break;
5828
5829 append_skb_frag(chan->sdu, skb,
5830 &chan->sdu_last_frag);
5831 skb = NULL;
5832
5833 if (chan->sdu->len >= chan->sdu_len)
5834 break;
5835
5836 err = 0;
5837 break;
5838
5839 case L2CAP_SAR_END:
5840 if (!chan->sdu)
5841 break;
5842
5843 append_skb_frag(chan->sdu, skb,
5844 &chan->sdu_last_frag);
5845 skb = NULL;
5846
5847 if (chan->sdu->len != chan->sdu_len)
5848 break;
5849
5850 err = chan->ops->recv(chan, chan->sdu);
5851
5852 if (!err) {
5853 /* Reassembly complete */
5854 chan->sdu = NULL;
5855 chan->sdu_last_frag = NULL;
5856 chan->sdu_len = 0;
5857 }
5858 break;
5859 }
5860
5861 if (err) {
5862 kfree_skb(skb);
5863 kfree_skb(chan->sdu);
5864 chan->sdu = NULL;
5865 chan->sdu_last_frag = NULL;
5866 chan->sdu_len = 0;
5867 }
5868
5869 return err;
5870}
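/* Reassembly example (illustrative sizes): a 1000-byte SDU split over
 * three I-frames arrives as SAR_START (2-byte SDU length of 1000 plus
 * the first 400 bytes), SAR_CONTINUE (400 bytes) and SAR_END (200
 * bytes).  The START frame becomes chan->sdu, later fragments are
 * chained onto it with append_skb_frag(), and only when the accumulated
 * length equals chan->sdu_len is the complete SDU passed to
 * chan->ops->recv().
 */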
5871
5872static int l2cap_resegment(struct l2cap_chan *chan)
5873{
5874 /* Placeholder */
5875 return 0;
5876}
5877
5878void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5879{
5880 u8 event;
5881
5882 if (chan->mode != L2CAP_MODE_ERTM)
5883 return;
5884
5885 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5886 l2cap_tx(chan, NULL, NULL, event);
5887}
5888
5889static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5890{
5891 int err = 0;
5892 /* Pass sequential frames to l2cap_reassemble_sdu()
5893 * until a gap is encountered.
5894 */
5895
5896 BT_DBG("chan %p", chan);
5897
5898 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5899 struct sk_buff *skb;
5900 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5901 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5902
5903 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5904
5905 if (!skb)
5906 break;
5907
5908 skb_unlink(skb, &chan->srej_q);
5909 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5910 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5911 if (err)
5912 break;
5913 }
5914
5915 if (skb_queue_empty(&chan->srej_q)) {
5916 chan->rx_state = L2CAP_RX_STATE_RECV;
5917 l2cap_send_ack(chan);
5918 }
5919
5920 return err;
5921}
5922
5923static void l2cap_handle_srej(struct l2cap_chan *chan,
5924 struct l2cap_ctrl *control)
5925{
5926 struct sk_buff *skb;
5927
5928 BT_DBG("chan %p, control %p", chan, control);
5929
5930 if (control->reqseq == chan->next_tx_seq) {
5931 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5932 l2cap_send_disconn_req(chan, ECONNRESET);
5933 return;
5934 }
5935
5936 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5937
5938 if (skb == NULL) {
5939 BT_DBG("Seq %d not available for retransmission",
5940 control->reqseq);
5941 return;
5942 }
5943
5944 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5945 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5946 l2cap_send_disconn_req(chan, ECONNRESET);
5947 return;
5948 }
5949
5950 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5951
5952 if (control->poll) {
5953 l2cap_pass_to_tx(chan, control);
5954
5955 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5956 l2cap_retransmit(chan, control);
5957 l2cap_ertm_send(chan);
5958
5959 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5960 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5961 chan->srej_save_reqseq = control->reqseq;
5962 }
5963 } else {
5964 l2cap_pass_to_tx_fbit(chan, control);
5965
5966 if (control->final) {
5967 if (chan->srej_save_reqseq != control->reqseq ||
5968 !test_and_clear_bit(CONN_SREJ_ACT,
5969 &chan->conn_state))
5970 l2cap_retransmit(chan, control);
5971 } else {
5972 l2cap_retransmit(chan, control);
5973 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5974 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5975 chan->srej_save_reqseq = control->reqseq;
5976 }
5977 }
5978 }
5979}
5980
5981static void l2cap_handle_rej(struct l2cap_chan *chan,
5982 struct l2cap_ctrl *control)
5983{
5984 struct sk_buff *skb;
5985
5986 BT_DBG("chan %p, control %p", chan, control);
5987
5988 if (control->reqseq == chan->next_tx_seq) {
5989 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5990 l2cap_send_disconn_req(chan, ECONNRESET);
5991 return;
5992 }
5993
5994 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5995
5996 if (chan->max_tx && skb &&
5997 bt_cb(skb)->control.retries >= chan->max_tx) {
5998 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5999 l2cap_send_disconn_req(chan, ECONNRESET);
6000 return;
6001 }
6002
6003 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6004
6005 l2cap_pass_to_tx(chan, control);
6006
6007 if (control->final) {
6008 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6009 l2cap_retransmit_all(chan, control);
6010 } else {
6011 l2cap_retransmit_all(chan, control);
6012 l2cap_ertm_send(chan);
6013 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6014 set_bit(CONN_REJ_ACT, &chan->conn_state);
6015 }
6016}
6017
6018static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6019{
6020 BT_DBG("chan %p, txseq %d", chan, txseq);
6021
6022 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6023 chan->expected_tx_seq);
6024
6025 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6026 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6027 chan->tx_win) {
6028 /* See notes below regarding "double poll" and
6029 * invalid packets.
6030 */
6031 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6032 BT_DBG("Invalid/Ignore - after SREJ");
6033 return L2CAP_TXSEQ_INVALID_IGNORE;
6034 } else {
6035 BT_DBG("Invalid - in window after SREJ sent");
6036 return L2CAP_TXSEQ_INVALID;
6037 }
6038 }
6039
6040 if (chan->srej_list.head == txseq) {
6041 BT_DBG("Expected SREJ");
6042 return L2CAP_TXSEQ_EXPECTED_SREJ;
6043 }
6044
6045 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6046 BT_DBG("Duplicate SREJ - txseq already stored");
6047 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6048 }
6049
6050 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6051 BT_DBG("Unexpected SREJ - not requested");
6052 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6053 }
6054 }
6055
6056 if (chan->expected_tx_seq == txseq) {
6057 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6058 chan->tx_win) {
6059 BT_DBG("Invalid - txseq outside tx window");
6060 return L2CAP_TXSEQ_INVALID;
6061 } else {
6062 BT_DBG("Expected");
6063 return L2CAP_TXSEQ_EXPECTED;
6064 }
6065 }
6066
6067 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6068 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6069 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6070 return L2CAP_TXSEQ_DUPLICATE;
6071 }
6072
6073 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6074 /* A source of invalid packets is a "double poll" condition,
6075 * where delays cause us to send multiple poll packets. If
6076 * the remote stack receives and processes both polls,
6077 * sequence numbers can wrap around in such a way that a
6078 * resent frame has a sequence number that looks like new data
6079 * with a sequence gap. This would trigger an erroneous SREJ
6080 * request.
6081 *
6082 * Fortunately, this is impossible with a tx window that's
6083 * less than half of the maximum sequence number, which allows
6084 * invalid frames to be safely ignored.
6085 *
6086 * With tx window sizes greater than half of the tx window
6087 * maximum, the frame is invalid and cannot be ignored. This
6088 * causes a disconnect.
6089 */
6090
6091 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6092 BT_DBG("Invalid/Ignore - txseq outside tx window");
6093 return L2CAP_TXSEQ_INVALID_IGNORE;
6094 } else {
6095 BT_DBG("Invalid - txseq outside tx window");
6096 return L2CAP_TXSEQ_INVALID;
6097 }
6098 } else {
6099 BT_DBG("Unexpected - txseq indicates missing frames");
6100 return L2CAP_TXSEQ_UNEXPECTED;
6101 }
6102}
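/* Classification example (illustrative numbers): with tx_win = 63,
 * last_acked_seq = 10 and expected_tx_seq = 12, txseq = 12 is EXPECTED,
 * txseq = 11 is a DUPLICATE (its offset of 1 from last_acked_seq is
 * smaller than the expected offset of 2) and txseq = 14 is UNEXPECTED,
 * which triggers SREJ handling.  Any txseq whose offset from
 * last_acked_seq reaches tx_win is outside the window and is either
 * ignored or treated as invalid, depending on whether
 * tx_win <= (tx_win_max + 1) / 2.
 */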
6103
6104static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6105 struct l2cap_ctrl *control,
6106 struct sk_buff *skb, u8 event)
6107{
6108 int err = 0;
6109 bool skb_in_use = false;
6110
6111 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6112 event);
6113
6114 switch (event) {
6115 case L2CAP_EV_RECV_IFRAME:
6116 switch (l2cap_classify_txseq(chan, control->txseq)) {
6117 case L2CAP_TXSEQ_EXPECTED:
6118 l2cap_pass_to_tx(chan, control);
6119
6120 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6121 BT_DBG("Busy, discarding expected seq %d",
6122 control->txseq);
6123 break;
6124 }
6125
6126 chan->expected_tx_seq = __next_seq(chan,
6127 control->txseq);
6128
6129 chan->buffer_seq = chan->expected_tx_seq;
6130 skb_in_use = true;
6131
6132 err = l2cap_reassemble_sdu(chan, skb, control);
6133 if (err)
6134 break;
6135
6136 if (control->final) {
6137 if (!test_and_clear_bit(CONN_REJ_ACT,
6138 &chan->conn_state)) {
6139 control->final = 0;
6140 l2cap_retransmit_all(chan, control);
6141 l2cap_ertm_send(chan);
6142 }
6143 }
6144
6145 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6146 l2cap_send_ack(chan);
6147 break;
6148 case L2CAP_TXSEQ_UNEXPECTED:
6149 l2cap_pass_to_tx(chan, control);
6150
6151 /* Can't issue SREJ frames in the local busy state.
6152 * Drop this frame, it will be seen as missing
6153 * when local busy is exited.
6154 */
6155 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6156 BT_DBG("Busy, discarding unexpected seq %d",
6157 control->txseq);
6158 break;
6159 }
6160
6161 /* There was a gap in the sequence, so an SREJ
6162 * must be sent for each missing frame. The
6163 * current frame is stored for later use.
6164 */
6165 skb_queue_tail(&chan->srej_q, skb);
6166 skb_in_use = true;
6167 BT_DBG("Queued %p (queue len %d)", skb,
6168 skb_queue_len(&chan->srej_q));
6169
6170 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6171 l2cap_seq_list_clear(&chan->srej_list);
6172 l2cap_send_srej(chan, control->txseq);
6173
6174 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6175 break;
6176 case L2CAP_TXSEQ_DUPLICATE:
6177 l2cap_pass_to_tx(chan, control);
6178 break;
6179 case L2CAP_TXSEQ_INVALID_IGNORE:
6180 break;
6181 case L2CAP_TXSEQ_INVALID:
6182 default:
6183 l2cap_send_disconn_req(chan, ECONNRESET);
6184 break;
6185 }
6186 break;
6187 case L2CAP_EV_RECV_RR:
6188 l2cap_pass_to_tx(chan, control);
6189 if (control->final) {
6190 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6191
6192 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6193 !__chan_is_moving(chan)) {
6194 control->final = 0;
6195 l2cap_retransmit_all(chan, control);
6196 }
6197
6198 l2cap_ertm_send(chan);
6199 } else if (control->poll) {
6200 l2cap_send_i_or_rr_or_rnr(chan);
6201 } else {
6202 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6203 &chan->conn_state) &&
6204 chan->unacked_frames)
6205 __set_retrans_timer(chan);
6206
6207 l2cap_ertm_send(chan);
6208 }
6209 break;
6210 case L2CAP_EV_RECV_RNR:
6211 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6212 l2cap_pass_to_tx(chan, control);
6213 if (control && control->poll) {
6214 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6215 l2cap_send_rr_or_rnr(chan, 0);
6216 }
6217 __clear_retrans_timer(chan);
6218 l2cap_seq_list_clear(&chan->retrans_list);
6219 break;
6220 case L2CAP_EV_RECV_REJ:
6221 l2cap_handle_rej(chan, control);
6222 break;
6223 case L2CAP_EV_RECV_SREJ:
6224 l2cap_handle_srej(chan, control);
6225 break;
6226 default:
6227 break;
6228 }
6229
6230 if (skb && !skb_in_use) {
6231 BT_DBG("Freeing %p", skb);
6232 kfree_skb(skb);
6233 }
6234
6235 return err;
6236}
6237
6238static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6239 struct l2cap_ctrl *control,
6240 struct sk_buff *skb, u8 event)
6241{
6242 int err = 0;
6243 u16 txseq = control->txseq;
6244 bool skb_in_use = false;
6245
6246 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6247 event);
6248
6249 switch (event) {
6250 case L2CAP_EV_RECV_IFRAME:
6251 switch (l2cap_classify_txseq(chan, txseq)) {
6252 case L2CAP_TXSEQ_EXPECTED:
6253 /* Keep frame for reassembly later */
6254 l2cap_pass_to_tx(chan, control);
6255 skb_queue_tail(&chan->srej_q, skb);
6256 skb_in_use = true;
6257 BT_DBG("Queued %p (queue len %d)", skb,
6258 skb_queue_len(&chan->srej_q));
6259
6260 chan->expected_tx_seq = __next_seq(chan, txseq);
6261 break;
6262 case L2CAP_TXSEQ_EXPECTED_SREJ:
6263 l2cap_seq_list_pop(&chan->srej_list);
6264
6265 l2cap_pass_to_tx(chan, control);
6266 skb_queue_tail(&chan->srej_q, skb);
6267 skb_in_use = true;
6268 BT_DBG("Queued %p (queue len %d)", skb,
6269 skb_queue_len(&chan->srej_q));
6270
6271 err = l2cap_rx_queued_iframes(chan);
6272 if (err)
6273 break;
6274
6275 break;
6276 case L2CAP_TXSEQ_UNEXPECTED:
6277 /* Got a frame that can't be reassembled yet.
6278 * Save it for later, and send SREJs to cover
6279 * the missing frames.
6280 */
6281 skb_queue_tail(&chan->srej_q, skb);
6282 skb_in_use = true;
6283 BT_DBG("Queued %p (queue len %d)", skb,
6284 skb_queue_len(&chan->srej_q));
6285
6286 l2cap_pass_to_tx(chan, control);
6287 l2cap_send_srej(chan, control->txseq);
6288 break;
6289 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6290 /* This frame was requested with an SREJ, but
6291 * some expected retransmitted frames are
6292 * missing. Request retransmission of missing
6293 * SREJ'd frames.
6294 */
6295 skb_queue_tail(&chan->srej_q, skb);
6296 skb_in_use = true;
6297 BT_DBG("Queued %p (queue len %d)", skb,
6298 skb_queue_len(&chan->srej_q));
6299
6300 l2cap_pass_to_tx(chan, control);
6301 l2cap_send_srej_list(chan, control->txseq);
6302 break;
6303 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6304 /* We've already queued this frame. Drop this copy. */
6305 l2cap_pass_to_tx(chan, control);
6306 break;
6307 case L2CAP_TXSEQ_DUPLICATE:
6308 /* Expecting a later sequence number, so this frame
6309 * was already received. Ignore it completely.
6310 */
6311 break;
6312 case L2CAP_TXSEQ_INVALID_IGNORE:
6313 break;
6314 case L2CAP_TXSEQ_INVALID:
6315 default:
6316 l2cap_send_disconn_req(chan, ECONNRESET);
6317 break;
6318 }
6319 break;
6320 case L2CAP_EV_RECV_RR:
6321 l2cap_pass_to_tx(chan, control);
6322 if (control->final) {
6323 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6324
6325 if (!test_and_clear_bit(CONN_REJ_ACT,
6326 &chan->conn_state)) {
6327 control->final = 0;
6328 l2cap_retransmit_all(chan, control);
6329 }
6330
6331 l2cap_ertm_send(chan);
6332 } else if (control->poll) {
6333 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6334 &chan->conn_state) &&
6335 chan->unacked_frames) {
6336 __set_retrans_timer(chan);
6337 }
6338
6339 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6340 l2cap_send_srej_tail(chan);
6341 } else {
6342 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6343 &chan->conn_state) &&
6344 chan->unacked_frames)
6345 __set_retrans_timer(chan);
6346
6347 l2cap_send_ack(chan);
6348 }
6349 break;
6350 case L2CAP_EV_RECV_RNR:
6351 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6352 l2cap_pass_to_tx(chan, control);
6353 if (control->poll) {
6354 l2cap_send_srej_tail(chan);
6355 } else {
6356 struct l2cap_ctrl rr_control;
6357 memset(&rr_control, 0, sizeof(rr_control));
6358 rr_control.sframe = 1;
6359 rr_control.super = L2CAP_SUPER_RR;
6360 rr_control.reqseq = chan->buffer_seq;
6361 l2cap_send_sframe(chan, &rr_control);
6362 }
6363
6364 break;
6365 case L2CAP_EV_RECV_REJ:
6366 l2cap_handle_rej(chan, control);
6367 break;
6368 case L2CAP_EV_RECV_SREJ:
6369 l2cap_handle_srej(chan, control);
6370 break;
6371 }
6372
6373 if (skb && !skb_in_use) {
6374 BT_DBG("Freeing %p", skb);
6375 kfree_skb(skb);
6376 }
6377
6378 return err;
6379}
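
/* A rough worked example of the SREJ_SENT path above: suppose frames 3
 * and 4 were lost while frame 5 arrived, so the channel entered
 * SREJ_SENT with srej_list = {3, 4} and frame 5 parked in srej_q.  When
 * the retransmitted frame 3 arrives it matches the head of srej_list
 * (TXSEQ_EXPECTED_SREJ): it is popped, queued, and
 * l2cap_rx_queued_iframes() delivers any now-contiguous frames.  Once
 * the SREJ queue drains, the channel drops back to the normal RECV
 * state.
 */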
6380
6381static int l2cap_finish_move(struct l2cap_chan *chan)
6382{
6383 BT_DBG("chan %p", chan);
6384
6385 chan->rx_state = L2CAP_RX_STATE_RECV;
6386
6387 if (chan->hs_hcon)
6388 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6389 else
6390 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6391
6392 return l2cap_resegment(chan);
6393}
6394
6395static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6396 struct l2cap_ctrl *control,
6397 struct sk_buff *skb, u8 event)
6398{
6399 int err;
6400
6401 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6402 event);
6403
6404 if (!control->poll)
6405 return -EPROTO;
6406
6407 l2cap_process_reqseq(chan, control->reqseq);
6408
6409 if (!skb_queue_empty(&chan->tx_q))
6410 chan->tx_send_head = skb_peek(&chan->tx_q);
6411 else
6412 chan->tx_send_head = NULL;
6413
6414 /* Rewind next_tx_seq to the point expected
6415 * by the receiver.
6416 */
6417 chan->next_tx_seq = control->reqseq;
6418 chan->unacked_frames = 0;
6419
6420 err = l2cap_finish_move(chan);
6421 if (err)
6422 return err;
6423
6424 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6425 l2cap_send_i_or_rr_or_rnr(chan);
6426
6427 if (event == L2CAP_EV_RECV_IFRAME)
6428 return -EPROTO;
6429
6430 return l2cap_rx_state_recv(chan, control, NULL, event);
6431}
6432
6433static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6434 struct l2cap_ctrl *control,
6435 struct sk_buff *skb, u8 event)
6436{
6437 int err;
6438
6439 if (!control->final)
6440 return -EPROTO;
6441
6442 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6443
6444 chan->rx_state = L2CAP_RX_STATE_RECV;
6445 l2cap_process_reqseq(chan, control->reqseq);
6446
6447 if (!skb_queue_empty(&chan->tx_q))
6448 chan->tx_send_head = skb_peek(&chan->tx_q);
6449 else
6450 chan->tx_send_head = NULL;
6451
6452 /* Rewind next_tx_seq to the point expected
6453 * by the receiver.
6454 */
6455 chan->next_tx_seq = control->reqseq;
6456 chan->unacked_frames = 0;
6457
6458 if (chan->hs_hcon)
6459 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6460 else
6461 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6462
6463 err = l2cap_resegment(chan);
6464
6465 if (!err)
6466 err = l2cap_rx_state_recv(chan, control, skb, event);
6467
6468 return err;
6469}
6470
6471static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6472{
6473 /* Make sure reqseq is for a packet that has been sent but not acked */
6474 u16 unacked;
6475
6476 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6477 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6478}
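
/* Worked example for __valid_reqseq(): ERTM sequence numbers wrap
 * modulo the window size (64 with the standard control field), which is
 * what __seq_offset() accounts for.  With next_tx_seq = 10 and
 * expected_ack_seq = 5 there are 5 unacked frames, so any reqseq in the
 * range 5..10 is acceptable: reqseq = 7 gives an offset of 3 <= 5
 * (valid), while reqseq = 12 gives (10 - 12) mod 64 = 62 > 5 (invalid).
 */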
6479
6480static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6481 struct sk_buff *skb, u8 event)
6482{
6483 int err = 0;
6484
6485 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6486 control, skb, event, chan->rx_state);
6487
6488 if (__valid_reqseq(chan, control->reqseq)) {
6489 switch (chan->rx_state) {
6490 case L2CAP_RX_STATE_RECV:
6491 err = l2cap_rx_state_recv(chan, control, skb, event);
6492 break;
6493 case L2CAP_RX_STATE_SREJ_SENT:
6494 err = l2cap_rx_state_srej_sent(chan, control, skb,
6495 event);
6496 break;
6497 case L2CAP_RX_STATE_WAIT_P:
6498 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6499 break;
6500 case L2CAP_RX_STATE_WAIT_F:
6501 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6502 break;
6503 default:
6504 /* shut it down */
6505 break;
6506 }
6507 } else {
 6508		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
6509 control->reqseq, chan->next_tx_seq,
6510 chan->expected_ack_seq);
6511 l2cap_send_disconn_req(chan, ECONNRESET);
6512 }
6513
6514 return err;
6515}
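
/* Roughly, the ERTM receive side moves between these states: RECV is
 * the steady state, SREJ_SENT is entered while selective-reject
 * recovery is outstanding, and WAIT_P/WAIT_F bracket an AMP channel
 * move, where the channel waits for a poll (P = 1) or final (F = 1)
 * S-frame before resegmenting for the new link MTU.
 */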
6516
6517static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6518 struct sk_buff *skb)
6519{
6520 int err = 0;
6521
6522 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6523 chan->rx_state);
6524
6525 if (l2cap_classify_txseq(chan, control->txseq) ==
6526 L2CAP_TXSEQ_EXPECTED) {
6527 l2cap_pass_to_tx(chan, control);
6528
6529 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6530 __next_seq(chan, chan->buffer_seq));
6531
6532 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6533
6534 l2cap_reassemble_sdu(chan, skb, control);
6535 } else {
6536 if (chan->sdu) {
6537 kfree_skb(chan->sdu);
6538 chan->sdu = NULL;
6539 }
6540 chan->sdu_last_frag = NULL;
6541 chan->sdu_len = 0;
6542
6543 if (skb) {
6544 BT_DBG("Freeing %p", skb);
6545 kfree_skb(skb);
6546 }
6547 }
6548
6549 chan->last_acked_seq = control->txseq;
6550 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6551
6552 return err;
6553}
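
/* Example of the streaming-mode resync above: if expected_tx_seq is 5
 * and frame 7 arrives, any partially reassembled SDU is discarded and
 * expected_tx_seq jumps to 8.  Frames 5 and 6 are simply lost, since
 * streaming mode never requests retransmission.
 */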
6554
6555static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6556{
6557 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6558 u16 len;
6559 u8 event;
6560
6561 __unpack_control(chan, skb);
6562
6563 len = skb->len;
6564
6565 /*
 6566	 * We can just drop a corrupted I-frame here: it simply looks
 6567	 * like a missing frame, so the normal recovery procedure kicks
 6568	 * in and the peer is asked to retransmit it.
6569 */
6570 if (l2cap_check_fcs(chan, skb))
6571 goto drop;
6572
6573 if (!control->sframe && control->sar == L2CAP_SAR_START)
6574 len -= L2CAP_SDULEN_SIZE;
6575
6576 if (chan->fcs == L2CAP_FCS_CRC16)
6577 len -= L2CAP_FCS_SIZE;
6578
6579 if (len > chan->mps) {
6580 l2cap_send_disconn_req(chan, ECONNRESET);
6581 goto drop;
6582 }
6583
6584 if (!control->sframe) {
6585 int err;
6586
6587 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6588 control->sar, control->reqseq, control->final,
6589 control->txseq);
6590
6591 /* Validate F-bit - F=0 always valid, F=1 only
6592 * valid in TX WAIT_F
6593 */
6594 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6595 goto drop;
6596
6597 if (chan->mode != L2CAP_MODE_STREAMING) {
6598 event = L2CAP_EV_RECV_IFRAME;
6599 err = l2cap_rx(chan, control, skb, event);
6600 } else {
6601 err = l2cap_stream_rx(chan, control, skb);
6602 }
6603
6604 if (err)
6605 l2cap_send_disconn_req(chan, ECONNRESET);
6606 } else {
6607 const u8 rx_func_to_event[4] = {
6608 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6609 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6610 };
6611
6612 /* Only I-frames are expected in streaming mode */
6613 if (chan->mode == L2CAP_MODE_STREAMING)
6614 goto drop;
6615
6616 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6617 control->reqseq, control->final, control->poll,
6618 control->super);
6619
6620 if (len != 0) {
6621 BT_ERR("Trailing bytes: %d in sframe", len);
6622 l2cap_send_disconn_req(chan, ECONNRESET);
6623 goto drop;
6624 }
6625
6626 /* Validate F and P bits */
6627 if (control->final && (control->poll ||
6628 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6629 goto drop;
6630
6631 event = rx_func_to_event[control->super];
6632 if (l2cap_rx(chan, control, skb, event))
6633 l2cap_send_disconn_req(chan, ECONNRESET);
6634 }
6635
6636 return 0;
6637
6638drop:
6639 kfree_skb(skb);
6640 return 0;
6641}
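
/* The rx_func_to_event[] table above follows the order of the two-bit
 * S field in the S-frame control word: 0 = RR, 1 = REJ, 2 = RNR,
 * 3 = SREJ.  An S-frame carries no payload, so after stripping the
 * control field and FCS len must be 0; anything else is treated as a
 * protocol error and the channel is disconnected.
 */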
6642
6643static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6644{
6645 struct l2cap_conn *conn = chan->conn;
6646 struct l2cap_le_credits pkt;
6647 u16 return_credits;
6648
6649 /* We return more credits to the sender only after the amount of
6650 * credits falls below half of the initial amount.
6651 */
6652 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6653 return;
6654
6655 return_credits = le_max_credits - chan->rx_credits;
6656
6657 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6658
6659 chan->rx_credits += return_credits;
6660
6661 pkt.cid = cpu_to_le16(chan->scid);
6662 pkt.credits = cpu_to_le16(return_credits);
6663
6664 chan->ident = l2cap_get_ident(conn);
6665
6666 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6667}
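
/* Worked example for the credit refill above, assuming
 * le_max_credits = 10: no credits are returned while rx_credits is at
 * least (10 + 1) / 2 = 5.  As soon as it drops to 4, an LE Flow Control
 * Credit packet returning 10 - 4 = 6 credits is sent, topping the
 * sender back up to the full 10.
 */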
6668
6669static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6670{
6671 int err;
6672
6673 if (!chan->rx_credits) {
6674 BT_ERR("No credits to receive LE L2CAP data");
6675 l2cap_send_disconn_req(chan, ECONNRESET);
6676 return -ENOBUFS;
6677 }
6678
6679 if (chan->imtu < skb->len) {
6680 BT_ERR("Too big LE L2CAP PDU");
6681 return -ENOBUFS;
6682 }
6683
6684 chan->rx_credits--;
6685 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6686
6687 l2cap_chan_le_send_credits(chan);
6688
6689 err = 0;
6690
6691 if (!chan->sdu) {
6692 u16 sdu_len;
6693
6694 sdu_len = get_unaligned_le16(skb->data);
6695 skb_pull(skb, L2CAP_SDULEN_SIZE);
6696
6697 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6698 sdu_len, skb->len, chan->imtu);
6699
6700 if (sdu_len > chan->imtu) {
6701 BT_ERR("Too big LE L2CAP SDU length received");
6702 err = -EMSGSIZE;
6703 goto failed;
6704 }
6705
6706 if (skb->len > sdu_len) {
6707 BT_ERR("Too much LE L2CAP data received");
6708 err = -EINVAL;
6709 goto failed;
6710 }
6711
6712 if (skb->len == sdu_len)
6713 return chan->ops->recv(chan, skb);
6714
6715 chan->sdu = skb;
6716 chan->sdu_len = sdu_len;
6717 chan->sdu_last_frag = skb;
6718
6719 return 0;
6720 }
6721
6722 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6723 chan->sdu->len, skb->len, chan->sdu_len);
6724
6725 if (chan->sdu->len + skb->len > chan->sdu_len) {
6726 BT_ERR("Too much LE L2CAP data received");
6727 err = -EINVAL;
6728 goto failed;
6729 }
6730
6731 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6732 skb = NULL;
6733
6734 if (chan->sdu->len == chan->sdu_len) {
6735 err = chan->ops->recv(chan, chan->sdu);
6736 if (!err) {
6737 chan->sdu = NULL;
6738 chan->sdu_last_frag = NULL;
6739 chan->sdu_len = 0;
6740 }
6741 }
6742
6743failed:
6744 if (err) {
6745 kfree_skb(skb);
6746 kfree_skb(chan->sdu);
6747 chan->sdu = NULL;
6748 chan->sdu_last_frag = NULL;
6749 chan->sdu_len = 0;
6750 }
6751
6752 /* We can't return an error here since we took care of the skb
6753 * freeing internally. An error return would cause the caller to
6754 * do a double-free of the skb.
6755 */
6756 return 0;
6757}
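
/* Example of LE credit-based SDU reassembly, assuming an MTU of 100 and
 * an MPS of 50: the first PDU carries the 2-byte SDU length (say 100)
 * followed by up to 48 bytes of data, so chan->sdu is started and
 * chan->sdu_len set to 100.  Two further PDUs of 50 and 2 bytes append
 * to it, and once chan->sdu->len reaches 100 the complete SDU is handed
 * to chan->ops->recv().
 */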
6758
6759static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6760 struct sk_buff *skb)
6761{
6762 struct l2cap_chan *chan;
6763
6764 chan = l2cap_get_chan_by_scid(conn, cid);
6765 if (!chan) {
6766 if (cid == L2CAP_CID_A2MP) {
6767 chan = a2mp_channel_create(conn, skb);
6768 if (!chan) {
6769 kfree_skb(skb);
6770 return;
6771 }
6772
6773 l2cap_chan_lock(chan);
6774 } else {
6775 BT_DBG("unknown cid 0x%4.4x", cid);
6776 /* Drop packet and return */
6777 kfree_skb(skb);
6778 return;
6779 }
6780 }
6781
6782 BT_DBG("chan %p, len %d", chan, skb->len);
6783
6784 if (chan->state != BT_CONNECTED)
6785 goto drop;
6786
6787 switch (chan->mode) {
6788 case L2CAP_MODE_LE_FLOWCTL:
6789 if (l2cap_le_data_rcv(chan, skb) < 0)
6790 goto drop;
6791
6792 goto done;
6793
6794 case L2CAP_MODE_BASIC:
 6795		/* If the socket receive buffer overflows we drop data here,
 6796		 * which is *bad* because L2CAP has to be reliable.
 6797		 * But we have no other choice: basic mode L2CAP does not
 6798		 * provide a flow control mechanism. */
6799
6800 if (chan->imtu < skb->len) {
6801 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6802 goto drop;
6803 }
6804
6805 if (!chan->ops->recv(chan, skb))
6806 goto done;
6807 break;
6808
6809 case L2CAP_MODE_ERTM:
6810 case L2CAP_MODE_STREAMING:
6811 l2cap_data_rcv(chan, skb);
6812 goto done;
6813
6814 default:
6815 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6816 break;
6817 }
6818
6819drop:
6820 kfree_skb(skb);
6821
6822done:
6823 l2cap_chan_unlock(chan);
6824}
6825
6826static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6827 struct sk_buff *skb)
6828{
6829 struct hci_conn *hcon = conn->hcon;
6830 struct l2cap_chan *chan;
6831
6832 if (hcon->type != ACL_LINK)
6833 goto drop;
6834
6835 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6836 ACL_LINK);
6837 if (!chan)
6838 goto drop;
6839
6840 BT_DBG("chan %p, len %d", chan, skb->len);
6841
6842 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6843 goto drop;
6844
6845 if (chan->imtu < skb->len)
6846 goto drop;
6847
6848 /* Store remote BD_ADDR and PSM for msg_name */
6849 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6850 bt_cb(skb)->psm = psm;
6851
6852 if (!chan->ops->recv(chan, skb))
6853 return;
6854
6855drop:
6856 kfree_skb(skb);
6857}
6858
6859static void l2cap_att_channel(struct l2cap_conn *conn,
6860 struct sk_buff *skb)
6861{
6862 struct hci_conn *hcon = conn->hcon;
6863 struct l2cap_chan *chan;
6864
6865 if (hcon->type != LE_LINK)
6866 goto drop;
6867
6868 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6869 &hcon->src, &hcon->dst);
6870 if (!chan)
6871 goto drop;
6872
6873 BT_DBG("chan %p, len %d", chan, skb->len);
6874
6875 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
6876 goto drop;
6877
6878 if (chan->imtu < skb->len)
6879 goto drop;
6880
6881 if (!chan->ops->recv(chan, skb))
6882 return;
6883
6884drop:
6885 kfree_skb(skb);
6886}
6887
6888static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6889{
6890 struct l2cap_hdr *lh = (void *) skb->data;
6891 struct hci_conn *hcon = conn->hcon;
6892 u16 cid, len;
6893 __le16 psm;
6894
6895 if (hcon->state != BT_CONNECTED) {
6896 BT_DBG("queueing pending rx skb");
6897 skb_queue_tail(&conn->pending_rx, skb);
6898 return;
6899 }
6900
6901 skb_pull(skb, L2CAP_HDR_SIZE);
6902 cid = __le16_to_cpu(lh->cid);
6903 len = __le16_to_cpu(lh->len);
6904
6905 if (len != skb->len) {
6906 kfree_skb(skb);
6907 return;
6908 }
6909
6910 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6911
6912 switch (cid) {
6913 case L2CAP_CID_SIGNALING:
6914 l2cap_sig_channel(conn, skb);
6915 break;
6916
6917 case L2CAP_CID_CONN_LESS:
6918 psm = get_unaligned((__le16 *) skb->data);
6919 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6920 l2cap_conless_channel(conn, psm, skb);
6921 break;
6922
6923 case L2CAP_CID_ATT:
6924 l2cap_att_channel(conn, skb);
6925 break;
6926
6927 case L2CAP_CID_LE_SIGNALING:
6928 l2cap_le_sig_channel(conn, skb);
6929 break;
6930
6931 case L2CAP_CID_SMP:
6932 if (smp_sig_channel(conn, skb))
6933 l2cap_conn_del(conn->hcon, EACCES);
6934 break;
6935
6936 case L2CAP_FC_6LOWPAN:
6937 bt_6lowpan_recv(conn, skb);
6938 break;
6939
6940 default:
6941 l2cap_data_channel(conn, cid, skb);
6942 break;
6943 }
6944}
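
/* Every frame handled above starts with the 4-byte basic L2CAP header:
 * a little-endian payload length followed by the destination CID.  For
 * example the bytes 34 00 05 00 describe a 0x0034 = 52 byte payload on
 * CID 0x0005, which is routed to l2cap_le_sig_channel().  Anything
 * outside the fixed CIDs falls through to l2cap_data_channel() and is
 * matched against a dynamic channel.
 */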
6945
6946static void process_pending_rx(struct work_struct *work)
6947{
6948 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6949 pending_rx_work);
6950 struct sk_buff *skb;
6951
6952 BT_DBG("");
6953
6954 while ((skb = skb_dequeue(&conn->pending_rx)))
6955 l2cap_recv_frame(conn, skb);
6956}
6957
6958static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6959{
6960 struct l2cap_conn *conn = hcon->l2cap_data;
6961 struct hci_chan *hchan;
6962
6963 if (conn)
6964 return conn;
6965
6966 hchan = hci_chan_create(hcon);
6967 if (!hchan)
6968 return NULL;
6969
6970 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
6971 if (!conn) {
6972 hci_chan_del(hchan);
6973 return NULL;
6974 }
6975
6976 kref_init(&conn->ref);
6977 hcon->l2cap_data = conn;
6978 conn->hcon = hcon;
6979 hci_conn_get(conn->hcon);
6980 conn->hchan = hchan;
6981
6982 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
6983
6984 switch (hcon->type) {
6985 case LE_LINK:
6986 if (hcon->hdev->le_mtu) {
6987 conn->mtu = hcon->hdev->le_mtu;
6988 break;
6989 }
6990 /* fall through */
6991 default:
6992 conn->mtu = hcon->hdev->acl_mtu;
6993 break;
6994 }
6995
6996 conn->feat_mask = 0;
6997
6998 if (hcon->type == ACL_LINK)
6999 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
7000 &hcon->hdev->dev_flags);
7001
7002 spin_lock_init(&conn->lock);
7003 mutex_init(&conn->chan_lock);
7004
7005 INIT_LIST_HEAD(&conn->chan_l);
7006 INIT_LIST_HEAD(&conn->users);
7007
7008 if (hcon->type == LE_LINK)
7009 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
7010 else
7011 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7012
7013 skb_queue_head_init(&conn->pending_rx);
7014 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7015
7016 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7017
7018 return conn;
7019}
7020
7021static bool is_valid_psm(u16 psm, u8 dst_type)
{
7022 if (!psm)
7023 return false;
7024
7025 if (bdaddr_type_is_le(dst_type))
7026 return (psm <= 0x00ff);
7027
7028 /* PSM must be odd and lsb of upper byte must be 0 */
7029 return ((psm & 0x0101) == 0x0001);
7030}
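
/* Examples for is_valid_psm(): on BR/EDR a PSM is valid when it is odd
 * and the least significant bit of its upper octet is clear, so 0x0001
 * and 0x1001 pass while 0x0002 (even) and 0x0103 (upper-octet LSB set)
 * are rejected.  For LE destinations only the 8-bit LE PSM space is
 * allowed, i.e. values up to 0x00ff.
 */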
7031
7032int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7033 bdaddr_t *dst, u8 dst_type)
7034{
7035 struct l2cap_conn *conn;
7036 struct hci_conn *hcon;
7037 struct hci_dev *hdev;
7038 __u8 auth_type;
7039 int err;
7040
7041 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7042 dst_type, __le16_to_cpu(psm));
7043
7044 hdev = hci_get_route(dst, &chan->src);
7045 if (!hdev)
7046 return -EHOSTUNREACH;
7047
7048 hci_dev_lock(hdev);
7049
7050 l2cap_chan_lock(chan);
7051
7052 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7053 chan->chan_type != L2CAP_CHAN_RAW) {
7054 err = -EINVAL;
7055 goto done;
7056 }
7057
7058 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7059 err = -EINVAL;
7060 goto done;
7061 }
7062
7063 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7064 err = -EINVAL;
7065 goto done;
7066 }
7067
7068 switch (chan->mode) {
7069 case L2CAP_MODE_BASIC:
7070 break;
7071 case L2CAP_MODE_LE_FLOWCTL:
7072 l2cap_le_flowctl_init(chan);
7073 break;
7074 case L2CAP_MODE_ERTM:
7075 case L2CAP_MODE_STREAMING:
7076 if (!disable_ertm)
7077 break;
7078 /* fall through */
7079 default:
7080 err = -ENOTSUPP;
7081 goto done;
7082 }
7083
7084 switch (chan->state) {
7085 case BT_CONNECT:
7086 case BT_CONNECT2:
7087 case BT_CONFIG:
7088 /* Already connecting */
7089 err = 0;
7090 goto done;
7091
7092 case BT_CONNECTED:
7093 /* Already connected */
7094 err = -EISCONN;
7095 goto done;
7096
7097 case BT_OPEN:
7098 case BT_BOUND:
7099 /* Can connect */
7100 break;
7101
7102 default:
7103 err = -EBADFD;
7104 goto done;
7105 }
7106
7107 /* Set destination address and psm */
7108 bacpy(&chan->dst, dst);
7109 chan->dst_type = dst_type;
7110
7111 chan->psm = psm;
7112 chan->dcid = cid;
7113
7114 auth_type = l2cap_get_auth_type(chan);
7115
7116 if (bdaddr_type_is_le(dst_type)) {
7117 /* Convert from L2CAP channel address type to HCI address type
7118 */
7119 if (dst_type == BDADDR_LE_PUBLIC)
7120 dst_type = ADDR_LE_DEV_PUBLIC;
7121 else
7122 dst_type = ADDR_LE_DEV_RANDOM;
7123
7124 hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
7125 auth_type);
7126 } else {
7127 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7128 }
7129
7130 if (IS_ERR(hcon)) {
7131 err = PTR_ERR(hcon);
7132 goto done;
7133 }
7134
7135 conn = l2cap_conn_add(hcon);
7136 if (!conn) {
7137 hci_conn_drop(hcon);
7138 err = -ENOMEM;
7139 goto done;
7140 }
7141
7142 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7143 hci_conn_drop(hcon);
7144 err = -EBUSY;
7145 goto done;
7146 }
7147
7148 /* Update source addr of the socket */
7149 bacpy(&chan->src, &hcon->src);
7150 chan->src_type = bdaddr_type(hcon, hcon->src_type);
7151
7152 l2cap_chan_unlock(chan);
7153 l2cap_chan_add(conn, chan);
7154 l2cap_chan_lock(chan);
7155
7156 /* l2cap_chan_add takes its own ref so we can drop this one */
7157 hci_conn_drop(hcon);
7158
7159 l2cap_state_change(chan, BT_CONNECT);
7160 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7161
7162 /* Release chan->sport so that it can be reused by other
7163 * sockets (as it's only used for listening sockets).
7164 */
7165 write_lock(&chan_list_lock);
7166 chan->sport = 0;
7167 write_unlock(&chan_list_lock);
7168
7169 if (hcon->state == BT_CONNECTED) {
7170 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7171 __clear_chan_timer(chan);
7172 if (l2cap_chan_check_security(chan))
7173 l2cap_state_change(chan, BT_CONNECTED);
7174 } else
7175 l2cap_do_start(chan);
7176 }
7177
7178 err = 0;
7179
7180done:
7181 l2cap_chan_unlock(chan);
7182 hci_dev_unlock(hdev);
7183 hci_dev_put(hdev);
7184 return err;
7185}
7186
7187/* ---- L2CAP interface with lower layer (HCI) ---- */
7188
7189int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7190{
7191 int exact = 0, lm1 = 0, lm2 = 0;
7192 struct l2cap_chan *c;
7193
7194 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7195
7196 /* Find listening sockets and check their link_mode */
7197 read_lock(&chan_list_lock);
7198 list_for_each_entry(c, &chan_list, global_l) {
7199 if (c->state != BT_LISTEN)
7200 continue;
7201
7202 if (!bacmp(&c->src, &hdev->bdaddr)) {
7203 lm1 |= HCI_LM_ACCEPT;
7204 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7205 lm1 |= HCI_LM_MASTER;
7206 exact++;
7207 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7208 lm2 |= HCI_LM_ACCEPT;
7209 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7210 lm2 |= HCI_LM_MASTER;
7211 }
7212 }
7213 read_unlock(&chan_list_lock);
7214
7215 return exact ? lm1 : lm2;
7216}
7217
7218void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7219{
7220 struct l2cap_conn *conn;
7221
7222 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7223
7224 if (!status) {
7225 conn = l2cap_conn_add(hcon);
7226 if (conn)
7227 l2cap_conn_ready(conn);
7228 } else {
7229 l2cap_conn_del(hcon, bt_to_errno(status));
7230 }
7231}
7232
7233int l2cap_disconn_ind(struct hci_conn *hcon)
7234{
7235 struct l2cap_conn *conn = hcon->l2cap_data;
7236
7237 BT_DBG("hcon %p", hcon);
7238
7239 if (!conn)
7240 return HCI_ERROR_REMOTE_USER_TERM;
7241 return conn->disc_reason;
7242}
7243
7244void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7245{
7246 BT_DBG("hcon %p reason %d", hcon, reason);
7247
7248 bt_6lowpan_del_conn(hcon->l2cap_data);
7249
7250 l2cap_conn_del(hcon, bt_to_errno(reason));
7251}
7252
7253static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7254{
7255 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7256 return;
7257
7258 if (encrypt == 0x00) {
7259 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7260 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7261 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7262 chan->sec_level == BT_SECURITY_FIPS)
7263 l2cap_chan_close(chan, ECONNREFUSED);
7264 } else {
7265 if (chan->sec_level == BT_SECURITY_MEDIUM)
7266 __clear_chan_timer(chan);
7267 }
7268}
7269
7270int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7271{
7272 struct l2cap_conn *conn = hcon->l2cap_data;
7273 struct l2cap_chan *chan;
7274
7275 if (!conn)
7276 return 0;
7277
7278 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7279
7280 if (hcon->type == LE_LINK) {
7281 if (!status && encrypt)
7282 smp_distribute_keys(conn);
7283 cancel_delayed_work(&conn->security_timer);
7284 }
7285
7286 mutex_lock(&conn->chan_lock);
7287
7288 list_for_each_entry(chan, &conn->chan_l, list) {
7289 l2cap_chan_lock(chan);
7290
7291 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7292 state_to_string(chan->state));
7293
7294 if (chan->scid == L2CAP_CID_A2MP) {
7295 l2cap_chan_unlock(chan);
7296 continue;
7297 }
7298
7299 if (chan->scid == L2CAP_CID_ATT) {
7300 if (!status && encrypt) {
7301 chan->sec_level = hcon->sec_level;
7302 l2cap_chan_ready(chan);
7303 }
7304
7305 l2cap_chan_unlock(chan);
7306 continue;
7307 }
7308
7309 if (!__l2cap_no_conn_pending(chan)) {
7310 l2cap_chan_unlock(chan);
7311 continue;
7312 }
7313
7314 if (!status && (chan->state == BT_CONNECTED ||
7315 chan->state == BT_CONFIG)) {
7316 chan->ops->resume(chan);
7317 l2cap_check_encryption(chan, encrypt);
7318 l2cap_chan_unlock(chan);
7319 continue;
7320 }
7321
7322 if (chan->state == BT_CONNECT) {
7323 if (!status)
7324 l2cap_start_connection(chan);
7325 else
7326 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7327 } else if (chan->state == BT_CONNECT2) {
7328 struct l2cap_conn_rsp rsp;
7329 __u16 res, stat;
7330
7331 if (!status) {
7332 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7333 res = L2CAP_CR_PEND;
7334 stat = L2CAP_CS_AUTHOR_PEND;
7335 chan->ops->defer(chan);
7336 } else {
7337 l2cap_state_change(chan, BT_CONFIG);
7338 res = L2CAP_CR_SUCCESS;
7339 stat = L2CAP_CS_NO_INFO;
7340 }
7341 } else {
7342 l2cap_state_change(chan, BT_DISCONN);
7343 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7344 res = L2CAP_CR_SEC_BLOCK;
7345 stat = L2CAP_CS_NO_INFO;
7346 }
7347
7348 rsp.scid = cpu_to_le16(chan->dcid);
7349 rsp.dcid = cpu_to_le16(chan->scid);
7350 rsp.result = cpu_to_le16(res);
7351 rsp.status = cpu_to_le16(stat);
7352 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
7353 sizeof(rsp), &rsp);
7354
7355 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7356 res == L2CAP_CR_SUCCESS) {
7357 char buf[128];
7358 set_bit(CONF_REQ_SENT, &chan->conf_state);
7359 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7360 L2CAP_CONF_REQ,
7361 l2cap_build_conf_req(chan, buf),
7362 buf);
7363 chan->num_conf_req++;
7364 }
7365 }
7366
7367 l2cap_chan_unlock(chan);
7368 }
7369
7370 mutex_unlock(&conn->chan_lock);
7371
7372 return 0;
7373}
7374
7375int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7376{
7377 struct l2cap_conn *conn = hcon->l2cap_data;
7378 struct l2cap_hdr *hdr;
7379 int len;
7380
 7381	/* Do not create an L2CAP connection for AMP controllers */
7382 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7383 goto drop;
7384
7385 if (!conn)
7386 conn = l2cap_conn_add(hcon);
7387
7388 if (!conn)
7389 goto drop;
7390
7391 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7392
7393 switch (flags) {
7394 case ACL_START:
7395 case ACL_START_NO_FLUSH:
7396 case ACL_COMPLETE:
7397 if (conn->rx_len) {
7398 BT_ERR("Unexpected start frame (len %d)", skb->len);
7399 kfree_skb(conn->rx_skb);
7400 conn->rx_skb = NULL;
7401 conn->rx_len = 0;
7402 l2cap_conn_unreliable(conn, ECOMM);
7403 }
7404
 7405		/* A start fragment always begins with the basic L2CAP header */
7406 if (skb->len < L2CAP_HDR_SIZE) {
7407 BT_ERR("Frame is too short (len %d)", skb->len);
7408 l2cap_conn_unreliable(conn, ECOMM);
7409 goto drop;
7410 }
7411
7412 hdr = (struct l2cap_hdr *) skb->data;
7413 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7414
7415 if (len == skb->len) {
7416 /* Complete frame received */
7417 l2cap_recv_frame(conn, skb);
7418 return 0;
7419 }
7420
7421 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7422
7423 if (skb->len > len) {
7424 BT_ERR("Frame is too long (len %d, expected len %d)",
7425 skb->len, len);
7426 l2cap_conn_unreliable(conn, ECOMM);
7427 goto drop;
7428 }
7429
7430 /* Allocate skb for the complete frame (with header) */
7431 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7432 if (!conn->rx_skb)
7433 goto drop;
7434
7435 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7436 skb->len);
7437 conn->rx_len = len - skb->len;
7438 break;
7439
7440 case ACL_CONT:
7441 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7442
7443 if (!conn->rx_len) {
7444 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7445 l2cap_conn_unreliable(conn, ECOMM);
7446 goto drop;
7447 }
7448
7449 if (skb->len > conn->rx_len) {
7450 BT_ERR("Fragment is too long (len %d, expected %d)",
7451 skb->len, conn->rx_len);
7452 kfree_skb(conn->rx_skb);
7453 conn->rx_skb = NULL;
7454 conn->rx_len = 0;
7455 l2cap_conn_unreliable(conn, ECOMM);
7456 goto drop;
7457 }
7458
7459 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7460 skb->len);
7461 conn->rx_len -= skb->len;
7462
7463 if (!conn->rx_len) {
 7464			/* Complete frame received. l2cap_recv_frame()
 7465			 * takes ownership of the skb, so clear the
 7466			 * per-connection rx_skb pointer first.
 7467			 */
7468 struct sk_buff *rx_skb = conn->rx_skb;
7469 conn->rx_skb = NULL;
7470 l2cap_recv_frame(conn, rx_skb);
7471 }
7472 break;
7473 }
7474
7475drop:
7476 kfree_skb(skb);
7477 return 0;
7478}
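
/* Worked example of the reassembly above: an L2CAP frame whose header
 * announces a 100 byte payload is 104 bytes in total.  If the first ACL
 * fragment (ACL_START) carries 60 bytes, rx_skb is allocated for 104
 * bytes, the 60 bytes are copied in and rx_len becomes 44.  A following
 * ACL_CONT fragment of 44 bytes completes the frame, which is then
 * passed to l2cap_recv_frame().
 */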
7479
7480static int l2cap_debugfs_show(struct seq_file *f, void *p)
7481{
7482 struct l2cap_chan *c;
7483
7484 read_lock(&chan_list_lock);
7485
7486 list_for_each_entry(c, &chan_list, global_l) {
7487 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7488 &c->src, &c->dst,
7489 c->state, __le16_to_cpu(c->psm),
7490 c->scid, c->dcid, c->imtu, c->omtu,
7491 c->sec_level, c->mode);
7492 }
7493
7494 read_unlock(&chan_list_lock);
7495
7496 return 0;
7497}
7498
7499static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7500{
7501 return single_open(file, l2cap_debugfs_show, inode->i_private);
7502}
7503
7504static const struct file_operations l2cap_debugfs_fops = {
7505 .open = l2cap_debugfs_open,
7506 .read = seq_read,
7507 .llseek = seq_lseek,
7508 .release = single_release,
7509};
7510
7511static struct dentry *l2cap_debugfs;
7512
7513int __init l2cap_init(void)
7514{
7515 int err;
7516
7517 err = l2cap_init_sockets();
7518 if (err < 0)
7519 return err;
7520
7521 if (IS_ERR_OR_NULL(bt_debugfs))
7522 return 0;
7523
7524 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7525 NULL, &l2cap_debugfs_fops);
7526
7527 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7528 &le_max_credits);
7529 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7530 &le_default_mps);
7531
7532 bt_6lowpan_init();
7533
7534 return 0;
7535}
7536
7537void l2cap_exit(void)
7538{
7539 bt_6lowpan_cleanup();
7540 debugfs_remove(l2cap_debugfs);
7541 l2cap_cleanup_sockets();
7542}
7543
7544module_param(disable_ertm, bool, 0644);
7545MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");