Bluetooth: Refactor L2CAP connect rejection to its own function
net/bluetooth/l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43
44 bool disable_ertm;
45
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
48
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
51
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
55 void *data);
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
58
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
61
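/* Map the HCI address type of a connection to the exported BDADDR_* address
 * type: BR/EDR links always map to BDADDR_BREDR, while LE links map to the
 * public or random LE address type.
 */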
62 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
63 {
64 if (hcon->type == LE_LINK) {
65 if (type == ADDR_LE_DEV_PUBLIC)
66 return BDADDR_LE_PUBLIC;
67 else
68 return BDADDR_LE_RANDOM;
69 }
70
71 return BDADDR_BREDR;
72 }
73
74 /* ---- L2CAP channels ---- */
75
76 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
77 u16 cid)
78 {
79 struct l2cap_chan *c;
80
81 list_for_each_entry(c, &conn->chan_l, list) {
82 if (c->dcid == cid)
83 return c;
84 }
85 return NULL;
86 }
87
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
89 u16 cid)
90 {
91 struct l2cap_chan *c;
92
93 list_for_each_entry(c, &conn->chan_l, list) {
94 if (c->scid == cid)
95 return c;
96 }
97 return NULL;
98 }
99
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 u16 cid)
104 {
105 struct l2cap_chan *c;
106
107 mutex_lock(&conn->chan_lock);
108 c = __l2cap_get_chan_by_scid(conn, cid);
109 if (c)
110 l2cap_chan_lock(c);
111 mutex_unlock(&conn->chan_lock);
112
113 return c;
114 }
115
116 /* Find channel with given DCID.
117 * Returns locked channel.
118 */
119 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
120 u16 cid)
121 {
122 struct l2cap_chan *c;
123
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_dcid(conn, cid);
126 if (c)
127 l2cap_chan_lock(c);
128 mutex_unlock(&conn->chan_lock);
129
130 return c;
131 }
132
133 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
134 u8 ident)
135 {
136 struct l2cap_chan *c;
137
138 list_for_each_entry(c, &conn->chan_l, list) {
139 if (c->ident == ident)
140 return c;
141 }
142 return NULL;
143 }
144
145 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
146 u8 ident)
147 {
148 struct l2cap_chan *c;
149
150 mutex_lock(&conn->chan_lock);
151 c = __l2cap_get_chan_by_ident(conn, ident);
152 if (c)
153 l2cap_chan_lock(c);
154 mutex_unlock(&conn->chan_lock);
155
156 return c;
157 }
158
159 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
160 {
161 struct l2cap_chan *c;
162
163 list_for_each_entry(c, &chan_list, global_l) {
164 if (c->sport == psm && !bacmp(&c->src, src))
165 return c;
166 }
167 return NULL;
168 }
169
170 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
171 {
172 int err;
173
174 write_lock(&chan_list_lock);
175
176 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
177 err = -EADDRINUSE;
178 goto done;
179 }
180
181 if (psm) {
182 chan->psm = psm;
183 chan->sport = psm;
184 err = 0;
185 } else {
186 u16 p;
187
188 err = -EINVAL;
189 for (p = 0x1001; p < 0x1100; p += 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
191 chan->psm = cpu_to_le16(p);
192 chan->sport = cpu_to_le16(p);
193 err = 0;
194 break;
195 }
196 }
197
198 done:
199 write_unlock(&chan_list_lock);
200 return err;
201 }
202
203 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
204 {
205 write_lock(&chan_list_lock);
206
207 chan->scid = scid;
208
209 write_unlock(&chan_list_lock);
210
211 return 0;
212 }
213
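/* Allocate the lowest dynamic CID (starting at L2CAP_CID_DYN_START) that is
 * not yet in use as a source CID on this connection. Returns 0 if no dynamic
 * CID is available.
 */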
214 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
215 {
216 u16 cid = L2CAP_CID_DYN_START;
217
218 for (; cid < L2CAP_CID_DYN_END; cid++) {
219 if (!__l2cap_get_chan_by_scid(conn, cid))
220 return cid;
221 }
222
223 return 0;
224 }
225
226 static void l2cap_state_change(struct l2cap_chan *chan, int state)
227 {
228 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
229 state_to_string(state));
230
231 chan->state = state;
232 chan->ops->state_change(chan, state, 0);
233 }
234
235 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
236 int state, int err)
237 {
238 chan->state = state;
239 chan->ops->state_change(chan, chan->state, err);
240 }
241
242 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
243 {
244 chan->ops->state_change(chan, chan->state, err);
245 }
246
247 static void __set_retrans_timer(struct l2cap_chan *chan)
248 {
249 if (!delayed_work_pending(&chan->monitor_timer) &&
250 chan->retrans_timeout) {
251 l2cap_set_timer(chan, &chan->retrans_timer,
252 msecs_to_jiffies(chan->retrans_timeout));
253 }
254 }
255
256 static void __set_monitor_timer(struct l2cap_chan *chan)
257 {
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
262 }
263 }
264
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
266 u16 seq)
267 {
268 struct sk_buff *skb;
269
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
272 return skb;
273 }
274
275 return NULL;
276 }
277
278 /* ---- L2CAP sequence number lists ---- */
279
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
286 * allocs or frees.
287 */
288
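/* Illustrative example of the scheme implemented below: with a transmit
 * window of 63 frames, the array is allocated with roundup_pow_of_two(63) = 64
 * slots and mask 0x3f. Appending txseq 100 stores L2CAP_SEQ_LIST_TAIL in
 * slot 100 & 0x3f = 36 and links the previous tail slot to 100, so membership
 * tests, appends and head removals all stay constant time.
 */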
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
290 {
291 size_t alloc_size, i;
292
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) into a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
296 */
297 alloc_size = roundup_pow_of_two(size);
298
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
300 if (!seq_list->list)
301 return -ENOMEM;
302
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
308
309 return 0;
310 }
311
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
313 {
314 kfree(seq_list->list);
315 }
316
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
318 u16 seq)
319 {
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
322 }
323
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
325 {
326 u16 mask = seq_list->mask;
327
328 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR;
331 } else if (seq_list->head == seq) {
332 /* Head can be removed in constant time */
333 seq_list->head = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
335
336 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
339 }
340 } else {
341 /* Walk the list to find the sequence number */
342 u16 prev = seq_list->head;
343 while (seq_list->list[prev & mask] != seq) {
344 prev = seq_list->list[prev & mask];
345 if (prev == L2CAP_SEQ_LIST_TAIL)
346 return L2CAP_SEQ_LIST_CLEAR;
347 }
348
349 /* Unlink the number from the list and clear it */
350 seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 if (seq_list->tail == seq)
353 seq_list->tail = prev;
354 }
355 return seq;
356 }
357
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
359 {
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
362 }
363
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
365 {
366 u16 i;
367
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 return;
370
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
373
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376 }
377
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
379 {
380 u16 mask = seq_list->mask;
381
382 /* All appends happen in constant time */
383
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
385 return;
386
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
389 else
390 seq_list->list[seq_list->tail & mask] = seq;
391
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
394 }
395
396 static void l2cap_chan_timeout(struct work_struct *work)
397 {
398 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
399 chan_timer.work);
400 struct l2cap_conn *conn = chan->conn;
401 int reason;
402
403 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
404
405 mutex_lock(&conn->chan_lock);
406 l2cap_chan_lock(chan);
407
408 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 reason = ECONNREFUSED;
410 else if (chan->state == BT_CONNECT &&
411 chan->sec_level != BT_SECURITY_SDP)
412 reason = ECONNREFUSED;
413 else
414 reason = ETIMEDOUT;
415
416 l2cap_chan_close(chan, reason);
417
418 l2cap_chan_unlock(chan);
419
420 chan->ops->close(chan);
421 mutex_unlock(&conn->chan_lock);
422
423 l2cap_chan_put(chan);
424 }
425
426 struct l2cap_chan *l2cap_chan_create(void)
427 {
428 struct l2cap_chan *chan;
429
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
431 if (!chan)
432 return NULL;
433
434 mutex_init(&chan->lock);
435
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
439
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
441
442 chan->state = BT_OPEN;
443
444 kref_init(&chan->kref);
445
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
448
449 BT_DBG("chan %p", chan);
450
451 return chan;
452 }
453
454 static void l2cap_chan_destroy(struct kref *kref)
455 {
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
457
458 BT_DBG("chan %p", chan);
459
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
463
464 kfree(chan);
465 }
466
467 void l2cap_chan_hold(struct l2cap_chan *c)
468 {
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
470
471 kref_get(&c->kref);
472 }
473
474 void l2cap_chan_put(struct l2cap_chan *c)
475 {
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
477
478 kref_put(&c->kref, l2cap_chan_destroy);
479 }
480
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
482 {
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
489
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
491 }
492
493 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
494 {
495 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 __le16_to_cpu(chan->psm), chan->dcid);
497
498 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
499
500 chan->conn = conn;
501
502 switch (chan->chan_type) {
503 case L2CAP_CHAN_CONN_ORIENTED:
504 if (conn->hcon->type == LE_LINK) {
505 /* LE connection */
506 chan->omtu = L2CAP_DEFAULT_MTU;
507 if (chan->dcid == L2CAP_CID_ATT)
508 chan->scid = L2CAP_CID_ATT;
509 else
510 chan->scid = l2cap_alloc_cid(conn);
511 } else {
512 /* Alloc CID for connection-oriented socket */
513 chan->scid = l2cap_alloc_cid(conn);
514 chan->omtu = L2CAP_DEFAULT_MTU;
515 }
516 break;
517
518 case L2CAP_CHAN_CONN_LESS:
519 /* Connectionless socket */
520 chan->scid = L2CAP_CID_CONN_LESS;
521 chan->dcid = L2CAP_CID_CONN_LESS;
522 chan->omtu = L2CAP_DEFAULT_MTU;
523 break;
524
525 case L2CAP_CHAN_CONN_FIX_A2MP:
526 chan->scid = L2CAP_CID_A2MP;
527 chan->dcid = L2CAP_CID_A2MP;
528 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
529 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
530 break;
531
532 default:
533 /* Raw socket can send/recv signalling messages only */
534 chan->scid = L2CAP_CID_SIGNALING;
535 chan->dcid = L2CAP_CID_SIGNALING;
536 chan->omtu = L2CAP_DEFAULT_MTU;
537 }
538
539 chan->local_id = L2CAP_BESTEFFORT_ID;
540 chan->local_stype = L2CAP_SERV_BESTEFFORT;
541 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
542 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
543 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
544 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
545
546 l2cap_chan_hold(chan);
547
548 hci_conn_hold(conn->hcon);
549
550 list_add(&chan->list, &conn->chan_l);
551 }
552
553 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
554 {
555 mutex_lock(&conn->chan_lock);
556 __l2cap_chan_add(conn, chan);
557 mutex_unlock(&conn->chan_lock);
558 }
559
560 void l2cap_chan_del(struct l2cap_chan *chan, int err)
561 {
562 struct l2cap_conn *conn = chan->conn;
563
564 __clear_chan_timer(chan);
565
566 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
567
568 if (conn) {
569 struct amp_mgr *mgr = conn->hcon->amp_mgr;
570 /* Delete from channel list */
571 list_del(&chan->list);
572
573 l2cap_chan_put(chan);
574
575 chan->conn = NULL;
576
577 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
578 hci_conn_drop(conn->hcon);
579
580 if (mgr && mgr->bredr_chan == chan)
581 mgr->bredr_chan = NULL;
582 }
583
584 if (chan->hs_hchan) {
585 struct hci_chan *hs_hchan = chan->hs_hchan;
586
587 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
588 amp_disconnect_logical_link(hs_hchan);
589 }
590
591 chan->ops->teardown(chan, err);
592
593 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
594 return;
595
596 switch (chan->mode) {
597 case L2CAP_MODE_BASIC:
598 break;
599
600 case L2CAP_MODE_ERTM:
601 __clear_retrans_timer(chan);
602 __clear_monitor_timer(chan);
603 __clear_ack_timer(chan);
604
605 skb_queue_purge(&chan->srej_q);
606
607 l2cap_seq_list_free(&chan->srej_list);
608 l2cap_seq_list_free(&chan->retrans_list);
609
610 /* fall through */
611
612 case L2CAP_MODE_STREAMING:
613 skb_queue_purge(&chan->tx_q);
614 break;
615 }
616
617 return;
618 }
619
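/* Reject a pending incoming connection on this channel: move to BT_DISCONN
 * and answer the remote Connection Request with "security block" (when the
 * setup was deferred to userspace) or "PSM not supported".
 */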
620 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
621 {
622 struct l2cap_conn *conn = chan->conn;
623 struct l2cap_conn_rsp rsp;
624 u16 result;
625
626 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
627 result = L2CAP_CR_SEC_BLOCK;
628 else
629 result = L2CAP_CR_BAD_PSM;
630
631 l2cap_state_change(chan, BT_DISCONN);
632
633 rsp.scid = cpu_to_le16(chan->dcid);
634 rsp.dcid = cpu_to_le16(chan->scid);
635 rsp.result = cpu_to_le16(result);
636 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
637
638 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
639 }
640
641 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
642 {
643 struct l2cap_conn *conn = chan->conn;
644
645 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
646
647 switch (chan->state) {
648 case BT_LISTEN:
649 chan->ops->teardown(chan, 0);
650 break;
651
652 case BT_CONNECTED:
653 case BT_CONFIG:
654 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
655 conn->hcon->type == ACL_LINK) {
656 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
657 l2cap_send_disconn_req(chan, reason);
658 } else
659 l2cap_chan_del(chan, reason);
660 break;
661
662 case BT_CONNECT2:
663 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
664 if (conn->hcon->type == ACL_LINK)
665 l2cap_chan_connect_reject(chan);
666 }
667
668 l2cap_chan_del(chan, reason);
669 break;
670
671 case BT_CONNECT:
672 case BT_DISCONN:
673 l2cap_chan_del(chan, reason);
674 break;
675
676 default:
677 chan->ops->teardown(chan, 0);
678 break;
679 }
680 }
681
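/* Derive the HCI authentication requirement (bonding level and MITM
 * protection) from the channel type, the PSM and the requested security
 * level.
 */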
682 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
683 {
684 switch (chan->chan_type) {
685 case L2CAP_CHAN_RAW:
686 switch (chan->sec_level) {
687 case BT_SECURITY_HIGH:
688 return HCI_AT_DEDICATED_BONDING_MITM;
689 case BT_SECURITY_MEDIUM:
690 return HCI_AT_DEDICATED_BONDING;
691 default:
692 return HCI_AT_NO_BONDING;
693 }
694 break;
695 case L2CAP_CHAN_CONN_LESS:
696 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
697 if (chan->sec_level == BT_SECURITY_LOW)
698 chan->sec_level = BT_SECURITY_SDP;
699 }
700 if (chan->sec_level == BT_SECURITY_HIGH)
701 return HCI_AT_NO_BONDING_MITM;
702 else
703 return HCI_AT_NO_BONDING;
704 break;
705 case L2CAP_CHAN_CONN_ORIENTED:
706 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
707 if (chan->sec_level == BT_SECURITY_LOW)
708 chan->sec_level = BT_SECURITY_SDP;
709
710 if (chan->sec_level == BT_SECURITY_HIGH)
711 return HCI_AT_NO_BONDING_MITM;
712 else
713 return HCI_AT_NO_BONDING;
714 }
715 /* fall through */
716 default:
717 switch (chan->sec_level) {
718 case BT_SECURITY_HIGH:
719 return HCI_AT_GENERAL_BONDING_MITM;
720 case BT_SECURITY_MEDIUM:
721 return HCI_AT_GENERAL_BONDING;
722 default:
723 return HCI_AT_NO_BONDING;
724 }
725 break;
726 }
727 }
728
729 /* Service level security */
730 int l2cap_chan_check_security(struct l2cap_chan *chan)
731 {
732 struct l2cap_conn *conn = chan->conn;
733 __u8 auth_type;
734
735 if (conn->hcon->type == LE_LINK)
736 return smp_conn_security(conn->hcon, chan->sec_level);
737
738 auth_type = l2cap_get_auth_type(chan);
739
740 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
741 }
742
743 static u8 l2cap_get_ident(struct l2cap_conn *conn)
744 {
745 u8 id;
746
747 /* Get next available identifier.
748 * 1 - 128 are used by the kernel.
749 * 129 - 199 are reserved.
750 * 200 - 254 are used by utilities like l2ping, etc.
751 */
752
753 spin_lock(&conn->lock);
754
755 if (++conn->tx_ident > 128)
756 conn->tx_ident = 1;
757
758 id = conn->tx_ident;
759
760 spin_unlock(&conn->lock);
761
762 return id;
763 }
764
765 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
766 void *data)
767 {
768 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
769 u8 flags;
770
771 BT_DBG("code 0x%2.2x", code);
772
773 if (!skb)
774 return;
775
776 if (lmp_no_flush_capable(conn->hcon->hdev))
777 flags = ACL_START_NO_FLUSH;
778 else
779 flags = ACL_START;
780
781 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
782 skb->priority = HCI_PRIO_MAX;
783
784 hci_send_acl(conn->hchan, skb, flags);
785 }
786
787 static bool __chan_is_moving(struct l2cap_chan *chan)
788 {
789 return chan->move_state != L2CAP_MOVE_STABLE &&
790 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
791 }
792
793 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
794 {
795 struct hci_conn *hcon = chan->conn->hcon;
796 u16 flags;
797
798 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
799 skb->priority);
800
801 if (chan->hs_hcon && !__chan_is_moving(chan)) {
802 if (chan->hs_hchan)
803 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
804 else
805 kfree_skb(skb);
806
807 return;
808 }
809
810 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
811 lmp_no_flush_capable(hcon->hdev))
812 flags = ACL_START_NO_FLUSH;
813 else
814 flags = ACL_START;
815
816 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
817 hci_send_acl(chan->conn->hchan, skb, flags);
818 }
819
820 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
821 {
822 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
823 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
824
825 if (enh & L2CAP_CTRL_FRAME_TYPE) {
826 /* S-Frame */
827 control->sframe = 1;
828 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
829 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
830
831 control->sar = 0;
832 control->txseq = 0;
833 } else {
834 /* I-Frame */
835 control->sframe = 0;
836 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
837 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
838
839 control->poll = 0;
840 control->super = 0;
841 }
842 }
843
844 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
845 {
846 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
847 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
848
849 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
850 /* S-Frame */
851 control->sframe = 1;
852 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
853 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
854
855 control->sar = 0;
856 control->txseq = 0;
857 } else {
858 /* I-Frame */
859 control->sframe = 0;
860 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
861 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
862
863 control->poll = 0;
864 control->super = 0;
865 }
866 }
867
868 static inline void __unpack_control(struct l2cap_chan *chan,
869 struct sk_buff *skb)
870 {
871 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
872 __unpack_extended_control(get_unaligned_le32(skb->data),
873 &bt_cb(skb)->control);
874 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
875 } else {
876 __unpack_enhanced_control(get_unaligned_le16(skb->data),
877 &bt_cb(skb)->control);
878 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
879 }
880 }
881
882 static u32 __pack_extended_control(struct l2cap_ctrl *control)
883 {
884 u32 packed;
885
886 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
887 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
888
889 if (control->sframe) {
890 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
891 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
892 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
893 } else {
894 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
895 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
896 }
897
898 return packed;
899 }
900
901 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
902 {
903 u16 packed;
904
905 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
906 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
907
908 if (control->sframe) {
909 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
910 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
911 packed |= L2CAP_CTRL_FRAME_TYPE;
912 } else {
913 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
914 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
915 }
916
917 return packed;
918 }
919
920 static inline void __pack_control(struct l2cap_chan *chan,
921 struct l2cap_ctrl *control,
922 struct sk_buff *skb)
923 {
924 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
925 put_unaligned_le32(__pack_extended_control(control),
926 skb->data + L2CAP_HDR_SIZE);
927 } else {
928 put_unaligned_le16(__pack_enhanced_control(control),
929 skb->data + L2CAP_HDR_SIZE);
930 }
931 }
932
933 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
934 {
935 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
936 return L2CAP_EXT_HDR_SIZE;
937 else
938 return L2CAP_ENH_HDR_SIZE;
939 }
940
941 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
942 u32 control)
943 {
944 struct sk_buff *skb;
945 struct l2cap_hdr *lh;
946 int hlen = __ertm_hdr_size(chan);
947
948 if (chan->fcs == L2CAP_FCS_CRC16)
949 hlen += L2CAP_FCS_SIZE;
950
951 skb = bt_skb_alloc(hlen, GFP_KERNEL);
952
953 if (!skb)
954 return ERR_PTR(-ENOMEM);
955
956 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
957 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
958 lh->cid = cpu_to_le16(chan->dcid);
959
960 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
961 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
962 else
963 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
964
965 if (chan->fcs == L2CAP_FCS_CRC16) {
966 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
967 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
968 }
969
970 skb->priority = HCI_PRIO_MAX;
971 return skb;
972 }
973
974 static void l2cap_send_sframe(struct l2cap_chan *chan,
975 struct l2cap_ctrl *control)
976 {
977 struct sk_buff *skb;
978 u32 control_field;
979
980 BT_DBG("chan %p, control %p", chan, control);
981
982 if (!control->sframe)
983 return;
984
985 if (__chan_is_moving(chan))
986 return;
987
988 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
989 !control->poll)
990 control->final = 1;
991
992 if (control->super == L2CAP_SUPER_RR)
993 clear_bit(CONN_RNR_SENT, &chan->conn_state);
994 else if (control->super == L2CAP_SUPER_RNR)
995 set_bit(CONN_RNR_SENT, &chan->conn_state);
996
997 if (control->super != L2CAP_SUPER_SREJ) {
998 chan->last_acked_seq = control->reqseq;
999 __clear_ack_timer(chan);
1000 }
1001
1002 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1003 control->final, control->poll, control->super);
1004
1005 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1006 control_field = __pack_extended_control(control);
1007 else
1008 control_field = __pack_enhanced_control(control);
1009
1010 skb = l2cap_create_sframe_pdu(chan, control_field);
1011 if (!IS_ERR(skb))
1012 l2cap_do_send(chan, skb);
1013 }
1014
1015 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1016 {
1017 struct l2cap_ctrl control;
1018
1019 BT_DBG("chan %p, poll %d", chan, poll);
1020
1021 memset(&control, 0, sizeof(control));
1022 control.sframe = 1;
1023 control.poll = poll;
1024
1025 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1026 control.super = L2CAP_SUPER_RNR;
1027 else
1028 control.super = L2CAP_SUPER_RR;
1029
1030 control.reqseq = chan->buffer_seq;
1031 l2cap_send_sframe(chan, &control);
1032 }
1033
1034 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1035 {
1036 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1037 }
1038
1039 static bool __amp_capable(struct l2cap_chan *chan)
1040 {
1041 struct l2cap_conn *conn = chan->conn;
1042 struct hci_dev *hdev;
1043 bool amp_available = false;
1044
1045 if (!conn->hs_enabled)
1046 return false;
1047
1048 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1049 return false;
1050
1051 read_lock(&hci_dev_list_lock);
1052 list_for_each_entry(hdev, &hci_dev_list, list) {
1053 if (hdev->amp_type != AMP_TYPE_BREDR &&
1054 test_bit(HCI_UP, &hdev->flags)) {
1055 amp_available = true;
1056 break;
1057 }
1058 }
1059 read_unlock(&hci_dev_list_lock);
1060
1061 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1062 return amp_available;
1063
1064 return false;
1065 }
1066
1067 static bool l2cap_check_efs(struct l2cap_chan *chan)
1068 {
1069 /* Check EFS parameters */
1070 return true;
1071 }
1072
1073 void l2cap_send_conn_req(struct l2cap_chan *chan)
1074 {
1075 struct l2cap_conn *conn = chan->conn;
1076 struct l2cap_conn_req req;
1077
1078 req.scid = cpu_to_le16(chan->scid);
1079 req.psm = chan->psm;
1080
1081 chan->ident = l2cap_get_ident(conn);
1082
1083 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1084
1085 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1086 }
1087
1088 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1089 {
1090 struct l2cap_create_chan_req req;
1091 req.scid = cpu_to_le16(chan->scid);
1092 req.psm = chan->psm;
1093 req.amp_id = amp_id;
1094
1095 chan->ident = l2cap_get_ident(chan->conn);
1096
1097 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1098 sizeof(req), &req);
1099 }
1100
1101 static void l2cap_move_setup(struct l2cap_chan *chan)
1102 {
1103 struct sk_buff *skb;
1104
1105 BT_DBG("chan %p", chan);
1106
1107 if (chan->mode != L2CAP_MODE_ERTM)
1108 return;
1109
1110 __clear_retrans_timer(chan);
1111 __clear_monitor_timer(chan);
1112 __clear_ack_timer(chan);
1113
1114 chan->retry_count = 0;
1115 skb_queue_walk(&chan->tx_q, skb) {
1116 if (bt_cb(skb)->control.retries)
1117 bt_cb(skb)->control.retries = 1;
1118 else
1119 break;
1120 }
1121
1122 chan->expected_tx_seq = chan->buffer_seq;
1123
1124 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1125 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1126 l2cap_seq_list_clear(&chan->retrans_list);
1127 l2cap_seq_list_clear(&chan->srej_list);
1128 skb_queue_purge(&chan->srej_q);
1129
1130 chan->tx_state = L2CAP_TX_STATE_XMIT;
1131 chan->rx_state = L2CAP_RX_STATE_MOVE;
1132
1133 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1134 }
1135
1136 static void l2cap_move_done(struct l2cap_chan *chan)
1137 {
1138 u8 move_role = chan->move_role;
1139 BT_DBG("chan %p", chan);
1140
1141 chan->move_state = L2CAP_MOVE_STABLE;
1142 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1143
1144 if (chan->mode != L2CAP_MODE_ERTM)
1145 return;
1146
1147 switch (move_role) {
1148 case L2CAP_MOVE_ROLE_INITIATOR:
1149 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1150 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1151 break;
1152 case L2CAP_MOVE_ROLE_RESPONDER:
1153 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1154 break;
1155 }
1156 }
1157
1158 static void l2cap_chan_ready(struct l2cap_chan *chan)
1159 {
1160 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1161 chan->conf_state = 0;
1162 __clear_chan_timer(chan);
1163
1164 chan->state = BT_CONNECTED;
1165
1166 chan->ops->ready(chan);
1167 }
1168
1169 static void l2cap_le_connect(struct l2cap_chan *chan)
1170 {
1171 struct l2cap_conn *conn = chan->conn;
1172 struct l2cap_le_conn_req req;
1173
1174 req.psm = chan->psm;
1175 req.scid = cpu_to_le16(chan->scid);
1176 req.mtu = cpu_to_le16(chan->imtu);
1177 req.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
1178 req.credits = __constant_cpu_to_le16(L2CAP_LE_MAX_CREDITS);
1179
1180 chan->ident = l2cap_get_ident(conn);
1181
1182 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1183 sizeof(req), &req);
1184 }
1185
1186 static void l2cap_le_start(struct l2cap_chan *chan)
1187 {
1188 struct l2cap_conn *conn = chan->conn;
1189
1190 if (!smp_conn_security(conn->hcon, chan->sec_level))
1191 return;
1192
1193 if (!chan->psm) {
1194 l2cap_chan_ready(chan);
1195 return;
1196 }
1197
1198 if (chan->state == BT_CONNECT)
1199 l2cap_le_connect(chan);
1200 }
1201
1202 static void l2cap_start_connection(struct l2cap_chan *chan)
1203 {
1204 if (__amp_capable(chan)) {
1205 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1206 a2mp_discover_amp(chan);
1207 } else if (chan->conn->hcon->type == LE_LINK) {
1208 l2cap_le_start(chan);
1209 } else {
1210 l2cap_send_conn_req(chan);
1211 }
1212 }
1213
1214 static void l2cap_do_start(struct l2cap_chan *chan)
1215 {
1216 struct l2cap_conn *conn = chan->conn;
1217
1218 if (conn->hcon->type == LE_LINK) {
1219 l2cap_le_start(chan);
1220 return;
1221 }
1222
1223 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1224 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1225 return;
1226
1227 if (l2cap_chan_check_security(chan) &&
1228 __l2cap_no_conn_pending(chan)) {
1229 l2cap_start_connection(chan);
1230 }
1231 } else {
1232 struct l2cap_info_req req;
1233 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1234
1235 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1236 conn->info_ident = l2cap_get_ident(conn);
1237
1238 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1239
1240 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1241 sizeof(req), &req);
1242 }
1243 }
1244
1245 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1246 {
1247 u32 local_feat_mask = l2cap_feat_mask;
1248 if (!disable_ertm)
1249 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1250
1251 switch (mode) {
1252 case L2CAP_MODE_ERTM:
1253 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1254 case L2CAP_MODE_STREAMING:
1255 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1256 default:
1257 return 0x00;
1258 }
1259 }
1260
1261 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1262 {
1263 struct l2cap_conn *conn = chan->conn;
1264 struct l2cap_disconn_req req;
1265
1266 if (!conn)
1267 return;
1268
1269 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1270 __clear_retrans_timer(chan);
1271 __clear_monitor_timer(chan);
1272 __clear_ack_timer(chan);
1273 }
1274
1275 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1276 l2cap_state_change(chan, BT_DISCONN);
1277 return;
1278 }
1279
1280 req.dcid = cpu_to_le16(chan->dcid);
1281 req.scid = cpu_to_le16(chan->scid);
1282 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1283 sizeof(req), &req);
1284
1285 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1286 }
1287
1288 /* ---- L2CAP connections ---- */
1289 static void l2cap_conn_start(struct l2cap_conn *conn)
1290 {
1291 struct l2cap_chan *chan, *tmp;
1292
1293 BT_DBG("conn %p", conn);
1294
1295 mutex_lock(&conn->chan_lock);
1296
1297 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1298 l2cap_chan_lock(chan);
1299
1300 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1301 l2cap_chan_unlock(chan);
1302 continue;
1303 }
1304
1305 if (chan->state == BT_CONNECT) {
1306 if (!l2cap_chan_check_security(chan) ||
1307 !__l2cap_no_conn_pending(chan)) {
1308 l2cap_chan_unlock(chan);
1309 continue;
1310 }
1311
1312 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1313 && test_bit(CONF_STATE2_DEVICE,
1314 &chan->conf_state)) {
1315 l2cap_chan_close(chan, ECONNRESET);
1316 l2cap_chan_unlock(chan);
1317 continue;
1318 }
1319
1320 l2cap_start_connection(chan);
1321
1322 } else if (chan->state == BT_CONNECT2) {
1323 struct l2cap_conn_rsp rsp;
1324 char buf[128];
1325 rsp.scid = cpu_to_le16(chan->dcid);
1326 rsp.dcid = cpu_to_le16(chan->scid);
1327
1328 if (l2cap_chan_check_security(chan)) {
1329 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1330 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1331 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1332 chan->ops->defer(chan);
1333
1334 } else {
1335 l2cap_state_change(chan, BT_CONFIG);
1336 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1337 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1338 }
1339 } else {
1340 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1341 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1342 }
1343
1344 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1345 sizeof(rsp), &rsp);
1346
1347 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1348 rsp.result != L2CAP_CR_SUCCESS) {
1349 l2cap_chan_unlock(chan);
1350 continue;
1351 }
1352
1353 set_bit(CONF_REQ_SENT, &chan->conf_state);
1354 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1355 l2cap_build_conf_req(chan, buf), buf);
1356 chan->num_conf_req++;
1357 }
1358
1359 l2cap_chan_unlock(chan);
1360 }
1361
1362 mutex_unlock(&conn->chan_lock);
1363 }
1364
1365 /* Find channel with given CID and source/destination bdaddr.
1366 * Returns the closest match.
1367 */
1368 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1369 bdaddr_t *src,
1370 bdaddr_t *dst)
1371 {
1372 struct l2cap_chan *c, *c1 = NULL;
1373
1374 read_lock(&chan_list_lock);
1375
1376 list_for_each_entry(c, &chan_list, global_l) {
1377 if (state && c->state != state)
1378 continue;
1379
1380 if (c->scid == cid) {
1381 int src_match, dst_match;
1382 int src_any, dst_any;
1383
1384 /* Exact match. */
1385 src_match = !bacmp(&c->src, src);
1386 dst_match = !bacmp(&c->dst, dst);
1387 if (src_match && dst_match) {
1388 read_unlock(&chan_list_lock);
1389 return c;
1390 }
1391
1392 /* Closest match */
1393 src_any = !bacmp(&c->src, BDADDR_ANY);
1394 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1395 if ((src_match && dst_any) || (src_any && dst_match) ||
1396 (src_any && dst_any))
1397 c1 = c;
1398 }
1399 }
1400
1401 read_unlock(&chan_list_lock);
1402
1403 return c1;
1404 }
1405
1406 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1407 {
1408 struct hci_conn *hcon = conn->hcon;
1409 struct l2cap_chan *chan, *pchan;
1410 u8 dst_type;
1411
1412 BT_DBG("");
1413
1414 /* Check if we have a socket listening on this CID */
1415 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1416 &hcon->src, &hcon->dst);
1417 if (!pchan)
1418 return;
1419
1420 /* Client ATT sockets should override the server one */
1421 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1422 return;
1423
1424 dst_type = bdaddr_type(hcon, hcon->dst_type);
1425
1426 /* If device is blocked, do not create a channel for it */
1427 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
1428 return;
1429
1430 l2cap_chan_lock(pchan);
1431
1432 chan = pchan->ops->new_connection(pchan);
1433 if (!chan)
1434 goto clean;
1435
1436 chan->dcid = L2CAP_CID_ATT;
1437
1438 bacpy(&chan->src, &hcon->src);
1439 bacpy(&chan->dst, &hcon->dst);
1440 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1441 chan->dst_type = dst_type;
1442
1443 __l2cap_chan_add(conn, chan);
1444
1445 clean:
1446 l2cap_chan_unlock(pchan);
1447 }
1448
1449 static void l2cap_conn_ready(struct l2cap_conn *conn)
1450 {
1451 struct l2cap_chan *chan;
1452 struct hci_conn *hcon = conn->hcon;
1453
1454 BT_DBG("conn %p", conn);
1455
1456 /* For outgoing pairing which doesn't necessarily have an
1457 * associated socket (e.g. mgmt_pair_device).
1458 */
1459 if (hcon->out && hcon->type == LE_LINK)
1460 smp_conn_security(hcon, hcon->pending_sec_level);
1461
1462 mutex_lock(&conn->chan_lock);
1463
1464 if (hcon->type == LE_LINK)
1465 l2cap_le_conn_ready(conn);
1466
1467 list_for_each_entry(chan, &conn->chan_l, list) {
1468
1469 l2cap_chan_lock(chan);
1470
1471 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1472 l2cap_chan_unlock(chan);
1473 continue;
1474 }
1475
1476 if (hcon->type == LE_LINK) {
1477 l2cap_le_start(chan);
1478 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1479 l2cap_chan_ready(chan);
1480
1481 } else if (chan->state == BT_CONNECT) {
1482 l2cap_do_start(chan);
1483 }
1484
1485 l2cap_chan_unlock(chan);
1486 }
1487
1488 mutex_unlock(&conn->chan_lock);
1489 }
1490
1491 /* Notify sockets that we cannot guarantee reliability anymore */
1492 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1493 {
1494 struct l2cap_chan *chan;
1495
1496 BT_DBG("conn %p", conn);
1497
1498 mutex_lock(&conn->chan_lock);
1499
1500 list_for_each_entry(chan, &conn->chan_l, list) {
1501 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1502 l2cap_chan_set_err(chan, err);
1503 }
1504
1505 mutex_unlock(&conn->chan_lock);
1506 }
1507
1508 static void l2cap_info_timeout(struct work_struct *work)
1509 {
1510 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1511 info_timer.work);
1512
1513 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1514 conn->info_ident = 0;
1515
1516 l2cap_conn_start(conn);
1517 }
1518
1519 /*
1520 * l2cap_user
1521 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1522 * callback is called during registration. The ->remove callback is called
1523 * during unregistration.
1524 * An l2cap_user object is unregistered either explicitly or automatically when
1525 * the underlying l2cap_conn object is deleted. This guarantees that conn->hcon,
1526 * conn->hchan, etc. are valid as long as the remove callback hasn't been called.
1527 * External modules must own a reference to the l2cap_conn object if they intend
1528 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1529 * any time if they don't.
1530 */
1531
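/* Minimal usage sketch (hypothetical caller code, not part of this file):
 * an external module fills in a struct l2cap_user with its callbacks and
 * registers it on a conn it holds a reference to. The names my_probe,
 * my_remove and my_user are made up; the callback signatures follow how
 * ->probe and ->remove are invoked below.
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *	}
 *
 *	static struct l2cap_user my_user = {
 *		.probe	= my_probe,
 *		.remove	= my_remove,
 *	};
 *
 *	l2cap_conn_get(conn);
 *	err = l2cap_register_user(conn, &my_user);
 *
 * Unregister with l2cap_unregister_user(conn, &my_user) before dropping the
 * conn reference with l2cap_conn_put().
 */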
1532 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1533 {
1534 struct hci_dev *hdev = conn->hcon->hdev;
1535 int ret;
1536
1537 /* We need to check whether l2cap_conn is registered. If it is not, we
1538 * must not register the l2cap_user. l2cap_conn_del() unregisters
1539 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1540 * relies on the parent hci_conn object to be locked. This itself relies
1541 * on the hci_dev object to be locked. So we must lock the hci device
1542 * here, too. */
1543
1544 hci_dev_lock(hdev);
1545
1546 if (user->list.next || user->list.prev) {
1547 ret = -EINVAL;
1548 goto out_unlock;
1549 }
1550
1551 /* conn->hchan is NULL after l2cap_conn_del() was called */
1552 if (!conn->hchan) {
1553 ret = -ENODEV;
1554 goto out_unlock;
1555 }
1556
1557 ret = user->probe(conn, user);
1558 if (ret)
1559 goto out_unlock;
1560
1561 list_add(&user->list, &conn->users);
1562 ret = 0;
1563
1564 out_unlock:
1565 hci_dev_unlock(hdev);
1566 return ret;
1567 }
1568 EXPORT_SYMBOL(l2cap_register_user);
1569
1570 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1571 {
1572 struct hci_dev *hdev = conn->hcon->hdev;
1573
1574 hci_dev_lock(hdev);
1575
1576 if (!user->list.next || !user->list.prev)
1577 goto out_unlock;
1578
1579 list_del(&user->list);
1580 user->list.next = NULL;
1581 user->list.prev = NULL;
1582 user->remove(conn, user);
1583
1584 out_unlock:
1585 hci_dev_unlock(hdev);
1586 }
1587 EXPORT_SYMBOL(l2cap_unregister_user);
1588
1589 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1590 {
1591 struct l2cap_user *user;
1592
1593 while (!list_empty(&conn->users)) {
1594 user = list_first_entry(&conn->users, struct l2cap_user, list);
1595 list_del(&user->list);
1596 user->list.next = NULL;
1597 user->list.prev = NULL;
1598 user->remove(conn, user);
1599 }
1600 }
1601
1602 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1603 {
1604 struct l2cap_conn *conn = hcon->l2cap_data;
1605 struct l2cap_chan *chan, *l;
1606
1607 if (!conn)
1608 return;
1609
1610 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1611
1612 kfree_skb(conn->rx_skb);
1613
1614 l2cap_unregister_all_users(conn);
1615
1616 mutex_lock(&conn->chan_lock);
1617
1618 /* Kill channels */
1619 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1620 l2cap_chan_hold(chan);
1621 l2cap_chan_lock(chan);
1622
1623 l2cap_chan_del(chan, err);
1624
1625 l2cap_chan_unlock(chan);
1626
1627 chan->ops->close(chan);
1628 l2cap_chan_put(chan);
1629 }
1630
1631 mutex_unlock(&conn->chan_lock);
1632
1633 hci_chan_del(conn->hchan);
1634
1635 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1636 cancel_delayed_work_sync(&conn->info_timer);
1637
1638 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1639 cancel_delayed_work_sync(&conn->security_timer);
1640 smp_chan_destroy(conn);
1641 }
1642
1643 hcon->l2cap_data = NULL;
1644 conn->hchan = NULL;
1645 l2cap_conn_put(conn);
1646 }
1647
1648 static void security_timeout(struct work_struct *work)
1649 {
1650 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1651 security_timer.work);
1652
1653 BT_DBG("conn %p", conn);
1654
1655 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1656 smp_chan_destroy(conn);
1657 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1658 }
1659 }
1660
1661 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1662 {
1663 struct l2cap_conn *conn = hcon->l2cap_data;
1664 struct hci_chan *hchan;
1665
1666 if (conn)
1667 return conn;
1668
1669 hchan = hci_chan_create(hcon);
1670 if (!hchan)
1671 return NULL;
1672
1673 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1674 if (!conn) {
1675 hci_chan_del(hchan);
1676 return NULL;
1677 }
1678
1679 kref_init(&conn->ref);
1680 hcon->l2cap_data = conn;
1681 conn->hcon = hcon;
1682 hci_conn_get(conn->hcon);
1683 conn->hchan = hchan;
1684
1685 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1686
1687 switch (hcon->type) {
1688 case LE_LINK:
1689 if (hcon->hdev->le_mtu) {
1690 conn->mtu = hcon->hdev->le_mtu;
1691 break;
1692 }
1693 /* fall through */
1694 default:
1695 conn->mtu = hcon->hdev->acl_mtu;
1696 break;
1697 }
1698
1699 conn->feat_mask = 0;
1700
1701 if (hcon->type == ACL_LINK)
1702 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1703 &hcon->hdev->dev_flags);
1704
1705 spin_lock_init(&conn->lock);
1706 mutex_init(&conn->chan_lock);
1707
1708 INIT_LIST_HEAD(&conn->chan_l);
1709 INIT_LIST_HEAD(&conn->users);
1710
1711 if (hcon->type == LE_LINK)
1712 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1713 else
1714 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1715
1716 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1717
1718 return conn;
1719 }
1720
1721 static void l2cap_conn_free(struct kref *ref)
1722 {
1723 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1724
1725 hci_conn_put(conn->hcon);
1726 kfree(conn);
1727 }
1728
1729 void l2cap_conn_get(struct l2cap_conn *conn)
1730 {
1731 kref_get(&conn->ref);
1732 }
1733 EXPORT_SYMBOL(l2cap_conn_get);
1734
1735 void l2cap_conn_put(struct l2cap_conn *conn)
1736 {
1737 kref_put(&conn->ref, l2cap_conn_free);
1738 }
1739 EXPORT_SYMBOL(l2cap_conn_put);
1740
1741 /* ---- Socket interface ---- */
1742
1743 /* Find channel with given PSM and source/destination bdaddr.
1744 * Returns the closest match.
1745 */
1746 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1747 bdaddr_t *src,
1748 bdaddr_t *dst,
1749 u8 link_type)
1750 {
1751 struct l2cap_chan *c, *c1 = NULL;
1752
1753 read_lock(&chan_list_lock);
1754
1755 list_for_each_entry(c, &chan_list, global_l) {
1756 if (state && c->state != state)
1757 continue;
1758
1759 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1760 continue;
1761
1762 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1763 continue;
1764
1765 if (c->psm == psm) {
1766 int src_match, dst_match;
1767 int src_any, dst_any;
1768
1769 /* Exact match. */
1770 src_match = !bacmp(&c->src, src);
1771 dst_match = !bacmp(&c->dst, dst);
1772 if (src_match && dst_match) {
1773 read_unlock(&chan_list_lock);
1774 return c;
1775 }
1776
1777 /* Closest match */
1778 src_any = !bacmp(&c->src, BDADDR_ANY);
1779 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1780 if ((src_match && dst_any) || (src_any && dst_match) ||
1781 (src_any && dst_any))
1782 c1 = c;
1783 }
1784 }
1785
1786 read_unlock(&chan_list_lock);
1787
1788 return c1;
1789 }
1790
1791 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1792 bdaddr_t *dst, u8 dst_type)
1793 {
1794 struct l2cap_conn *conn;
1795 struct hci_conn *hcon;
1796 struct hci_dev *hdev;
1797 __u8 auth_type;
1798 int err;
1799
1800 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
1801 dst_type, __le16_to_cpu(psm));
1802
1803 hdev = hci_get_route(dst, &chan->src);
1804 if (!hdev)
1805 return -EHOSTUNREACH;
1806
1807 hci_dev_lock(hdev);
1808
1809 l2cap_chan_lock(chan);
1810
1811 /* PSM must be odd and lsb of upper byte must be 0 */
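/* e.g. PSM 0x1001 passes this check; 0x1002 (even) or 0x0101 (lsb of the
 * upper byte set) would be rejected.
 */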
1812 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1813 chan->chan_type != L2CAP_CHAN_RAW) {
1814 err = -EINVAL;
1815 goto done;
1816 }
1817
1818 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1819 err = -EINVAL;
1820 goto done;
1821 }
1822
1823 switch (chan->mode) {
1824 case L2CAP_MODE_BASIC:
1825 break;
1826 case L2CAP_MODE_ERTM:
1827 case L2CAP_MODE_STREAMING:
1828 if (!disable_ertm)
1829 break;
1830 /* fall through */
1831 default:
1832 err = -ENOTSUPP;
1833 goto done;
1834 }
1835
1836 switch (chan->state) {
1837 case BT_CONNECT:
1838 case BT_CONNECT2:
1839 case BT_CONFIG:
1840 /* Already connecting */
1841 err = 0;
1842 goto done;
1843
1844 case BT_CONNECTED:
1845 /* Already connected */
1846 err = -EISCONN;
1847 goto done;
1848
1849 case BT_OPEN:
1850 case BT_BOUND:
1851 /* Can connect */
1852 break;
1853
1854 default:
1855 err = -EBADFD;
1856 goto done;
1857 }
1858
1859 /* Set destination address and psm */
1860 bacpy(&chan->dst, dst);
1861 chan->dst_type = dst_type;
1862
1863 chan->psm = psm;
1864 chan->dcid = cid;
1865
1866 auth_type = l2cap_get_auth_type(chan);
1867
1868 if (bdaddr_type_is_le(dst_type))
1869 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1870 chan->sec_level, auth_type);
1871 else
1872 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1873 chan->sec_level, auth_type);
1874
1875 if (IS_ERR(hcon)) {
1876 err = PTR_ERR(hcon);
1877 goto done;
1878 }
1879
1880 conn = l2cap_conn_add(hcon);
1881 if (!conn) {
1882 hci_conn_drop(hcon);
1883 err = -ENOMEM;
1884 goto done;
1885 }
1886
1887 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1888 hci_conn_drop(hcon);
1889 err = -EBUSY;
1890 goto done;
1891 }
1892
1893 /* Update source addr of the socket */
1894 bacpy(&chan->src, &hcon->src);
1895 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1896
1897 l2cap_chan_unlock(chan);
1898 l2cap_chan_add(conn, chan);
1899 l2cap_chan_lock(chan);
1900
1901 /* l2cap_chan_add takes its own ref so we can drop this one */
1902 hci_conn_drop(hcon);
1903
1904 l2cap_state_change(chan, BT_CONNECT);
1905 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
1906
1907 if (hcon->state == BT_CONNECTED) {
1908 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1909 __clear_chan_timer(chan);
1910 if (l2cap_chan_check_security(chan))
1911 l2cap_state_change(chan, BT_CONNECTED);
1912 } else
1913 l2cap_do_start(chan);
1914 }
1915
1916 err = 0;
1917
1918 done:
1919 l2cap_chan_unlock(chan);
1920 hci_dev_unlock(hdev);
1921 hci_dev_put(hdev);
1922 return err;
1923 }
1924
1925 static void l2cap_monitor_timeout(struct work_struct *work)
1926 {
1927 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1928 monitor_timer.work);
1929
1930 BT_DBG("chan %p", chan);
1931
1932 l2cap_chan_lock(chan);
1933
1934 if (!chan->conn) {
1935 l2cap_chan_unlock(chan);
1936 l2cap_chan_put(chan);
1937 return;
1938 }
1939
1940 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1941
1942 l2cap_chan_unlock(chan);
1943 l2cap_chan_put(chan);
1944 }
1945
1946 static void l2cap_retrans_timeout(struct work_struct *work)
1947 {
1948 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1949 retrans_timer.work);
1950
1951 BT_DBG("chan %p", chan);
1952
1953 l2cap_chan_lock(chan);
1954
1955 if (!chan->conn) {
1956 l2cap_chan_unlock(chan);
1957 l2cap_chan_put(chan);
1958 return;
1959 }
1960
1961 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1962 l2cap_chan_unlock(chan);
1963 l2cap_chan_put(chan);
1964 }
1965
1966 static void l2cap_streaming_send(struct l2cap_chan *chan,
1967 struct sk_buff_head *skbs)
1968 {
1969 struct sk_buff *skb;
1970 struct l2cap_ctrl *control;
1971
1972 BT_DBG("chan %p, skbs %p", chan, skbs);
1973
1974 if (__chan_is_moving(chan))
1975 return;
1976
1977 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1978
1979 while (!skb_queue_empty(&chan->tx_q)) {
1980
1981 skb = skb_dequeue(&chan->tx_q);
1982
1983 bt_cb(skb)->control.retries = 1;
1984 control = &bt_cb(skb)->control;
1985
1986 control->reqseq = 0;
1987 control->txseq = chan->next_tx_seq;
1988
1989 __pack_control(chan, control, skb);
1990
1991 if (chan->fcs == L2CAP_FCS_CRC16) {
1992 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1993 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1994 }
1995
1996 l2cap_do_send(chan, skb);
1997
1998 BT_DBG("Sent txseq %u", control->txseq);
1999
2000 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2001 chan->frames_sent++;
2002 }
2003 }
2004
2005 static int l2cap_ertm_send(struct l2cap_chan *chan)
2006 {
2007 struct sk_buff *skb, *tx_skb;
2008 struct l2cap_ctrl *control;
2009 int sent = 0;
2010
2011 BT_DBG("chan %p", chan);
2012
2013 if (chan->state != BT_CONNECTED)
2014 return -ENOTCONN;
2015
2016 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2017 return 0;
2018
2019 if (__chan_is_moving(chan))
2020 return 0;
2021
2022 while (chan->tx_send_head &&
2023 chan->unacked_frames < chan->remote_tx_win &&
2024 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2025
2026 skb = chan->tx_send_head;
2027
2028 bt_cb(skb)->control.retries = 1;
2029 control = &bt_cb(skb)->control;
2030
2031 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2032 control->final = 1;
2033
2034 control->reqseq = chan->buffer_seq;
2035 chan->last_acked_seq = chan->buffer_seq;
2036 control->txseq = chan->next_tx_seq;
2037
2038 __pack_control(chan, control, skb);
2039
2040 if (chan->fcs == L2CAP_FCS_CRC16) {
2041 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2042 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2043 }
2044
2045 /* Clone after data has been modified. Data is assumed to be
2046 * read-only (for locking purposes) on cloned sk_buffs.
2047 */
2048 tx_skb = skb_clone(skb, GFP_KERNEL);
2049
2050 if (!tx_skb)
2051 break;
2052
2053 __set_retrans_timer(chan);
2054
2055 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2056 chan->unacked_frames++;
2057 chan->frames_sent++;
2058 sent++;
2059
2060 if (skb_queue_is_last(&chan->tx_q, skb))
2061 chan->tx_send_head = NULL;
2062 else
2063 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2064
2065 l2cap_do_send(chan, tx_skb);
2066 BT_DBG("Sent txseq %u", control->txseq);
2067 }
2068
2069 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2070 chan->unacked_frames, skb_queue_len(&chan->tx_q));
2071
2072 return sent;
2073 }
2074
2075 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2076 {
2077 struct l2cap_ctrl control;
2078 struct sk_buff *skb;
2079 struct sk_buff *tx_skb;
2080 u16 seq;
2081
2082 BT_DBG("chan %p", chan);
2083
2084 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2085 return;
2086
2087 if (__chan_is_moving(chan))
2088 return;
2089
2090 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2091 seq = l2cap_seq_list_pop(&chan->retrans_list);
2092
2093 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2094 if (!skb) {
2095 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2096 seq);
2097 continue;
2098 }
2099
2100 bt_cb(skb)->control.retries++;
2101 control = bt_cb(skb)->control;
2102
2103 if (chan->max_tx != 0 &&
2104 bt_cb(skb)->control.retries > chan->max_tx) {
2105 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2106 l2cap_send_disconn_req(chan, ECONNRESET);
2107 l2cap_seq_list_clear(&chan->retrans_list);
2108 break;
2109 }
2110
2111 control.reqseq = chan->buffer_seq;
2112 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2113 control.final = 1;
2114 else
2115 control.final = 0;
2116
2117 if (skb_cloned(skb)) {
2118 /* Cloned sk_buffs are read-only, so we need a
2119 * writeable copy
2120 */
2121 tx_skb = skb_copy(skb, GFP_KERNEL);
2122 } else {
2123 tx_skb = skb_clone(skb, GFP_KERNEL);
2124 }
2125
2126 if (!tx_skb) {
2127 l2cap_seq_list_clear(&chan->retrans_list);
2128 break;
2129 }
2130
2131 /* Update skb contents */
2132 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2133 put_unaligned_le32(__pack_extended_control(&control),
2134 tx_skb->data + L2CAP_HDR_SIZE);
2135 } else {
2136 put_unaligned_le16(__pack_enhanced_control(&control),
2137 tx_skb->data + L2CAP_HDR_SIZE);
2138 }
2139
2140 if (chan->fcs == L2CAP_FCS_CRC16) {
2141 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2142 put_unaligned_le16(fcs, skb_put(tx_skb,
2143 L2CAP_FCS_SIZE));
2144 }
2145
2146 l2cap_do_send(chan, tx_skb);
2147
2148 BT_DBG("Resent txseq %d", control.txseq);
2149
2150 chan->last_acked_seq = chan->buffer_seq;
2151 }
2152 }
2153
2154 static void l2cap_retransmit(struct l2cap_chan *chan,
2155 struct l2cap_ctrl *control)
2156 {
2157 BT_DBG("chan %p, control %p", chan, control);
2158
2159 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2160 l2cap_ertm_resend(chan);
2161 }
2162
2163 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2164 struct l2cap_ctrl *control)
2165 {
2166 struct sk_buff *skb;
2167
2168 BT_DBG("chan %p, control %p", chan, control);
2169
2170 if (control->poll)
2171 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2172
2173 l2cap_seq_list_clear(&chan->retrans_list);
2174
2175 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2176 return;
2177
2178 if (chan->unacked_frames) {
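/* Descriptive note: the first walk locates the frame whose txseq matches
 * the peer's reqseq (the oldest unacknowledged frame); the second walk
 * then queues that frame and every later frame, up to but not including
 * tx_send_head, for retransmission.
 */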
2179 skb_queue_walk(&chan->tx_q, skb) {
2180 if (bt_cb(skb)->control.txseq == control->reqseq ||
2181 skb == chan->tx_send_head)
2182 break;
2183 }
2184
2185 skb_queue_walk_from(&chan->tx_q, skb) {
2186 if (skb == chan->tx_send_head)
2187 break;
2188
2189 l2cap_seq_list_append(&chan->retrans_list,
2190 bt_cb(skb)->control.txseq);
2191 }
2192
2193 l2cap_ertm_resend(chan);
2194 }
2195 }
2196
2197 static void l2cap_send_ack(struct l2cap_chan *chan)
2198 {
2199 struct l2cap_ctrl control;
2200 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2201 chan->last_acked_seq);
2202 int threshold;
2203
2204 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2205 chan, chan->last_acked_seq, chan->buffer_seq);
2206
2207 memset(&control, 0, sizeof(control));
2208 control.sframe = 1;
2209
2210 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2211 chan->rx_state == L2CAP_RX_STATE_RECV) {
2212 __clear_ack_timer(chan);
2213 control.super = L2CAP_SUPER_RNR;
2214 control.reqseq = chan->buffer_seq;
2215 l2cap_send_sframe(chan, &control);
2216 } else {
2217 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2218 l2cap_ertm_send(chan);
2219 /* If any i-frames were sent, they included an ack */
2220 if (chan->buffer_seq == chan->last_acked_seq)
2221 frames_to_ack = 0;
2222 }
2223
2224 /* Ack now if the window is 3/4ths full.
2225 * Calculate without mul or div
2226 */
2227 threshold = chan->ack_win;
2228 threshold += threshold << 1;
2229 threshold >>= 2;
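/* Illustrative example: for ack_win = 63, threshold = (63 + 126) >> 2 = 47,
 * which approximates 3/4 * 63 = 47.25 using only a shift and an add,
 * avoiding multiply/divide on this path.
 */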
2230
2231 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2232 threshold);
2233
2234 if (frames_to_ack >= threshold) {
2235 __clear_ack_timer(chan);
2236 control.super = L2CAP_SUPER_RR;
2237 control.reqseq = chan->buffer_seq;
2238 l2cap_send_sframe(chan, &control);
2239 frames_to_ack = 0;
2240 }
2241
2242 if (frames_to_ack)
2243 __set_ack_timer(chan);
2244 }
2245 }
2246
2247 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2248 struct msghdr *msg, int len,
2249 int count, struct sk_buff *skb)
2250 {
2251 struct l2cap_conn *conn = chan->conn;
2252 struct sk_buff **frag;
2253 int sent = 0;
2254
2255 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2256 return -EFAULT;
2257
2258 sent += count;
2259 len -= count;
2260
2261 /* Continuation fragments (no L2CAP header) */
2262 frag = &skb_shinfo(skb)->frag_list;
2263 while (len) {
2264 struct sk_buff *tmp;
2265
2266 count = min_t(unsigned int, conn->mtu, len);
2267
2268 tmp = chan->ops->alloc_skb(chan, count,
2269 msg->msg_flags & MSG_DONTWAIT);
2270 if (IS_ERR(tmp))
2271 return PTR_ERR(tmp);
2272
2273 *frag = tmp;
2274
2275 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2276 return -EFAULT;
2277
2278 (*frag)->priority = skb->priority;
2279
2280 sent += count;
2281 len -= count;
2282
2283 skb->len += (*frag)->len;
2284 skb->data_len += (*frag)->len;
2285
2286 frag = &(*frag)->next;
2287 }
2288
2289 return sent;
2290 }
2291
2292 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2293 struct msghdr *msg, size_t len,
2294 u32 priority)
2295 {
2296 struct l2cap_conn *conn = chan->conn;
2297 struct sk_buff *skb;
2298 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2299 struct l2cap_hdr *lh;
2300
2301 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2302 __le16_to_cpu(chan->psm), len, priority);
2303
2304 count = min_t(unsigned int, (conn->mtu - hlen), len);
2305
2306 skb = chan->ops->alloc_skb(chan, count + hlen,
2307 msg->msg_flags & MSG_DONTWAIT);
2308 if (IS_ERR(skb))
2309 return skb;
2310
2311 skb->priority = priority;
2312
2313 /* Create L2CAP header */
2314 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2315 lh->cid = cpu_to_le16(chan->dcid);
2316 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2317 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2318
2319 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2320 if (unlikely(err < 0)) {
2321 kfree_skb(skb);
2322 return ERR_PTR(err);
2323 }
2324 return skb;
2325 }
2326
2327 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2328 struct msghdr *msg, size_t len,
2329 u32 priority)
2330 {
2331 struct l2cap_conn *conn = chan->conn;
2332 struct sk_buff *skb;
2333 int err, count;
2334 struct l2cap_hdr *lh;
2335
2336 BT_DBG("chan %p len %zu", chan, len);
2337
2338 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2339
2340 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2341 msg->msg_flags & MSG_DONTWAIT);
2342 if (IS_ERR(skb))
2343 return skb;
2344
2345 skb->priority = priority;
2346
2347 /* Create L2CAP header */
2348 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2349 lh->cid = cpu_to_le16(chan->dcid);
2350 lh->len = cpu_to_le16(len);
2351
2352 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2353 if (unlikely(err < 0)) {
2354 kfree_skb(skb);
2355 return ERR_PTR(err);
2356 }
2357 return skb;
2358 }
2359
2360 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2361 struct msghdr *msg, size_t len,
2362 u16 sdulen)
2363 {
2364 struct l2cap_conn *conn = chan->conn;
2365 struct sk_buff *skb;
2366 int err, count, hlen;
2367 struct l2cap_hdr *lh;
2368
2369 BT_DBG("chan %p len %zu", chan, len);
2370
2371 if (!conn)
2372 return ERR_PTR(-ENOTCONN);
2373
2374 hlen = __ertm_hdr_size(chan);
2375
2376 if (sdulen)
2377 hlen += L2CAP_SDULEN_SIZE;
2378
2379 if (chan->fcs == L2CAP_FCS_CRC16)
2380 hlen += L2CAP_FCS_SIZE;
2381
2382 count = min_t(unsigned int, (conn->mtu - hlen), len);
2383
2384 skb = chan->ops->alloc_skb(chan, count + hlen,
2385 msg->msg_flags & MSG_DONTWAIT);
2386 if (IS_ERR(skb))
2387 return skb;
2388
2389 /* Create L2CAP header */
2390 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2391 lh->cid = cpu_to_le16(chan->dcid);
2392 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2393
2394 /* Control header is populated later */
2395 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2396 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2397 else
2398 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2399
2400 if (sdulen)
2401 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2402
2403 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2404 if (unlikely(err < 0)) {
2405 kfree_skb(skb);
2406 return ERR_PTR(err);
2407 }
2408
2409 bt_cb(skb)->control.fcs = chan->fcs;
2410 bt_cb(skb)->control.retries = 0;
2411 return skb;
2412 }
2413
2414 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2415 struct sk_buff_head *seg_queue,
2416 struct msghdr *msg, size_t len)
2417 {
2418 struct sk_buff *skb;
2419 u16 sdu_len;
2420 size_t pdu_len;
2421 u8 sar;
2422
2423 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2424
2425 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2426 * so fragmented skbs are not used. The HCI layer's handling
2427 * of fragmented skbs is not compatible with ERTM's queueing.
2428 */
2429
2430 /* PDU size is derived from the HCI MTU */
2431 pdu_len = chan->conn->mtu;
2432
2433 /* Constrain PDU size for BR/EDR connections */
2434 if (!chan->hs_hcon)
2435 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2436
2437 /* Adjust for largest possible L2CAP overhead. */
2438 if (chan->fcs)
2439 pdu_len -= L2CAP_FCS_SIZE;
2440
2441 pdu_len -= __ertm_hdr_size(chan);
2442
2443 /* Remote device may have requested smaller PDUs */
2444 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
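/* Illustrative example (values assumed, they depend on the controller and
 * negotiated options): with an HCI ACL MTU of 1021 bytes on a plain BR/EDR
 * link, CRC16 FCS enabled and the enhanced (non-extended) control field,
 * the steps above give roughly
 *	pdu_len = min(1021, L2CAP_BREDR_MAX_PAYLOAD) - 2 (FCS) - 6 (hdr)
 * before the final clamp to the remote device's MPS.
 */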
2445
2446 if (len <= pdu_len) {
2447 sar = L2CAP_SAR_UNSEGMENTED;
2448 sdu_len = 0;
2449 pdu_len = len;
2450 } else {
2451 sar = L2CAP_SAR_START;
2452 sdu_len = len;
2453 pdu_len -= L2CAP_SDULEN_SIZE;
2454 }
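/* Illustrative example: a 2500-byte SDU with an effective pdu_len of 1000
 * is emitted by the loop below as three I-frames:
 *	SAR_START    carrying 998 bytes (pdu_len shrinks by the 2-byte SDU
 *	             length field, which holds the total of 2500),
 *	SAR_CONTINUE carrying 1000 bytes,
 *	SAR_END      carrying the remaining 502 bytes.
 */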
2455
2456 while (len > 0) {
2457 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2458
2459 if (IS_ERR(skb)) {
2460 __skb_queue_purge(seg_queue);
2461 return PTR_ERR(skb);
2462 }
2463
2464 bt_cb(skb)->control.sar = sar;
2465 __skb_queue_tail(seg_queue, skb);
2466
2467 len -= pdu_len;
2468 if (sdu_len) {
2469 sdu_len = 0;
2470 pdu_len += L2CAP_SDULEN_SIZE;
2471 }
2472
2473 if (len <= pdu_len) {
2474 sar = L2CAP_SAR_END;
2475 pdu_len = len;
2476 } else {
2477 sar = L2CAP_SAR_CONTINUE;
2478 }
2479 }
2480
2481 return 0;
2482 }
2483
2484 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2485 u32 priority)
2486 {
2487 struct sk_buff *skb;
2488 int err;
2489 struct sk_buff_head seg_queue;
2490
2491 if (!chan->conn)
2492 return -ENOTCONN;
2493
2494 /* Connectionless channel */
2495 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2496 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2497 if (IS_ERR(skb))
2498 return PTR_ERR(skb);
2499
2500 l2cap_do_send(chan, skb);
2501 return len;
2502 }
2503
2504 switch (chan->mode) {
2505 case L2CAP_MODE_BASIC:
2506 /* Check outgoing MTU */
2507 if (len > chan->omtu)
2508 return -EMSGSIZE;
2509
2510 /* Create a basic PDU */
2511 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2512 if (IS_ERR(skb))
2513 return PTR_ERR(skb);
2514
2515 l2cap_do_send(chan, skb);
2516 err = len;
2517 break;
2518
2519 case L2CAP_MODE_ERTM:
2520 case L2CAP_MODE_STREAMING:
2521 /* Check outgoing MTU */
2522 if (len > chan->omtu) {
2523 err = -EMSGSIZE;
2524 break;
2525 }
2526
2527 __skb_queue_head_init(&seg_queue);
2528
2529 /* Do segmentation before calling in to the state machine,
2530 * since it's possible to block while waiting for memory
2531 * allocation.
2532 */
2533 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2534
2535 /* The channel could have been closed while segmenting,
2536 * check that it is still connected.
2537 */
2538 if (chan->state != BT_CONNECTED) {
2539 __skb_queue_purge(&seg_queue);
2540 err = -ENOTCONN;
2541 }
2542
2543 if (err)
2544 break;
2545
2546 if (chan->mode == L2CAP_MODE_ERTM)
2547 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2548 else
2549 l2cap_streaming_send(chan, &seg_queue);
2550
2551 err = len;
2552
2553 /* If the skbs were not queued for sending, they'll still be in
2554 * seg_queue and need to be purged.
2555 */
2556 __skb_queue_purge(&seg_queue);
2557 break;
2558
2559 default:
2560 BT_DBG("bad mode 0x%2.2x", chan->mode);
2561 err = -EBADFD;
2562 }
2563
2564 return err;
2565 }
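/* Usage sketch (illustrative, not part of this file): a caller such as the
 * socket layer is expected to hold the channel lock around the call,
 * roughly along these lines, with priority typically taken from
 * sk->sk_priority:
 *
 *	l2cap_chan_lock(chan);
 *	err = l2cap_chan_send(chan, msg, len, priority);
 *	l2cap_chan_unlock(chan);
 *
 * A positive return value is the number of bytes accepted; a negative value
 * is an errno such as -ENOTCONN, -EMSGSIZE or -EBADFD from the checks above.
 */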
2566
2567 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2568 {
2569 struct l2cap_ctrl control;
2570 u16 seq;
2571
2572 BT_DBG("chan %p, txseq %u", chan, txseq);
2573
2574 memset(&control, 0, sizeof(control));
2575 control.sframe = 1;
2576 control.super = L2CAP_SUPER_SREJ;
2577
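/* Descriptive note: for every sequence number from the next expected txseq
 * up to (but not including) the txseq that just arrived out of order, send
 * an SREJ requesting retransmission, unless that frame is already buffered
 * in srej_q, and record it in srej_list.
 */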
2578 for (seq = chan->expected_tx_seq; seq != txseq;
2579 seq = __next_seq(chan, seq)) {
2580 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2581 control.reqseq = seq;
2582 l2cap_send_sframe(chan, &control);
2583 l2cap_seq_list_append(&chan->srej_list, seq);
2584 }
2585 }
2586
2587 chan->expected_tx_seq = __next_seq(chan, txseq);
2588 }
2589
2590 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2591 {
2592 struct l2cap_ctrl control;
2593
2594 BT_DBG("chan %p", chan);
2595
2596 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2597 return;
2598
2599 memset(&control, 0, sizeof(control));
2600 control.sframe = 1;
2601 control.super = L2CAP_SUPER_SREJ;
2602 control.reqseq = chan->srej_list.tail;
2603 l2cap_send_sframe(chan, &control);
2604 }
2605
2606 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2607 {
2608 struct l2cap_ctrl control;
2609 u16 initial_head;
2610 u16 seq;
2611
2612 BT_DBG("chan %p, txseq %u", chan, txseq);
2613
2614 memset(&control, 0, sizeof(control));
2615 control.sframe = 1;
2616 control.super = L2CAP_SUPER_SREJ;
2617
2618 /* Capture initial list head to allow only one pass through the list. */
2619 initial_head = chan->srej_list.head;
2620
2621 do {
2622 seq = l2cap_seq_list_pop(&chan->srej_list);
2623 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2624 break;
2625
2626 control.reqseq = seq;
2627 l2cap_send_sframe(chan, &control);
2628 l2cap_seq_list_append(&chan->srej_list, seq);
2629 } while (chan->srej_list.head != initial_head);
2630 }
2631
2632 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2633 {
2634 struct sk_buff *acked_skb;
2635 u16 ackseq;
2636
2637 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2638
2639 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2640 return;
2641
2642 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2643 chan->expected_ack_seq, chan->unacked_frames);
2644
2645 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2646 ackseq = __next_seq(chan, ackseq)) {
2647
2648 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2649 if (acked_skb) {
2650 skb_unlink(acked_skb, &chan->tx_q);
2651 kfree_skb(acked_skb);
2652 chan->unacked_frames--;
2653 }
2654 }
2655
2656 chan->expected_ack_seq = reqseq;
2657
2658 if (chan->unacked_frames == 0)
2659 __clear_retrans_timer(chan);
2660
2661 BT_DBG("unacked_frames %u", chan->unacked_frames);
2662 }
2663
2664 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2665 {
2666 BT_DBG("chan %p", chan);
2667
2668 chan->expected_tx_seq = chan->buffer_seq;
2669 l2cap_seq_list_clear(&chan->srej_list);
2670 skb_queue_purge(&chan->srej_q);
2671 chan->rx_state = L2CAP_RX_STATE_RECV;
2672 }
2673
2674 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2675 struct l2cap_ctrl *control,
2676 struct sk_buff_head *skbs, u8 event)
2677 {
2678 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2679 event);
2680
2681 switch (event) {
2682 case L2CAP_EV_DATA_REQUEST:
2683 if (chan->tx_send_head == NULL)
2684 chan->tx_send_head = skb_peek(skbs);
2685
2686 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2687 l2cap_ertm_send(chan);
2688 break;
2689 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2690 BT_DBG("Enter LOCAL_BUSY");
2691 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2692
2693 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2694 /* The SREJ_SENT state must be aborted if we are to
2695 * enter the LOCAL_BUSY state.
2696 */
2697 l2cap_abort_rx_srej_sent(chan);
2698 }
2699
2700 l2cap_send_ack(chan);
2701
2702 break;
2703 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2704 BT_DBG("Exit LOCAL_BUSY");
2705 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2706
2707 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2708 struct l2cap_ctrl local_control;
2709
2710 memset(&local_control, 0, sizeof(local_control));
2711 local_control.sframe = 1;
2712 local_control.super = L2CAP_SUPER_RR;
2713 local_control.poll = 1;
2714 local_control.reqseq = chan->buffer_seq;
2715 l2cap_send_sframe(chan, &local_control);
2716
2717 chan->retry_count = 1;
2718 __set_monitor_timer(chan);
2719 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2720 }
2721 break;
2722 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2723 l2cap_process_reqseq(chan, control->reqseq);
2724 break;
2725 case L2CAP_EV_EXPLICIT_POLL:
2726 l2cap_send_rr_or_rnr(chan, 1);
2727 chan->retry_count = 1;
2728 __set_monitor_timer(chan);
2729 __clear_ack_timer(chan);
2730 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2731 break;
2732 case L2CAP_EV_RETRANS_TO:
2733 l2cap_send_rr_or_rnr(chan, 1);
2734 chan->retry_count = 1;
2735 __set_monitor_timer(chan);
2736 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2737 break;
2738 case L2CAP_EV_RECV_FBIT:
2739 /* Nothing to process */
2740 break;
2741 default:
2742 break;
2743 }
2744 }
2745
2746 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2747 struct l2cap_ctrl *control,
2748 struct sk_buff_head *skbs, u8 event)
2749 {
2750 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2751 event);
2752
2753 switch (event) {
2754 case L2CAP_EV_DATA_REQUEST:
2755 if (chan->tx_send_head == NULL)
2756 chan->tx_send_head = skb_peek(skbs);
2757 /* Queue data, but don't send. */
2758 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2759 break;
2760 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2761 BT_DBG("Enter LOCAL_BUSY");
2762 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2763
2764 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2765 /* The SREJ_SENT state must be aborted if we are to
2766 * enter the LOCAL_BUSY state.
2767 */
2768 l2cap_abort_rx_srej_sent(chan);
2769 }
2770
2771 l2cap_send_ack(chan);
2772
2773 break;
2774 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2775 BT_DBG("Exit LOCAL_BUSY");
2776 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2777
2778 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2779 struct l2cap_ctrl local_control;
2780 memset(&local_control, 0, sizeof(local_control));
2781 local_control.sframe = 1;
2782 local_control.super = L2CAP_SUPER_RR;
2783 local_control.poll = 1;
2784 local_control.reqseq = chan->buffer_seq;
2785 l2cap_send_sframe(chan, &local_control);
2786
2787 chan->retry_count = 1;
2788 __set_monitor_timer(chan);
2789 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2790 }
2791 break;
2792 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2793 l2cap_process_reqseq(chan, control->reqseq);
2794
2795 /* Fall through */
2796
2797 case L2CAP_EV_RECV_FBIT:
2798 if (control && control->final) {
2799 __clear_monitor_timer(chan);
2800 if (chan->unacked_frames > 0)
2801 __set_retrans_timer(chan);
2802 chan->retry_count = 0;
2803 chan->tx_state = L2CAP_TX_STATE_XMIT;
2804 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2805 }
2806 break;
2807 case L2CAP_EV_EXPLICIT_POLL:
2808 /* Ignore */
2809 break;
2810 case L2CAP_EV_MONITOR_TO:
2811 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2812 l2cap_send_rr_or_rnr(chan, 1);
2813 __set_monitor_timer(chan);
2814 chan->retry_count++;
2815 } else {
2816 l2cap_send_disconn_req(chan, ECONNABORTED);
2817 }
2818 break;
2819 default:
2820 break;
2821 }
2822 }
2823
2824 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2825 struct sk_buff_head *skbs, u8 event)
2826 {
2827 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2828 chan, control, skbs, event, chan->tx_state);
2829
2830 switch (chan->tx_state) {
2831 case L2CAP_TX_STATE_XMIT:
2832 l2cap_tx_state_xmit(chan, control, skbs, event);
2833 break;
2834 case L2CAP_TX_STATE_WAIT_F:
2835 l2cap_tx_state_wait_f(chan, control, skbs, event);
2836 break;
2837 default:
2838 /* Ignore event */
2839 break;
2840 }
2841 }
2842
2843 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2844 struct l2cap_ctrl *control)
2845 {
2846 BT_DBG("chan %p, control %p", chan, control);
2847 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2848 }
2849
2850 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2851 struct l2cap_ctrl *control)
2852 {
2853 BT_DBG("chan %p, control %p", chan, control);
2854 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2855 }
2856
2857 /* Copy frame to all raw sockets on that connection */
2858 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2859 {
2860 struct sk_buff *nskb;
2861 struct l2cap_chan *chan;
2862
2863 BT_DBG("conn %p", conn);
2864
2865 mutex_lock(&conn->chan_lock);
2866
2867 list_for_each_entry(chan, &conn->chan_l, list) {
2868 if (chan->chan_type != L2CAP_CHAN_RAW)
2869 continue;
2870
2871 /* Don't send frame to the channel it came from */
2872 if (bt_cb(skb)->chan == chan)
2873 continue;
2874
2875 nskb = skb_clone(skb, GFP_KERNEL);
2876 if (!nskb)
2877 continue;
2878 if (chan->ops->recv(chan, nskb))
2879 kfree_skb(nskb);
2880 }
2881
2882 mutex_unlock(&conn->chan_lock);
2883 }
2884
2885 /* ---- L2CAP signalling commands ---- */
2886 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2887 u8 ident, u16 dlen, void *data)
2888 {
2889 struct sk_buff *skb, **frag;
2890 struct l2cap_cmd_hdr *cmd;
2891 struct l2cap_hdr *lh;
2892 int len, count;
2893
2894 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2895 conn, code, ident, dlen);
2896
2897 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2898 return NULL;
2899
2900 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2901 count = min_t(unsigned int, conn->mtu, len);
2902
2903 skb = bt_skb_alloc(count, GFP_KERNEL);
2904 if (!skb)
2905 return NULL;
2906
2907 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2908 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2909
2910 if (conn->hcon->type == LE_LINK)
2911 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2912 else
2913 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2914
2915 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2916 cmd->code = code;
2917 cmd->ident = ident;
2918 cmd->len = cpu_to_le16(dlen);
2919
2920 if (dlen) {
2921 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2922 memcpy(skb_put(skb, count), data, count);
2923 data += count;
2924 }
2925
2926 len -= skb->len;
2927
2928 /* Continuation fragments (no L2CAP header) */
2929 frag = &skb_shinfo(skb)->frag_list;
2930 while (len) {
2931 count = min_t(unsigned int, conn->mtu, len);
2932
2933 *frag = bt_skb_alloc(count, GFP_KERNEL);
2934 if (!*frag)
2935 goto fail;
2936
2937 memcpy(skb_put(*frag, count), data, count);
2938
2939 len -= count;
2940 data += count;
2941
2942 frag = &(*frag)->next;
2943 }
2944
2945 return skb;
2946
2947 fail:
2948 kfree_skb(skb);
2949 return NULL;
2950 }
2951
2952 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2953 unsigned long *val)
2954 {
2955 struct l2cap_conf_opt *opt = *ptr;
2956 int len;
2957
2958 len = L2CAP_CONF_OPT_SIZE + opt->len;
2959 *ptr += len;
2960
2961 *type = opt->type;
2962 *olen = opt->len;
2963
2964 switch (opt->len) {
2965 case 1:
2966 *val = *((u8 *) opt->val);
2967 break;
2968
2969 case 2:
2970 *val = get_unaligned_le16(opt->val);
2971 break;
2972
2973 case 4:
2974 *val = get_unaligned_le32(opt->val);
2975 break;
2976
2977 default:
2978 *val = (unsigned long) opt->val;
2979 break;
2980 }
2981
2982 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2983 return len;
2984 }
2985
2986 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2987 {
2988 struct l2cap_conf_opt *opt = *ptr;
2989
2990 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2991
2992 opt->type = type;
2993 opt->len = len;
2994
2995 switch (len) {
2996 case 1:
2997 *((u8 *) opt->val) = val;
2998 break;
2999
3000 case 2:
3001 put_unaligned_le16(val, opt->val);
3002 break;
3003
3004 case 4:
3005 put_unaligned_le32(val, opt->val);
3006 break;
3007
3008 default:
3009 memcpy(opt->val, (void *) val, len);
3010 break;
3011 }
3012
3013 *ptr += L2CAP_CONF_OPT_SIZE + len;
3014 }
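/* Illustrative example: options are packed as simple type/length/value
 * triples. An MTU option carrying a 672-byte MTU occupies
 * L2CAP_CONF_OPT_SIZE + 2 bytes on the wire:
 *
 *	type = L2CAP_CONF_MTU, len = 2, val = 672 (little endian)
 *
 * which is what l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, 672) emits,
 * with *ptr advanced past the option.
 */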
3015
3016 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3017 {
3018 struct l2cap_conf_efs efs;
3019
3020 switch (chan->mode) {
3021 case L2CAP_MODE_ERTM:
3022 efs.id = chan->local_id;
3023 efs.stype = chan->local_stype;
3024 efs.msdu = cpu_to_le16(chan->local_msdu);
3025 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3026 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3027 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3028 break;
3029
3030 case L2CAP_MODE_STREAMING:
3031 efs.id = 1;
3032 efs.stype = L2CAP_SERV_BESTEFFORT;
3033 efs.msdu = cpu_to_le16(chan->local_msdu);
3034 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3035 efs.acc_lat = 0;
3036 efs.flush_to = 0;
3037 break;
3038
3039 default:
3040 return;
3041 }
3042
3043 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3044 (unsigned long) &efs);
3045 }
3046
3047 static void l2cap_ack_timeout(struct work_struct *work)
3048 {
3049 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3050 ack_timer.work);
3051 u16 frames_to_ack;
3052
3053 BT_DBG("chan %p", chan);
3054
3055 l2cap_chan_lock(chan);
3056
3057 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3058 chan->last_acked_seq);
3059
3060 if (frames_to_ack)
3061 l2cap_send_rr_or_rnr(chan, 0);
3062
3063 l2cap_chan_unlock(chan);
3064 l2cap_chan_put(chan);
3065 }
3066
3067 int l2cap_ertm_init(struct l2cap_chan *chan)
3068 {
3069 int err;
3070
3071 chan->next_tx_seq = 0;
3072 chan->expected_tx_seq = 0;
3073 chan->expected_ack_seq = 0;
3074 chan->unacked_frames = 0;
3075 chan->buffer_seq = 0;
3076 chan->frames_sent = 0;
3077 chan->last_acked_seq = 0;
3078 chan->sdu = NULL;
3079 chan->sdu_last_frag = NULL;
3080 chan->sdu_len = 0;
3081
3082 skb_queue_head_init(&chan->tx_q);
3083
3084 chan->local_amp_id = AMP_ID_BREDR;
3085 chan->move_id = AMP_ID_BREDR;
3086 chan->move_state = L2CAP_MOVE_STABLE;
3087 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3088
3089 if (chan->mode != L2CAP_MODE_ERTM)
3090 return 0;
3091
3092 chan->rx_state = L2CAP_RX_STATE_RECV;
3093 chan->tx_state = L2CAP_TX_STATE_XMIT;
3094
3095 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3096 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3097 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3098
3099 skb_queue_head_init(&chan->srej_q);
3100
3101 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3102 if (err < 0)
3103 return err;
3104
3105 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3106 if (err < 0)
3107 l2cap_seq_list_free(&chan->srej_list);
3108
3109 return err;
3110 }
3111
3112 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3113 {
3114 switch (mode) {
3115 case L2CAP_MODE_STREAMING:
3116 case L2CAP_MODE_ERTM:
3117 if (l2cap_mode_supported(mode, remote_feat_mask))
3118 return mode;
3119 /* fall through */
3120 default:
3121 return L2CAP_MODE_BASIC;
3122 }
3123 }
3124
3125 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3126 {
3127 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3128 }
3129
3130 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3131 {
3132 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3133 }
3134
3135 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3136 struct l2cap_conf_rfc *rfc)
3137 {
3138 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3139 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3140
3141 /* Class 1 devices must have ERTM timeouts
3142 * exceeding the Link Supervision Timeout. The
3143 * default Link Supervision Timeout for AMP
3144 * controllers is 10 seconds.
3145 *
3146 * Class 1 devices use 0xffffffff for their
3147 * best-effort flush timeout, so the clamping logic
3148 * will result in a timeout that meets the above
3149 * requirement. ERTM timeouts are 16-bit values, so
3150 * the maximum timeout is 65.535 seconds.
3151 */
3152
3153 /* Convert timeout to milliseconds and round */
3154 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3155
3156 /* This is the recommended formula for class 2 devices
3157 * that start ERTM timers when packets are sent to the
3158 * controller.
3159 */
3160 ertm_to = 3 * ertm_to + 500;
3161
3162 if (ertm_to > 0xffff)
3163 ertm_to = 0xffff;
3164
3165 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3166 rfc->monitor_timeout = rfc->retrans_timeout;
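/* Illustrative example: a Class 1 AMP controller reporting a best-effort
 * flush timeout of 0xffffffff (microseconds, per the conversion above)
 * yields roughly 4294968 ms after rounding; 3 * that + 500 overflows the
 * 16-bit field and is therefore clamped to 0xffff, i.e. the maximum
 * 65.535 second ERTM timeout mentioned above.
 */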
3167 } else {
3168 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3169 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3170 }
3171 }
3172
3173 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3174 {
3175 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3176 __l2cap_ews_supported(chan->conn)) {
3177 /* use extended control field */
3178 set_bit(FLAG_EXT_CTRL, &chan->flags);
3179 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3180 } else {
3181 chan->tx_win = min_t(u16, chan->tx_win,
3182 L2CAP_DEFAULT_TX_WINDOW);
3183 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3184 }
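/* Illustrative example: a requested tx_win of 100 is kept (and the extended
 * control field enabled) only when the connection supports the extended
 * window size option; otherwise it is clamped down to
 * L2CAP_DEFAULT_TX_WINDOW, assumed here to be 63 frames.
 */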
3185 chan->ack_win = chan->tx_win;
3186 }
3187
3188 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3189 {
3190 struct l2cap_conf_req *req = data;
3191 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3192 void *ptr = req->data;
3193 u16 size;
3194
3195 BT_DBG("chan %p", chan);
3196
3197 if (chan->num_conf_req || chan->num_conf_rsp)
3198 goto done;
3199
3200 switch (chan->mode) {
3201 case L2CAP_MODE_STREAMING:
3202 case L2CAP_MODE_ERTM:
3203 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3204 break;
3205
3206 if (__l2cap_efs_supported(chan->conn))
3207 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3208
3209 /* fall through */
3210 default:
3211 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3212 break;
3213 }
3214
3215 done:
3216 if (chan->imtu != L2CAP_DEFAULT_MTU)
3217 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3218
3219 switch (chan->mode) {
3220 case L2CAP_MODE_BASIC:
3221 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3222 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3223 break;
3224
3225 rfc.mode = L2CAP_MODE_BASIC;
3226 rfc.txwin_size = 0;
3227 rfc.max_transmit = 0;
3228 rfc.retrans_timeout = 0;
3229 rfc.monitor_timeout = 0;
3230 rfc.max_pdu_size = 0;
3231
3232 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3233 (unsigned long) &rfc);
3234 break;
3235
3236 case L2CAP_MODE_ERTM:
3237 rfc.mode = L2CAP_MODE_ERTM;
3238 rfc.max_transmit = chan->max_tx;
3239
3240 __l2cap_set_ertm_timeouts(chan, &rfc);
3241
3242 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3243 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3244 L2CAP_FCS_SIZE);
3245 rfc.max_pdu_size = cpu_to_le16(size);
3246
3247 l2cap_txwin_setup(chan);
3248
3249 rfc.txwin_size = min_t(u16, chan->tx_win,
3250 L2CAP_DEFAULT_TX_WINDOW);
3251
3252 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3253 (unsigned long) &rfc);
3254
3255 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3256 l2cap_add_opt_efs(&ptr, chan);
3257
3258 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3259 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3260 chan->tx_win);
3261
3262 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3263 if (chan->fcs == L2CAP_FCS_NONE ||
3264 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3265 chan->fcs = L2CAP_FCS_NONE;
3266 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3267 chan->fcs);
3268 }
3269 break;
3270
3271 case L2CAP_MODE_STREAMING:
3272 l2cap_txwin_setup(chan);
3273 rfc.mode = L2CAP_MODE_STREAMING;
3274 rfc.txwin_size = 0;
3275 rfc.max_transmit = 0;
3276 rfc.retrans_timeout = 0;
3277 rfc.monitor_timeout = 0;
3278
3279 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3280 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3281 L2CAP_FCS_SIZE);
3282 rfc.max_pdu_size = cpu_to_le16(size);
3283
3284 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3285 (unsigned long) &rfc);
3286
3287 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3288 l2cap_add_opt_efs(&ptr, chan);
3289
3290 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3291 if (chan->fcs == L2CAP_FCS_NONE ||
3292 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3293 chan->fcs = L2CAP_FCS_NONE;
3294 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3295 chan->fcs);
3296 }
3297 break;
3298 }
3299
3300 req->dcid = cpu_to_le16(chan->dcid);
3301 req->flags = __constant_cpu_to_le16(0);
3302
3303 return ptr - data;
3304 }
3305
3306 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3307 {
3308 struct l2cap_conf_rsp *rsp = data;
3309 void *ptr = rsp->data;
3310 void *req = chan->conf_req;
3311 int len = chan->conf_len;
3312 int type, hint, olen;
3313 unsigned long val;
3314 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3315 struct l2cap_conf_efs efs;
3316 u8 remote_efs = 0;
3317 u16 mtu = L2CAP_DEFAULT_MTU;
3318 u16 result = L2CAP_CONF_SUCCESS;
3319 u16 size;
3320
3321 BT_DBG("chan %p", chan);
3322
3323 while (len >= L2CAP_CONF_OPT_SIZE) {
3324 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3325
3326 hint = type & L2CAP_CONF_HINT;
3327 type &= L2CAP_CONF_MASK;
3328
3329 switch (type) {
3330 case L2CAP_CONF_MTU:
3331 mtu = val;
3332 break;
3333
3334 case L2CAP_CONF_FLUSH_TO:
3335 chan->flush_to = val;
3336 break;
3337
3338 case L2CAP_CONF_QOS:
3339 break;
3340
3341 case L2CAP_CONF_RFC:
3342 if (olen == sizeof(rfc))
3343 memcpy(&rfc, (void *) val, olen);
3344 break;
3345
3346 case L2CAP_CONF_FCS:
3347 if (val == L2CAP_FCS_NONE)
3348 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3349 break;
3350
3351 case L2CAP_CONF_EFS:
3352 remote_efs = 1;
3353 if (olen == sizeof(efs))
3354 memcpy(&efs, (void *) val, olen);
3355 break;
3356
3357 case L2CAP_CONF_EWS:
3358 if (!chan->conn->hs_enabled)
3359 return -ECONNREFUSED;
3360
3361 set_bit(FLAG_EXT_CTRL, &chan->flags);
3362 set_bit(CONF_EWS_RECV, &chan->conf_state);
3363 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3364 chan->remote_tx_win = val;
3365 break;
3366
3367 default:
3368 if (hint)
3369 break;
3370
3371 result = L2CAP_CONF_UNKNOWN;
3372 *((u8 *) ptr++) = type;
3373 break;
3374 }
3375 }
3376
3377 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3378 goto done;
3379
3380 switch (chan->mode) {
3381 case L2CAP_MODE_STREAMING:
3382 case L2CAP_MODE_ERTM:
3383 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3384 chan->mode = l2cap_select_mode(rfc.mode,
3385 chan->conn->feat_mask);
3386 break;
3387 }
3388
3389 if (remote_efs) {
3390 if (__l2cap_efs_supported(chan->conn))
3391 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3392 else
3393 return -ECONNREFUSED;
3394 }
3395
3396 if (chan->mode != rfc.mode)
3397 return -ECONNREFUSED;
3398
3399 break;
3400 }
3401
3402 done:
3403 if (chan->mode != rfc.mode) {
3404 result = L2CAP_CONF_UNACCEPT;
3405 rfc.mode = chan->mode;
3406
3407 if (chan->num_conf_rsp == 1)
3408 return -ECONNREFUSED;
3409
3410 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3411 (unsigned long) &rfc);
3412 }
3413
3414 if (result == L2CAP_CONF_SUCCESS) {
3415 /* Configure output options and let the other side know
3416 * which ones we don't like. */
3417
3418 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3419 result = L2CAP_CONF_UNACCEPT;
3420 else {
3421 chan->omtu = mtu;
3422 set_bit(CONF_MTU_DONE, &chan->conf_state);
3423 }
3424 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3425
3426 if (remote_efs) {
3427 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3428 efs.stype != L2CAP_SERV_NOTRAFIC &&
3429 efs.stype != chan->local_stype) {
3430
3431 result = L2CAP_CONF_UNACCEPT;
3432
3433 if (chan->num_conf_req >= 1)
3434 return -ECONNREFUSED;
3435
3436 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3437 sizeof(efs),
3438 (unsigned long) &efs);
3439 } else {
3440 /* Send PENDING Conf Rsp */
3441 result = L2CAP_CONF_PENDING;
3442 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3443 }
3444 }
3445
3446 switch (rfc.mode) {
3447 case L2CAP_MODE_BASIC:
3448 chan->fcs = L2CAP_FCS_NONE;
3449 set_bit(CONF_MODE_DONE, &chan->conf_state);
3450 break;
3451
3452 case L2CAP_MODE_ERTM:
3453 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3454 chan->remote_tx_win = rfc.txwin_size;
3455 else
3456 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3457
3458 chan->remote_max_tx = rfc.max_transmit;
3459
3460 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3461 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3462 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3463 rfc.max_pdu_size = cpu_to_le16(size);
3464 chan->remote_mps = size;
3465
3466 __l2cap_set_ertm_timeouts(chan, &rfc);
3467
3468 set_bit(CONF_MODE_DONE, &chan->conf_state);
3469
3470 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3471 sizeof(rfc), (unsigned long) &rfc);
3472
3473 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3474 chan->remote_id = efs.id;
3475 chan->remote_stype = efs.stype;
3476 chan->remote_msdu = le16_to_cpu(efs.msdu);
3477 chan->remote_flush_to =
3478 le32_to_cpu(efs.flush_to);
3479 chan->remote_acc_lat =
3480 le32_to_cpu(efs.acc_lat);
3481 chan->remote_sdu_itime =
3482 le32_to_cpu(efs.sdu_itime);
3483 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3484 sizeof(efs),
3485 (unsigned long) &efs);
3486 }
3487 break;
3488
3489 case L2CAP_MODE_STREAMING:
3490 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3491 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3492 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3493 rfc.max_pdu_size = cpu_to_le16(size);
3494 chan->remote_mps = size;
3495
3496 set_bit(CONF_MODE_DONE, &chan->conf_state);
3497
3498 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3499 (unsigned long) &rfc);
3500
3501 break;
3502
3503 default:
3504 result = L2CAP_CONF_UNACCEPT;
3505
3506 memset(&rfc, 0, sizeof(rfc));
3507 rfc.mode = chan->mode;
3508 }
3509
3510 if (result == L2CAP_CONF_SUCCESS)
3511 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3512 }
3513 rsp->scid = cpu_to_le16(chan->dcid);
3514 rsp->result = cpu_to_le16(result);
3515 rsp->flags = __constant_cpu_to_le16(0);
3516
3517 return ptr - data;
3518 }
3519
3520 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3521 void *data, u16 *result)
3522 {
3523 struct l2cap_conf_req *req = data;
3524 void *ptr = req->data;
3525 int type, olen;
3526 unsigned long val;
3527 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3528 struct l2cap_conf_efs efs;
3529
3530 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3531
3532 while (len >= L2CAP_CONF_OPT_SIZE) {
3533 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3534
3535 switch (type) {
3536 case L2CAP_CONF_MTU:
3537 if (val < L2CAP_DEFAULT_MIN_MTU) {
3538 *result = L2CAP_CONF_UNACCEPT;
3539 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3540 } else
3541 chan->imtu = val;
3542 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3543 break;
3544
3545 case L2CAP_CONF_FLUSH_TO:
3546 chan->flush_to = val;
3547 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3548 2, chan->flush_to);
3549 break;
3550
3551 case L2CAP_CONF_RFC:
3552 if (olen == sizeof(rfc))
3553 memcpy(&rfc, (void *)val, olen);
3554
3555 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3556 rfc.mode != chan->mode)
3557 return -ECONNREFUSED;
3558
3559 chan->fcs = 0;
3560
3561 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3562 sizeof(rfc), (unsigned long) &rfc);
3563 break;
3564
3565 case L2CAP_CONF_EWS:
3566 chan->ack_win = min_t(u16, val, chan->ack_win);
3567 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3568 chan->tx_win);
3569 break;
3570
3571 case L2CAP_CONF_EFS:
3572 if (olen == sizeof(efs))
3573 memcpy(&efs, (void *)val, olen);
3574
3575 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3576 efs.stype != L2CAP_SERV_NOTRAFIC &&
3577 efs.stype != chan->local_stype)
3578 return -ECONNREFUSED;
3579
3580 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3581 (unsigned long) &efs);
3582 break;
3583
3584 case L2CAP_CONF_FCS:
3585 if (*result == L2CAP_CONF_PENDING)
3586 if (val == L2CAP_FCS_NONE)
3587 set_bit(CONF_RECV_NO_FCS,
3588 &chan->conf_state);
3589 break;
3590 }
3591 }
3592
3593 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3594 return -ECONNREFUSED;
3595
3596 chan->mode = rfc.mode;
3597
3598 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3599 switch (rfc.mode) {
3600 case L2CAP_MODE_ERTM:
3601 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3602 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3603 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3604 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3605 chan->ack_win = min_t(u16, chan->ack_win,
3606 rfc.txwin_size);
3607
3608 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3609 chan->local_msdu = le16_to_cpu(efs.msdu);
3610 chan->local_sdu_itime =
3611 le32_to_cpu(efs.sdu_itime);
3612 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3613 chan->local_flush_to =
3614 le32_to_cpu(efs.flush_to);
3615 }
3616 break;
3617
3618 case L2CAP_MODE_STREAMING:
3619 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3620 }
3621 }
3622
3623 req->dcid = cpu_to_le16(chan->dcid);
3624 req->flags = __constant_cpu_to_le16(0);
3625
3626 return ptr - data;
3627 }
3628
3629 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3630 u16 result, u16 flags)
3631 {
3632 struct l2cap_conf_rsp *rsp = data;
3633 void *ptr = rsp->data;
3634
3635 BT_DBG("chan %p", chan);
3636
3637 rsp->scid = cpu_to_le16(chan->dcid);
3638 rsp->result = cpu_to_le16(result);
3639 rsp->flags = cpu_to_le16(flags);
3640
3641 return ptr - data;
3642 }
3643
3644 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3645 {
3646 struct l2cap_conn_rsp rsp;
3647 struct l2cap_conn *conn = chan->conn;
3648 u8 buf[128];
3649 u8 rsp_code;
3650
3651 rsp.scid = cpu_to_le16(chan->dcid);
3652 rsp.dcid = cpu_to_le16(chan->scid);
3653 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3654 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3655
3656 if (chan->hs_hcon)
3657 rsp_code = L2CAP_CREATE_CHAN_RSP;
3658 else
3659 rsp_code = L2CAP_CONN_RSP;
3660
3661 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3662
3663 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3664
3665 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3666 return;
3667
3668 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3669 l2cap_build_conf_req(chan, buf), buf);
3670 chan->num_conf_req++;
3671 }
3672
3673 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3674 {
3675 int type, olen;
3676 unsigned long val;
3677 /* Use sane default values in case a misbehaving remote device
3678 * did not send an RFC or extended window size option.
3679 */
3680 u16 txwin_ext = chan->ack_win;
3681 struct l2cap_conf_rfc rfc = {
3682 .mode = chan->mode,
3683 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3684 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3685 .max_pdu_size = cpu_to_le16(chan->imtu),
3686 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3687 };
3688
3689 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3690
3691 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3692 return;
3693
3694 while (len >= L2CAP_CONF_OPT_SIZE) {
3695 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3696
3697 switch (type) {
3698 case L2CAP_CONF_RFC:
3699 if (olen == sizeof(rfc))
3700 memcpy(&rfc, (void *)val, olen);
3701 break;
3702 case L2CAP_CONF_EWS:
3703 txwin_ext = val;
3704 break;
3705 }
3706 }
3707
3708 switch (rfc.mode) {
3709 case L2CAP_MODE_ERTM:
3710 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3711 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3712 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3713 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3714 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3715 else
3716 chan->ack_win = min_t(u16, chan->ack_win,
3717 rfc.txwin_size);
3718 break;
3719 case L2CAP_MODE_STREAMING:
3720 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3721 }
3722 }
3723
3724 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3725 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3726 u8 *data)
3727 {
3728 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3729
3730 if (cmd_len < sizeof(*rej))
3731 return -EPROTO;
3732
3733 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3734 return 0;
3735
3736 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3737 cmd->ident == conn->info_ident) {
3738 cancel_delayed_work(&conn->info_timer);
3739
3740 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3741 conn->info_ident = 0;
3742
3743 l2cap_conn_start(conn);
3744 }
3745
3746 return 0;
3747 }
3748
3749 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3750 struct l2cap_cmd_hdr *cmd,
3751 u8 *data, u8 rsp_code, u8 amp_id)
3752 {
3753 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3754 struct l2cap_conn_rsp rsp;
3755 struct l2cap_chan *chan = NULL, *pchan;
3756 int result, status = L2CAP_CS_NO_INFO;
3757
3758 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3759 __le16 psm = req->psm;
3760
3761 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3762
3763 /* Check if we have a socket listening on the psm */
3764 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3765 &conn->hcon->dst, ACL_LINK);
3766 if (!pchan) {
3767 result = L2CAP_CR_BAD_PSM;
3768 goto sendresp;
3769 }
3770
3771 mutex_lock(&conn->chan_lock);
3772 l2cap_chan_lock(pchan);
3773
3774 /* Check if the ACL is secure enough (if not SDP) */
3775 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3776 !hci_conn_check_link_mode(conn->hcon)) {
3777 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3778 result = L2CAP_CR_SEC_BLOCK;
3779 goto response;
3780 }
3781
3782 result = L2CAP_CR_NO_MEM;
3783
3784 /* Check if we already have a channel with that dcid */
3785 if (__l2cap_get_chan_by_dcid(conn, scid))
3786 goto response;
3787
3788 chan = pchan->ops->new_connection(pchan);
3789 if (!chan)
3790 goto response;
3791
3792 /* For certain devices (e.g. HID mouse), support for authentication,
3793 * pairing and bonding is optional. For such devices, in order to avoid
3794 * keeping the ACL alive for too long after L2CAP disconnection, reset the ACL
3795 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3796 */
3797 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3798
3799 bacpy(&chan->src, &conn->hcon->src);
3800 bacpy(&chan->dst, &conn->hcon->dst);
3801 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3802 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3803 chan->psm = psm;
3804 chan->dcid = scid;
3805 chan->local_amp_id = amp_id;
3806
3807 __l2cap_chan_add(conn, chan);
3808
3809 dcid = chan->scid;
3810
3811 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3812
3813 chan->ident = cmd->ident;
3814
3815 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3816 if (l2cap_chan_check_security(chan)) {
3817 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3818 l2cap_state_change(chan, BT_CONNECT2);
3819 result = L2CAP_CR_PEND;
3820 status = L2CAP_CS_AUTHOR_PEND;
3821 chan->ops->defer(chan);
3822 } else {
3823 /* Force pending result for AMP controllers.
3824 * The connection will succeed after the
3825 * physical link is up.
3826 */
3827 if (amp_id == AMP_ID_BREDR) {
3828 l2cap_state_change(chan, BT_CONFIG);
3829 result = L2CAP_CR_SUCCESS;
3830 } else {
3831 l2cap_state_change(chan, BT_CONNECT2);
3832 result = L2CAP_CR_PEND;
3833 }
3834 status = L2CAP_CS_NO_INFO;
3835 }
3836 } else {
3837 l2cap_state_change(chan, BT_CONNECT2);
3838 result = L2CAP_CR_PEND;
3839 status = L2CAP_CS_AUTHEN_PEND;
3840 }
3841 } else {
3842 l2cap_state_change(chan, BT_CONNECT2);
3843 result = L2CAP_CR_PEND;
3844 status = L2CAP_CS_NO_INFO;
3845 }
3846
3847 response:
3848 l2cap_chan_unlock(pchan);
3849 mutex_unlock(&conn->chan_lock);
3850
3851 sendresp:
3852 rsp.scid = cpu_to_le16(scid);
3853 rsp.dcid = cpu_to_le16(dcid);
3854 rsp.result = cpu_to_le16(result);
3855 rsp.status = cpu_to_le16(status);
3856 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3857
3858 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3859 struct l2cap_info_req info;
3860 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3861
3862 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3863 conn->info_ident = l2cap_get_ident(conn);
3864
3865 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3866
3867 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3868 sizeof(info), &info);
3869 }
3870
3871 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3872 result == L2CAP_CR_SUCCESS) {
3873 u8 buf[128];
3874 set_bit(CONF_REQ_SENT, &chan->conf_state);
3875 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3876 l2cap_build_conf_req(chan, buf), buf);
3877 chan->num_conf_req++;
3878 }
3879
3880 return chan;
3881 }
3882
3883 static int l2cap_connect_req(struct l2cap_conn *conn,
3884 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3885 {
3886 struct hci_dev *hdev = conn->hcon->hdev;
3887 struct hci_conn *hcon = conn->hcon;
3888
3889 if (cmd_len < sizeof(struct l2cap_conn_req))
3890 return -EPROTO;
3891
3892 hci_dev_lock(hdev);
3893 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3894 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3895 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3896 hcon->dst_type, 0, NULL, 0,
3897 hcon->dev_class);
3898 hci_dev_unlock(hdev);
3899
3900 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3901 return 0;
3902 }
3903
3904 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3905 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3906 u8 *data)
3907 {
3908 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3909 u16 scid, dcid, result, status;
3910 struct l2cap_chan *chan;
3911 u8 req[128];
3912 int err;
3913
3914 if (cmd_len < sizeof(*rsp))
3915 return -EPROTO;
3916
3917 scid = __le16_to_cpu(rsp->scid);
3918 dcid = __le16_to_cpu(rsp->dcid);
3919 result = __le16_to_cpu(rsp->result);
3920 status = __le16_to_cpu(rsp->status);
3921
3922 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3923 dcid, scid, result, status);
3924
3925 mutex_lock(&conn->chan_lock);
3926
3927 if (scid) {
3928 chan = __l2cap_get_chan_by_scid(conn, scid);
3929 if (!chan) {
3930 err = -EBADSLT;
3931 goto unlock;
3932 }
3933 } else {
3934 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3935 if (!chan) {
3936 err = -EBADSLT;
3937 goto unlock;
3938 }
3939 }
3940
3941 err = 0;
3942
3943 l2cap_chan_lock(chan);
3944
3945 switch (result) {
3946 case L2CAP_CR_SUCCESS:
3947 l2cap_state_change(chan, BT_CONFIG);
3948 chan->ident = 0;
3949 chan->dcid = dcid;
3950 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3951
3952 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3953 break;
3954
3955 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3956 l2cap_build_conf_req(chan, req), req);
3957 chan->num_conf_req++;
3958 break;
3959
3960 case L2CAP_CR_PEND:
3961 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3962 break;
3963
3964 default:
3965 l2cap_chan_del(chan, ECONNREFUSED);
3966 break;
3967 }
3968
3969 l2cap_chan_unlock(chan);
3970
3971 unlock:
3972 mutex_unlock(&conn->chan_lock);
3973
3974 return err;
3975 }
3976
3977 static inline void set_default_fcs(struct l2cap_chan *chan)
3978 {
3979 /* FCS is enabled only in ERTM or streaming mode, if one or both
3980 * sides request it.
3981 */
3982 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3983 chan->fcs = L2CAP_FCS_NONE;
3984 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3985 chan->fcs = L2CAP_FCS_CRC16;
3986 }
3987
3988 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3989 u8 ident, u16 flags)
3990 {
3991 struct l2cap_conn *conn = chan->conn;
3992
3993 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3994 flags);
3995
3996 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3997 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3998
3999 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4000 l2cap_build_conf_rsp(chan, data,
4001 L2CAP_CONF_SUCCESS, flags), data);
4002 }
4003
4004 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4005 u16 scid, u16 dcid)
4006 {
4007 struct l2cap_cmd_rej_cid rej;
4008
4009 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4010 rej.scid = __cpu_to_le16(scid);
4011 rej.dcid = __cpu_to_le16(dcid);
4012
4013 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4014 }
4015
4016 static inline int l2cap_config_req(struct l2cap_conn *conn,
4017 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4018 u8 *data)
4019 {
4020 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4021 u16 dcid, flags;
4022 u8 rsp[64];
4023 struct l2cap_chan *chan;
4024 int len, err = 0;
4025
4026 if (cmd_len < sizeof(*req))
4027 return -EPROTO;
4028
4029 dcid = __le16_to_cpu(req->dcid);
4030 flags = __le16_to_cpu(req->flags);
4031
4032 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4033
4034 chan = l2cap_get_chan_by_scid(conn, dcid);
4035 if (!chan) {
4036 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4037 return 0;
4038 }
4039
4040 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4041 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4042 chan->dcid);
4043 goto unlock;
4044 }
4045
4046 /* Reject if config buffer is too small. */
4047 len = cmd_len - sizeof(*req);
4048 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4049 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4050 l2cap_build_conf_rsp(chan, rsp,
4051 L2CAP_CONF_REJECT, flags), rsp);
4052 goto unlock;
4053 }
4054
4055 /* Store config. */
4056 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4057 chan->conf_len += len;
4058
4059 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4060 /* Incomplete config. Send empty response. */
4061 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4062 l2cap_build_conf_rsp(chan, rsp,
4063 L2CAP_CONF_SUCCESS, flags), rsp);
4064 goto unlock;
4065 }
4066
4067 /* Complete config. */
4068 len = l2cap_parse_conf_req(chan, rsp);
4069 if (len < 0) {
4070 l2cap_send_disconn_req(chan, ECONNRESET);
4071 goto unlock;
4072 }
4073
4074 chan->ident = cmd->ident;
4075 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4076 chan->num_conf_rsp++;
4077
4078 /* Reset config buffer. */
4079 chan->conf_len = 0;
4080
4081 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4082 goto unlock;
4083
4084 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4085 set_default_fcs(chan);
4086
4087 if (chan->mode == L2CAP_MODE_ERTM ||
4088 chan->mode == L2CAP_MODE_STREAMING)
4089 err = l2cap_ertm_init(chan);
4090
4091 if (err < 0)
4092 l2cap_send_disconn_req(chan, -err);
4093 else
4094 l2cap_chan_ready(chan);
4095
4096 goto unlock;
4097 }
4098
4099 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4100 u8 buf[64];
4101 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4102 l2cap_build_conf_req(chan, buf), buf);
4103 chan->num_conf_req++;
4104 }
4105
4106 /* Got Conf Rsp PENDING from remote side and assume we sent
4107 Conf Rsp PENDING in the code above */
4108 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4109 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4110
4111 /* check compatibility */
4112
4113 /* Send rsp for BR/EDR channel */
4114 if (!chan->hs_hcon)
4115 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4116 else
4117 chan->ident = cmd->ident;
4118 }
4119
4120 unlock:
4121 l2cap_chan_unlock(chan);
4122 return err;
4123 }
4124
4125 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4126 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4127 u8 *data)
4128 {
4129 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4130 u16 scid, flags, result;
4131 struct l2cap_chan *chan;
4132 int len = cmd_len - sizeof(*rsp);
4133 int err = 0;
4134
4135 if (cmd_len < sizeof(*rsp))
4136 return -EPROTO;
4137
4138 scid = __le16_to_cpu(rsp->scid);
4139 flags = __le16_to_cpu(rsp->flags);
4140 result = __le16_to_cpu(rsp->result);
4141
4142 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4143 result, len);
4144
4145 chan = l2cap_get_chan_by_scid(conn, scid);
4146 if (!chan)
4147 return 0;
4148
4149 switch (result) {
4150 case L2CAP_CONF_SUCCESS:
4151 l2cap_conf_rfc_get(chan, rsp->data, len);
4152 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4153 break;
4154
4155 case L2CAP_CONF_PENDING:
4156 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4157
4158 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4159 char buf[64];
4160
4161 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4162 buf, &result);
4163 if (len < 0) {
4164 l2cap_send_disconn_req(chan, ECONNRESET);
4165 goto done;
4166 }
4167
4168 if (!chan->hs_hcon) {
4169 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4170 0);
4171 } else {
4172 if (l2cap_check_efs(chan)) {
4173 amp_create_logical_link(chan);
4174 chan->ident = cmd->ident;
4175 }
4176 }
4177 }
4178 goto done;
4179
4180 case L2CAP_CONF_UNACCEPT:
4181 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4182 char req[64];
4183
4184 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4185 l2cap_send_disconn_req(chan, ECONNRESET);
4186 goto done;
4187 }
4188
4189 /* throw out any old stored conf requests */
4190 result = L2CAP_CONF_SUCCESS;
4191 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4192 req, &result);
4193 if (len < 0) {
4194 l2cap_send_disconn_req(chan, ECONNRESET);
4195 goto done;
4196 }
4197
4198 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4199 L2CAP_CONF_REQ, len, req);
4200 chan->num_conf_req++;
4201 if (result != L2CAP_CONF_SUCCESS)
4202 goto done;
4203 break;
4204 }
4205
4206 default:
4207 l2cap_chan_set_err(chan, ECONNRESET);
4208
4209 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4210 l2cap_send_disconn_req(chan, ECONNRESET);
4211 goto done;
4212 }
4213
4214 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4215 goto done;
4216
4217 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4218
4219 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4220 set_default_fcs(chan);
4221
4222 if (chan->mode == L2CAP_MODE_ERTM ||
4223 chan->mode == L2CAP_MODE_STREAMING)
4224 err = l2cap_ertm_init(chan);
4225
4226 if (err < 0)
4227 l2cap_send_disconn_req(chan, -err);
4228 else
4229 l2cap_chan_ready(chan);
4230 }
4231
4232 done:
4233 l2cap_chan_unlock(chan);
4234 return err;
4235 }
4236
4237 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4238 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4239 u8 *data)
4240 {
4241 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4242 struct l2cap_disconn_rsp rsp;
4243 u16 dcid, scid;
4244 struct l2cap_chan *chan;
4245
4246 if (cmd_len != sizeof(*req))
4247 return -EPROTO;
4248
4249 scid = __le16_to_cpu(req->scid);
4250 dcid = __le16_to_cpu(req->dcid);
4251
4252 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4253
4254 mutex_lock(&conn->chan_lock);
4255
4256 chan = __l2cap_get_chan_by_scid(conn, dcid);
4257 if (!chan) {
4258 mutex_unlock(&conn->chan_lock);
4259 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4260 return 0;
4261 }
4262
4263 l2cap_chan_lock(chan);
4264
4265 rsp.dcid = cpu_to_le16(chan->scid);
4266 rsp.scid = cpu_to_le16(chan->dcid);
4267 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4268
4269 chan->ops->set_shutdown(chan);
4270
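/* Take a reference so the channel stays valid after l2cap_chan_del()
 * and the unlock below; ops->close() and the final put then operate
 * on that reference.
 */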
4271 l2cap_chan_hold(chan);
4272 l2cap_chan_del(chan, ECONNRESET);
4273
4274 l2cap_chan_unlock(chan);
4275
4276 chan->ops->close(chan);
4277 l2cap_chan_put(chan);
4278
4279 mutex_unlock(&conn->chan_lock);
4280
4281 return 0;
4282 }
4283
4284 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4285 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4286 u8 *data)
4287 {
4288 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4289 u16 dcid, scid;
4290 struct l2cap_chan *chan;
4291
4292 if (cmd_len != sizeof(*rsp))
4293 return -EPROTO;
4294
4295 scid = __le16_to_cpu(rsp->scid);
4296 dcid = __le16_to_cpu(rsp->dcid);
4297
4298 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4299
4300 mutex_lock(&conn->chan_lock);
4301
4302 chan = __l2cap_get_chan_by_scid(conn, scid);
4303 if (!chan) {
4304 mutex_unlock(&conn->chan_lock);
4305 return 0;
4306 }
4307
4308 l2cap_chan_lock(chan);
4309
4310 l2cap_chan_hold(chan);
4311 l2cap_chan_del(chan, 0);
4312
4313 l2cap_chan_unlock(chan);
4314
4315 chan->ops->close(chan);
4316 l2cap_chan_put(chan);
4317
4318 mutex_unlock(&conn->chan_lock);
4319
4320 return 0;
4321 }
4322
4323 static inline int l2cap_information_req(struct l2cap_conn *conn,
4324 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4325 u8 *data)
4326 {
4327 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4328 u16 type;
4329
4330 if (cmd_len != sizeof(*req))
4331 return -EPROTO;
4332
4333 type = __le16_to_cpu(req->type);
4334
4335 BT_DBG("type 0x%4.4x", type);
4336
4337 if (type == L2CAP_IT_FEAT_MASK) {
4338 u8 buf[8];
4339 u32 feat_mask = l2cap_feat_mask;
4340 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4341 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4342 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4343 if (!disable_ertm)
4344 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4345 | L2CAP_FEAT_FCS;
4346 if (conn->hs_enabled)
4347 feat_mask |= L2CAP_FEAT_EXT_FLOW
4348 | L2CAP_FEAT_EXT_WINDOW;
4349
4350 put_unaligned_le32(feat_mask, rsp->data);
4351 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4352 buf);
4353 } else if (type == L2CAP_IT_FIXED_CHAN) {
4354 u8 buf[12];
4355 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4356
4357 if (conn->hs_enabled)
4358 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4359 else
4360 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4361
4362 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4363 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4364 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4365 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4366 buf);
4367 } else {
4368 struct l2cap_info_rsp rsp;
4369 rsp.type = cpu_to_le16(type);
4370 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4371 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4372 &rsp);
4373 }
4374
4375 return 0;
4376 }
4377
4378 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4379 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4380 u8 *data)
4381 {
4382 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4383 u16 type, result;
4384
4385 if (cmd_len < sizeof(*rsp))
4386 return -EPROTO;
4387
4388 type = __le16_to_cpu(rsp->type);
4389 result = __le16_to_cpu(rsp->result);
4390
4391 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4392
4393 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4394 if (cmd->ident != conn->info_ident ||
4395 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4396 return 0;
4397
4398 cancel_delayed_work(&conn->info_timer);
4399
4400 if (result != L2CAP_IR_SUCCESS) {
4401 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4402 conn->info_ident = 0;
4403
4404 l2cap_conn_start(conn);
4405
4406 return 0;
4407 }
4408
4409 switch (type) {
4410 case L2CAP_IT_FEAT_MASK:
4411 conn->feat_mask = get_unaligned_le32(rsp->data);
4412
4413 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4414 struct l2cap_info_req req;
4415 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4416
4417 conn->info_ident = l2cap_get_ident(conn);
4418
4419 l2cap_send_cmd(conn, conn->info_ident,
4420 L2CAP_INFO_REQ, sizeof(req), &req);
4421 } else {
4422 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4423 conn->info_ident = 0;
4424
4425 l2cap_conn_start(conn);
4426 }
4427 break;
4428
4429 case L2CAP_IT_FIXED_CHAN:
4430 conn->fixed_chan_mask = rsp->data[0];
4431 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4432 conn->info_ident = 0;
4433
4434 l2cap_conn_start(conn);
4435 break;
4436 }
4437
4438 return 0;
4439 }
4440
4441 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4442 struct l2cap_cmd_hdr *cmd,
4443 u16 cmd_len, void *data)
4444 {
4445 struct l2cap_create_chan_req *req = data;
4446 struct l2cap_create_chan_rsp rsp;
4447 struct l2cap_chan *chan;
4448 struct hci_dev *hdev;
4449 u16 psm, scid;
4450
4451 if (cmd_len != sizeof(*req))
4452 return -EPROTO;
4453
4454 if (!conn->hs_enabled)
4455 return -EINVAL;
4456
4457 psm = le16_to_cpu(req->psm);
4458 scid = le16_to_cpu(req->scid);
4459
4460 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4461
4462 /* For controller id 0 make BR/EDR connection */
4463 if (req->amp_id == AMP_ID_BREDR) {
4464 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4465 req->amp_id);
4466 return 0;
4467 }
4468
4469 /* Validate AMP controller id */
4470 hdev = hci_dev_get(req->amp_id);
4471 if (!hdev)
4472 goto error;
4473
4474 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4475 hci_dev_put(hdev);
4476 goto error;
4477 }
4478
4479 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4480 req->amp_id);
4481 if (chan) {
4482 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4483 struct hci_conn *hs_hcon;
4484
4485 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4486 &conn->hcon->dst);
4487 if (!hs_hcon) {
4488 hci_dev_put(hdev);
4489 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4490 chan->dcid);
4491 return 0;
4492 }
4493
4494 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4495
4496 mgr->bredr_chan = chan;
4497 chan->hs_hcon = hs_hcon;
4498 chan->fcs = L2CAP_FCS_NONE;
4499 conn->mtu = hdev->block_mtu;
4500 }
4501
4502 hci_dev_put(hdev);
4503
4504 return 0;
4505
4506 error:
4507 rsp.dcid = 0;
4508 rsp.scid = cpu_to_le16(scid);
4509 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4510 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4511
4512 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4513 sizeof(rsp), &rsp);
4514
4515 return 0;
4516 }
4517
4518 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4519 {
4520 struct l2cap_move_chan_req req;
4521 u8 ident;
4522
4523 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4524
4525 ident = l2cap_get_ident(chan->conn);
4526 chan->ident = ident;
4527
4528 req.icid = cpu_to_le16(chan->scid);
4529 req.dest_amp_id = dest_amp_id;
4530
4531 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4532 &req);
4533
4534 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4535 }
4536
4537 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4538 {
4539 struct l2cap_move_chan_rsp rsp;
4540
4541 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4542
4543 rsp.icid = cpu_to_le16(chan->dcid);
4544 rsp.result = cpu_to_le16(result);
4545
4546 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4547 sizeof(rsp), &rsp);
4548 }
4549
4550 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4551 {
4552 struct l2cap_move_chan_cfm cfm;
4553
4554 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4555
4556 chan->ident = l2cap_get_ident(chan->conn);
4557
4558 cfm.icid = cpu_to_le16(chan->scid);
4559 cfm.result = cpu_to_le16(result);
4560
4561 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4562 sizeof(cfm), &cfm);
4563
4564 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4565 }
4566
4567 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4568 {
4569 struct l2cap_move_chan_cfm cfm;
4570
4571 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4572
4573 cfm.icid = cpu_to_le16(icid);
4574 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4575
4576 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4577 sizeof(cfm), &cfm);
4578 }
4579
4580 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4581 u16 icid)
4582 {
4583 struct l2cap_move_chan_cfm_rsp rsp;
4584
4585 BT_DBG("icid 0x%4.4x", icid);
4586
4587 rsp.icid = cpu_to_le16(icid);
4588 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4589 }
4590
4591 static void __release_logical_link(struct l2cap_chan *chan)
4592 {
4593 chan->hs_hchan = NULL;
4594 chan->hs_hcon = NULL;
4595
4596 /* Placeholder - release the logical link */
4597 }
4598
4599 static void l2cap_logical_fail(struct l2cap_chan *chan)
4600 {
4601 /* Logical link setup failed */
4602 if (chan->state != BT_CONNECTED) {
4603 /* Create channel failure, disconnect */
4604 l2cap_send_disconn_req(chan, ECONNRESET);
4605 return;
4606 }
4607
4608 switch (chan->move_role) {
4609 case L2CAP_MOVE_ROLE_RESPONDER:
4610 l2cap_move_done(chan);
4611 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4612 break;
4613 case L2CAP_MOVE_ROLE_INITIATOR:
4614 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4615 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4616 /* Remote has only sent pending or
4617 * success responses, clean up
4618 */
4619 l2cap_move_done(chan);
4620 }
4621
4622 /* Other amp move states imply that the move
4623 * has already aborted
4624 */
4625 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4626 break;
4627 }
4628 }
4629
4630 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4631 struct hci_chan *hchan)
4632 {
4633 struct l2cap_conf_rsp rsp;
4634
4635 chan->hs_hchan = hchan;
4636 chan->hs_hcon->l2cap_data = chan->conn;
4637
4638 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4639
4640 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4641 int err;
4642
4643 set_default_fcs(chan);
4644
4645 err = l2cap_ertm_init(chan);
4646 if (err < 0)
4647 l2cap_send_disconn_req(chan, -err);
4648 else
4649 l2cap_chan_ready(chan);
4650 }
4651 }
4652
4653 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4654 struct hci_chan *hchan)
4655 {
4656 chan->hs_hcon = hchan->conn;
4657 chan->hs_hcon->l2cap_data = chan->conn;
4658
4659 BT_DBG("move_state %d", chan->move_state);
4660
4661 switch (chan->move_state) {
4662 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4663 /* Move confirm will be sent after a success
4664 * response is received
4665 */
4666 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4667 break;
4668 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4669 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4670 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4671 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4672 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4673 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4674 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4675 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4676 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4677 }
4678 break;
4679 default:
4680 /* Move was not in expected state, release the logical link */
4681 __release_logical_link(chan);
4682
4683 chan->move_state = L2CAP_MOVE_STABLE;
4684 }
4685 }
4686
4687 /* Call with chan locked */
4688 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4689 u8 status)
4690 {
4691 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4692
4693 if (status) {
4694 l2cap_logical_fail(chan);
4695 __release_logical_link(chan);
4696 return;
4697 }
4698
4699 if (chan->state != BT_CONNECTED) {
4700 /* Ignore logical link if channel is on BR/EDR */
4701 if (chan->local_amp_id != AMP_ID_BREDR)
4702 l2cap_logical_finish_create(chan, hchan);
4703 } else {
4704 l2cap_logical_finish_move(chan, hchan);
4705 }
4706 }
4707
4708 void l2cap_move_start(struct l2cap_chan *chan)
4709 {
4710 BT_DBG("chan %p", chan);
4711
4712 if (chan->local_amp_id == AMP_ID_BREDR) {
4713 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4714 return;
4715 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4716 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4717 /* Placeholder - start physical link setup */
4718 } else {
4719 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4720 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4721 chan->move_id = 0;
4722 l2cap_move_setup(chan);
4723 l2cap_send_move_chan_req(chan, 0);
4724 }
4725 }
4726
4727 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4728 u8 local_amp_id, u8 remote_amp_id)
4729 {
4730 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4731 local_amp_id, remote_amp_id);
4732
4733 chan->fcs = L2CAP_FCS_NONE;
4734
4735 /* Outgoing channel on AMP */
4736 if (chan->state == BT_CONNECT) {
4737 if (result == L2CAP_CR_SUCCESS) {
4738 chan->local_amp_id = local_amp_id;
4739 l2cap_send_create_chan_req(chan, remote_amp_id);
4740 } else {
4741 /* Revert to BR/EDR connect */
4742 l2cap_send_conn_req(chan);
4743 }
4744
4745 return;
4746 }
4747
4748 /* Incoming channel on AMP */
4749 if (__l2cap_no_conn_pending(chan)) {
4750 struct l2cap_conn_rsp rsp;
4751 char buf[128];
4752 rsp.scid = cpu_to_le16(chan->dcid);
4753 rsp.dcid = cpu_to_le16(chan->scid);
4754
4755 if (result == L2CAP_CR_SUCCESS) {
4756 /* Send successful response */
4757 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4758 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4759 } else {
4760 /* Send negative response */
4761 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4762 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4763 }
4764
4765 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4766 sizeof(rsp), &rsp);
4767
4768 if (result == L2CAP_CR_SUCCESS) {
4769 l2cap_state_change(chan, BT_CONFIG);
4770 set_bit(CONF_REQ_SENT, &chan->conf_state);
4771 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4772 L2CAP_CONF_REQ,
4773 l2cap_build_conf_req(chan, buf), buf);
4774 chan->num_conf_req++;
4775 }
4776 }
4777 }
4778
4779 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4780 u8 remote_amp_id)
4781 {
4782 l2cap_move_setup(chan);
4783 chan->move_id = local_amp_id;
4784 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4785
4786 l2cap_send_move_chan_req(chan, remote_amp_id);
4787 }
4788
4789 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4790 {
4791 struct hci_chan *hchan = NULL;
4792
4793 /* Placeholder - get hci_chan for logical link */
4794
4795 if (hchan) {
4796 if (hchan->state == BT_CONNECTED) {
4797 /* Logical link is ready to go */
4798 chan->hs_hcon = hchan->conn;
4799 chan->hs_hcon->l2cap_data = chan->conn;
4800 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4801 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4802
4803 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4804 } else {
4805 /* Wait for logical link to be ready */
4806 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4807 }
4808 } else {
4809 /* Logical link not available */
4810 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4811 }
4812 }
4813
4814 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4815 {
4816 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4817 u8 rsp_result;
4818 if (result == -EINVAL)
4819 rsp_result = L2CAP_MR_BAD_ID;
4820 else
4821 rsp_result = L2CAP_MR_NOT_ALLOWED;
4822
4823 l2cap_send_move_chan_rsp(chan, rsp_result);
4824 }
4825
4826 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4827 chan->move_state = L2CAP_MOVE_STABLE;
4828
4829 /* Restart data transmission */
4830 l2cap_ertm_send(chan);
4831 }
4832
4833 /* Invoke with locked chan */
4834 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4835 {
4836 u8 local_amp_id = chan->local_amp_id;
4837 u8 remote_amp_id = chan->remote_amp_id;
4838
4839 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4840 chan, result, local_amp_id, remote_amp_id);
4841
4842 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4843 l2cap_chan_unlock(chan);
4844 return;
4845 }
4846
4847 if (chan->state != BT_CONNECTED) {
4848 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4849 } else if (result != L2CAP_MR_SUCCESS) {
4850 l2cap_do_move_cancel(chan, result);
4851 } else {
4852 switch (chan->move_role) {
4853 case L2CAP_MOVE_ROLE_INITIATOR:
4854 l2cap_do_move_initiate(chan, local_amp_id,
4855 remote_amp_id);
4856 break;
4857 case L2CAP_MOVE_ROLE_RESPONDER:
4858 l2cap_do_move_respond(chan, result);
4859 break;
4860 default:
4861 l2cap_do_move_cancel(chan, result);
4862 break;
4863 }
4864 }
4865 }
4866
4867 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4868 struct l2cap_cmd_hdr *cmd,
4869 u16 cmd_len, void *data)
4870 {
4871 struct l2cap_move_chan_req *req = data;
4872 struct l2cap_move_chan_rsp rsp;
4873 struct l2cap_chan *chan;
4874 u16 icid = 0;
4875 u16 result = L2CAP_MR_NOT_ALLOWED;
4876
4877 if (cmd_len != sizeof(*req))
4878 return -EPROTO;
4879
4880 icid = le16_to_cpu(req->icid);
4881
4882 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4883
4884 if (!conn->hs_enabled)
4885 return -EINVAL;
4886
4887 chan = l2cap_get_chan_by_dcid(conn, icid);
4888 if (!chan) {
4889 rsp.icid = cpu_to_le16(icid);
4890 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4891 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4892 sizeof(rsp), &rsp);
4893 return 0;
4894 }
4895
4896 chan->ident = cmd->ident;
4897
4898 if (chan->scid < L2CAP_CID_DYN_START ||
4899 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4900 (chan->mode != L2CAP_MODE_ERTM &&
4901 chan->mode != L2CAP_MODE_STREAMING)) {
4902 result = L2CAP_MR_NOT_ALLOWED;
4903 goto send_move_response;
4904 }
4905
4906 if (chan->local_amp_id == req->dest_amp_id) {
4907 result = L2CAP_MR_SAME_ID;
4908 goto send_move_response;
4909 }
4910
4911 if (req->dest_amp_id != AMP_ID_BREDR) {
4912 struct hci_dev *hdev;
4913 hdev = hci_dev_get(req->dest_amp_id);
4914 if (!hdev || hdev->dev_type != HCI_AMP ||
4915 !test_bit(HCI_UP, &hdev->flags)) {
4916 if (hdev)
4917 hci_dev_put(hdev);
4918
4919 result = L2CAP_MR_BAD_ID;
4920 goto send_move_response;
4921 }
4922 hci_dev_put(hdev);
4923 }
4924
4925 /* Detect a move collision. Only send a collision response
4926 * if this side has "lost", otherwise proceed with the move.
4927 * The winner has the larger bd_addr.
4928 */
4929 if ((__chan_is_moving(chan) ||
4930 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4931 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4932 result = L2CAP_MR_COLLISION;
4933 goto send_move_response;
4934 }
4935
4936 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4937 l2cap_move_setup(chan);
4938 chan->move_id = req->dest_amp_id;
4939 icid = chan->dcid;
4940
4941 if (req->dest_amp_id == AMP_ID_BREDR) {
4942 /* Moving to BR/EDR */
4943 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4944 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4945 result = L2CAP_MR_PEND;
4946 } else {
4947 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4948 result = L2CAP_MR_SUCCESS;
4949 }
4950 } else {
4951 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4952 /* Placeholder - uncomment when amp functions are available */
4953 /*amp_accept_physical(chan, req->dest_amp_id);*/
4954 result = L2CAP_MR_PEND;
4955 }
4956
4957 send_move_response:
4958 l2cap_send_move_chan_rsp(chan, result);
4959
4960 l2cap_chan_unlock(chan);
4961
4962 return 0;
4963 }
4964
4965 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4966 {
4967 struct l2cap_chan *chan;
4968 struct hci_chan *hchan = NULL;
4969
4970 chan = l2cap_get_chan_by_scid(conn, icid);
4971 if (!chan) {
4972 l2cap_send_move_chan_cfm_icid(conn, icid);
4973 return;
4974 }
4975
4976 __clear_chan_timer(chan);
4977 if (result == L2CAP_MR_PEND)
4978 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4979
4980 switch (chan->move_state) {
4981 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4982 /* Move confirm will be sent when logical link
4983 * is complete.
4984 */
4985 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4986 break;
4987 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4988 if (result == L2CAP_MR_PEND) {
4989 break;
4990 } else if (test_bit(CONN_LOCAL_BUSY,
4991 &chan->conn_state)) {
4992 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4993 } else {
4994 /* Logical link is up or moving to BR/EDR,
4995 * proceed with move
4996 */
4997 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4998 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4999 }
5000 break;
5001 case L2CAP_MOVE_WAIT_RSP:
5002 /* Moving to AMP */
5003 if (result == L2CAP_MR_SUCCESS) {
5004 /* Remote is ready, send confirm immediately
5005 * after logical link is ready
5006 */
5007 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5008 } else {
5009 /* Both logical link and move success
5010 * are required to confirm
5011 */
5012 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5013 }
5014
5015 /* Placeholder - get hci_chan for logical link */
5016 if (!hchan) {
5017 /* Logical link not available */
5018 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5019 break;
5020 }
5021
5022 /* If the logical link is not yet connected, do not
5023 * send confirmation.
5024 */
5025 if (hchan->state != BT_CONNECTED)
5026 break;
5027
5028 /* Logical link is already ready to go */
5029
5030 chan->hs_hcon = hchan->conn;
5031 chan->hs_hcon->l2cap_data = chan->conn;
5032
5033 if (result == L2CAP_MR_SUCCESS) {
5034 /* Can confirm now */
5035 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5036 } else {
5037 /* Now only need move success
5038 * to confirm
5039 */
5040 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5041 }
5042
5043 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5044 break;
5045 default:
5046 /* Any other amp move state means the move failed. */
5047 chan->move_id = chan->local_amp_id;
5048 l2cap_move_done(chan);
5049 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5050 }
5051
5052 l2cap_chan_unlock(chan);
5053 }
5054
5055 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5056 u16 result)
5057 {
5058 struct l2cap_chan *chan;
5059
5060 chan = l2cap_get_chan_by_ident(conn, ident);
5061 if (!chan) {
5062 /* Could not locate channel, icid is best guess */
5063 l2cap_send_move_chan_cfm_icid(conn, icid);
5064 return;
5065 }
5066
5067 __clear_chan_timer(chan);
5068
5069 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5070 if (result == L2CAP_MR_COLLISION) {
5071 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5072 } else {
5073 /* Cleanup - cancel move */
5074 chan->move_id = chan->local_amp_id;
5075 l2cap_move_done(chan);
5076 }
5077 }
5078
5079 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5080
5081 l2cap_chan_unlock(chan);
5082 }
5083
5084 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5085 struct l2cap_cmd_hdr *cmd,
5086 u16 cmd_len, void *data)
5087 {
5088 struct l2cap_move_chan_rsp *rsp = data;
5089 u16 icid, result;
5090
5091 if (cmd_len != sizeof(*rsp))
5092 return -EPROTO;
5093
5094 icid = le16_to_cpu(rsp->icid);
5095 result = le16_to_cpu(rsp->result);
5096
5097 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5098
5099 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5100 l2cap_move_continue(conn, icid, result);
5101 else
5102 l2cap_move_fail(conn, cmd->ident, icid, result);
5103
5104 return 0;
5105 }
5106
5107 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5108 struct l2cap_cmd_hdr *cmd,
5109 u16 cmd_len, void *data)
5110 {
5111 struct l2cap_move_chan_cfm *cfm = data;
5112 struct l2cap_chan *chan;
5113 u16 icid, result;
5114
5115 if (cmd_len != sizeof(*cfm))
5116 return -EPROTO;
5117
5118 icid = le16_to_cpu(cfm->icid);
5119 result = le16_to_cpu(cfm->result);
5120
5121 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5122
5123 chan = l2cap_get_chan_by_dcid(conn, icid);
5124 if (!chan) {
5125 /* Spec requires a response even if the icid was not found */
5126 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5127 return 0;
5128 }
5129
5130 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5131 if (result == L2CAP_MC_CONFIRMED) {
5132 chan->local_amp_id = chan->move_id;
5133 if (chan->local_amp_id == AMP_ID_BREDR)
5134 __release_logical_link(chan);
5135 } else {
5136 chan->move_id = chan->local_amp_id;
5137 }
5138
5139 l2cap_move_done(chan);
5140 }
5141
5142 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5143
5144 l2cap_chan_unlock(chan);
5145
5146 return 0;
5147 }
5148
5149 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5150 struct l2cap_cmd_hdr *cmd,
5151 u16 cmd_len, void *data)
5152 {
5153 struct l2cap_move_chan_cfm_rsp *rsp = data;
5154 struct l2cap_chan *chan;
5155 u16 icid;
5156
5157 if (cmd_len != sizeof(*rsp))
5158 return -EPROTO;
5159
5160 icid = le16_to_cpu(rsp->icid);
5161
5162 BT_DBG("icid 0x%4.4x", icid);
5163
5164 chan = l2cap_get_chan_by_scid(conn, icid);
5165 if (!chan)
5166 return 0;
5167
5168 __clear_chan_timer(chan);
5169
5170 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5171 chan->local_amp_id = chan->move_id;
5172
5173 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5174 __release_logical_link(chan);
5175
5176 l2cap_move_done(chan);
5177 }
5178
5179 l2cap_chan_unlock(chan);
5180
5181 return 0;
5182 }
5183
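/* Interval values are in units of 1.25 ms, the supervision timeout in
 * units of 10 ms. For example, max = 24 (30 ms) and to_multiplier = 42
 * (420 ms) give max_latency = (42 * 8 / 24) - 1 = 13, so a requested
 * slave latency above 13 (or above the absolute cap of 499) is
 * rejected.
 */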
5184 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5185 u16 to_multiplier)
5186 {
5187 u16 max_latency;
5188
5189 if (min > max || min < 6 || max > 3200)
5190 return -EINVAL;
5191
5192 if (to_multiplier < 10 || to_multiplier > 3200)
5193 return -EINVAL;
5194
5195 if (max >= to_multiplier * 8)
5196 return -EINVAL;
5197
5198 max_latency = (to_multiplier * 8 / max) - 1;
5199 if (latency > 499 || latency > max_latency)
5200 return -EINVAL;
5201
5202 return 0;
5203 }
5204
5205 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5206 struct l2cap_cmd_hdr *cmd,
5207 u16 cmd_len, u8 *data)
5208 {
5209 struct hci_conn *hcon = conn->hcon;
5210 struct l2cap_conn_param_update_req *req;
5211 struct l2cap_conn_param_update_rsp rsp;
5212 u16 min, max, latency, to_multiplier;
5213 int err;
5214
5215 if (!(hcon->link_mode & HCI_LM_MASTER))
5216 return -EINVAL;
5217
5218 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5219 return -EPROTO;
5220
5221 req = (struct l2cap_conn_param_update_req *) data;
5222 min = __le16_to_cpu(req->min);
5223 max = __le16_to_cpu(req->max);
5224 latency = __le16_to_cpu(req->latency);
5225 to_multiplier = __le16_to_cpu(req->to_multiplier);
5226
5227 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5228 min, max, latency, to_multiplier);
5229
5230 memset(&rsp, 0, sizeof(rsp));
5231
5232 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5233 if (err)
5234 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5235 else
5236 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5237
5238 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5239 sizeof(rsp), &rsp);
5240
5241 if (!err)
5242 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
5243
5244 return 0;
5245 }
5246
5247 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5248 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5249 u8 *data)
5250 {
5251 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5252 u16 dcid, mtu, mps, credits, result;
5253 struct l2cap_chan *chan;
5254 int err;
5255
5256 if (cmd_len < sizeof(*rsp))
5257 return -EPROTO;
5258
5259 dcid = __le16_to_cpu(rsp->dcid);
5260 mtu = __le16_to_cpu(rsp->mtu);
5261 mps = __le16_to_cpu(rsp->mps);
5262 credits = __le16_to_cpu(rsp->credits);
5263 result = __le16_to_cpu(rsp->result);
5264
5265 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5266 return -EPROTO;
5267
5268 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5269 dcid, mtu, mps, credits, result);
5270
5271 mutex_lock(&conn->chan_lock);
5272
5273 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5274 if (!chan) {
5275 err = -EBADSLT;
5276 goto unlock;
5277 }
5278
5279 err = 0;
5280
5281 l2cap_chan_lock(chan);
5282
5283 switch (result) {
5284 case L2CAP_CR_SUCCESS:
5285 chan->ident = 0;
5286 chan->dcid = dcid;
5287 chan->omtu = mtu;
5288 chan->remote_mps = mps;
5289 l2cap_chan_ready(chan);
5290 break;
5291
5292 default:
5293 l2cap_chan_del(chan, ECONNREFUSED);
5294 break;
5295 }
5296
5297 l2cap_chan_unlock(chan);
5298
5299 unlock:
5300 mutex_unlock(&conn->chan_lock);
5301
5302 return err;
5303 }
5304
5305 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5306 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5307 u8 *data)
5308 {
5309 int err = 0;
5310
5311 switch (cmd->code) {
5312 case L2CAP_COMMAND_REJ:
5313 l2cap_command_rej(conn, cmd, cmd_len, data);
5314 break;
5315
5316 case L2CAP_CONN_REQ:
5317 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5318 break;
5319
5320 case L2CAP_CONN_RSP:
5321 case L2CAP_CREATE_CHAN_RSP:
5322 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5323 break;
5324
5325 case L2CAP_CONF_REQ:
5326 err = l2cap_config_req(conn, cmd, cmd_len, data);
5327 break;
5328
5329 case L2CAP_CONF_RSP:
5330 l2cap_config_rsp(conn, cmd, cmd_len, data);
5331 break;
5332
5333 case L2CAP_DISCONN_REQ:
5334 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5335 break;
5336
5337 case L2CAP_DISCONN_RSP:
5338 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5339 break;
5340
5341 case L2CAP_ECHO_REQ:
5342 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5343 break;
5344
5345 case L2CAP_ECHO_RSP:
5346 break;
5347
5348 case L2CAP_INFO_REQ:
5349 err = l2cap_information_req(conn, cmd, cmd_len, data);
5350 break;
5351
5352 case L2CAP_INFO_RSP:
5353 l2cap_information_rsp(conn, cmd, cmd_len, data);
5354 break;
5355
5356 case L2CAP_CREATE_CHAN_REQ:
5357 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5358 break;
5359
5360 case L2CAP_MOVE_CHAN_REQ:
5361 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5362 break;
5363
5364 case L2CAP_MOVE_CHAN_RSP:
5365 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5366 break;
5367
5368 case L2CAP_MOVE_CHAN_CFM:
5369 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5370 break;
5371
5372 case L2CAP_MOVE_CHAN_CFM_RSP:
5373 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5374 break;
5375
5376 default:
5377 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5378 err = -EINVAL;
5379 break;
5380 }
5381
5382 return err;
5383 }
5384
5385 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5386 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5387 u8 *data)
5388 {
5389 switch (cmd->code) {
5390 case L2CAP_COMMAND_REJ:
5391 return 0;
5392
5393 case L2CAP_CONN_PARAM_UPDATE_REQ:
5394 return l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5395
5396 case L2CAP_CONN_PARAM_UPDATE_RSP:
5397 return 0;
5398
5399 case L2CAP_LE_CONN_RSP:
5400 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5401 return 0;
5402
5403 default:
5404 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5405 return -EINVAL;
5406 }
5407 }
5408
5409 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5410 struct sk_buff *skb)
5411 {
5412 struct hci_conn *hcon = conn->hcon;
5413 struct l2cap_cmd_hdr *cmd;
5414 u16 len;
5415 int err;
5416
5417 if (hcon->type != LE_LINK)
5418 goto drop;
5419
5420 if (skb->len < L2CAP_CMD_HDR_SIZE)
5421 goto drop;
5422
5423 cmd = (void *) skb->data;
5424 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5425
5426 len = le16_to_cpu(cmd->len);
5427
5428 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5429
5430 if (len != skb->len || !cmd->ident) {
5431 BT_DBG("corrupted command");
5432 goto drop;
5433 }
5434
5435 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5436 if (err) {
5437 struct l2cap_cmd_rej_unk rej;
5438
5439 BT_ERR("Wrong link type (%d)", err);
5440
5441 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5442 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5443 sizeof(rej), &rej);
5444 }
5445
5446 drop:
5447 kfree_skb(skb);
5448 }
5449
5450 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5451 struct sk_buff *skb)
5452 {
5453 struct hci_conn *hcon = conn->hcon;
5454 u8 *data = skb->data;
5455 int len = skb->len;
5456 struct l2cap_cmd_hdr cmd;
5457 int err;
5458
5459 l2cap_raw_recv(conn, skb);
5460
5461 if (hcon->type != ACL_LINK)
5462 goto drop;
5463
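/* A single BR/EDR signaling C-frame may carry several commands back to
 * back, so walk the payload one command header at a time.
 */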
5464 while (len >= L2CAP_CMD_HDR_SIZE) {
5465 u16 cmd_len;
5466 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5467 data += L2CAP_CMD_HDR_SIZE;
5468 len -= L2CAP_CMD_HDR_SIZE;
5469
5470 cmd_len = le16_to_cpu(cmd.len);
5471
5472 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5473 cmd.ident);
5474
5475 if (cmd_len > len || !cmd.ident) {
5476 BT_DBG("corrupted command");
5477 break;
5478 }
5479
5480 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5481 if (err) {
5482 struct l2cap_cmd_rej_unk rej;
5483
5484 BT_ERR("Wrong link type (%d)", err);
5485
5486 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5487 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5488 sizeof(rej), &rej);
5489 }
5490
5491 data += cmd_len;
5492 len -= cmd_len;
5493 }
5494
5495 drop:
5496 kfree_skb(skb);
5497 }
5498
5499 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5500 {
5501 u16 our_fcs, rcv_fcs;
5502 int hdr_size;
5503
5504 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5505 hdr_size = L2CAP_EXT_HDR_SIZE;
5506 else
5507 hdr_size = L2CAP_ENH_HDR_SIZE;
5508
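/* skb_trim() only shortens skb->len, so the two FCS octets stay in the
 * buffer and can be read from skb->data + skb->len after the trim. The
 * local CRC is computed over the L2CAP header bytes still sitting just
 * in front of skb->data plus the trimmed payload.
 */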
5509 if (chan->fcs == L2CAP_FCS_CRC16) {
5510 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5511 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5512 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5513
5514 if (our_fcs != rcv_fcs)
5515 return -EBADMSG;
5516 }
5517 return 0;
5518 }
5519
5520 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5521 {
5522 struct l2cap_ctrl control;
5523
5524 BT_DBG("chan %p", chan);
5525
5526 memset(&control, 0, sizeof(control));
5527 control.sframe = 1;
5528 control.final = 1;
5529 control.reqseq = chan->buffer_seq;
5530 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5531
5532 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5533 control.super = L2CAP_SUPER_RNR;
5534 l2cap_send_sframe(chan, &control);
5535 }
5536
5537 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5538 chan->unacked_frames > 0)
5539 __set_retrans_timer(chan);
5540
5541 /* Send pending iframes */
5542 l2cap_ertm_send(chan);
5543
5544 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5545 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5546 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5547 * send it now.
5548 */
5549 control.super = L2CAP_SUPER_RR;
5550 l2cap_send_sframe(chan, &control);
5551 }
5552 }
5553
5554 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5555 struct sk_buff **last_frag)
5556 {
5557 /* skb->len reflects data in skb as well as all fragments
5558 * skb->data_len reflects only data in fragments
5559 */
5560 if (!skb_has_frag_list(skb))
5561 skb_shinfo(skb)->frag_list = new_frag;
5562
5563 new_frag->next = NULL;
5564
5565 (*last_frag)->next = new_frag;
5566 *last_frag = new_frag;
5567
5568 skb->len += new_frag->len;
5569 skb->data_len += new_frag->len;
5570 skb->truesize += new_frag->truesize;
5571 }
5572
5573 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5574 struct l2cap_ctrl *control)
5575 {
5576 int err = -EINVAL;
5577
5578 switch (control->sar) {
5579 case L2CAP_SAR_UNSEGMENTED:
5580 if (chan->sdu)
5581 break;
5582
5583 err = chan->ops->recv(chan, skb);
5584 break;
5585
5586 case L2CAP_SAR_START:
5587 if (chan->sdu)
5588 break;
5589
5590 chan->sdu_len = get_unaligned_le16(skb->data);
5591 skb_pull(skb, L2CAP_SDULEN_SIZE);
5592
5593 if (chan->sdu_len > chan->imtu) {
5594 err = -EMSGSIZE;
5595 break;
5596 }
5597
5598 if (skb->len >= chan->sdu_len)
5599 break;
5600
5601 chan->sdu = skb;
5602 chan->sdu_last_frag = skb;
5603
5604 skb = NULL;
5605 err = 0;
5606 break;
5607
5608 case L2CAP_SAR_CONTINUE:
5609 if (!chan->sdu)
5610 break;
5611
5612 append_skb_frag(chan->sdu, skb,
5613 &chan->sdu_last_frag);
5614 skb = NULL;
5615
5616 if (chan->sdu->len >= chan->sdu_len)
5617 break;
5618
5619 err = 0;
5620 break;
5621
5622 case L2CAP_SAR_END:
5623 if (!chan->sdu)
5624 break;
5625
5626 append_skb_frag(chan->sdu, skb,
5627 &chan->sdu_last_frag);
5628 skb = NULL;
5629
5630 if (chan->sdu->len != chan->sdu_len)
5631 break;
5632
5633 err = chan->ops->recv(chan, chan->sdu);
5634
5635 if (!err) {
5636 /* Reassembly complete */
5637 chan->sdu = NULL;
5638 chan->sdu_last_frag = NULL;
5639 chan->sdu_len = 0;
5640 }
5641 break;
5642 }
5643
5644 if (err) {
5645 kfree_skb(skb);
5646 kfree_skb(chan->sdu);
5647 chan->sdu = NULL;
5648 chan->sdu_last_frag = NULL;
5649 chan->sdu_len = 0;
5650 }
5651
5652 return err;
5653 }
5654
5655 static int l2cap_resegment(struct l2cap_chan *chan)
5656 {
5657 /* Placeholder */
5658 return 0;
5659 }
5660
5661 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5662 {
5663 u8 event;
5664
5665 if (chan->mode != L2CAP_MODE_ERTM)
5666 return;
5667
5668 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5669 l2cap_tx(chan, NULL, NULL, event);
5670 }
5671
5672 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5673 {
5674 int err = 0;
5675 /* Pass sequential frames to l2cap_reassemble_sdu()
5676 * until a gap is encountered.
5677 */
5678
5679 BT_DBG("chan %p", chan);
5680
5681 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5682 struct sk_buff *skb;
5683 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5684 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5685
5686 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5687
5688 if (!skb)
5689 break;
5690
5691 skb_unlink(skb, &chan->srej_q);
5692 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5693 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5694 if (err)
5695 break;
5696 }
5697
5698 if (skb_queue_empty(&chan->srej_q)) {
5699 chan->rx_state = L2CAP_RX_STATE_RECV;
5700 l2cap_send_ack(chan);
5701 }
5702
5703 return err;
5704 }
5705
5706 static void l2cap_handle_srej(struct l2cap_chan *chan,
5707 struct l2cap_ctrl *control)
5708 {
5709 struct sk_buff *skb;
5710
5711 BT_DBG("chan %p, control %p", chan, control);
5712
5713 if (control->reqseq == chan->next_tx_seq) {
5714 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5715 l2cap_send_disconn_req(chan, ECONNRESET);
5716 return;
5717 }
5718
5719 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5720
5721 if (skb == NULL) {
5722 BT_DBG("Seq %d not available for retransmission",
5723 control->reqseq);
5724 return;
5725 }
5726
5727 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5728 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5729 l2cap_send_disconn_req(chan, ECONNRESET);
5730 return;
5731 }
5732
5733 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5734
5735 if (control->poll) {
5736 l2cap_pass_to_tx(chan, control);
5737
5738 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5739 l2cap_retransmit(chan, control);
5740 l2cap_ertm_send(chan);
5741
5742 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5743 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5744 chan->srej_save_reqseq = control->reqseq;
5745 }
5746 } else {
5747 l2cap_pass_to_tx_fbit(chan, control);
5748
5749 if (control->final) {
5750 if (chan->srej_save_reqseq != control->reqseq ||
5751 !test_and_clear_bit(CONN_SREJ_ACT,
5752 &chan->conn_state))
5753 l2cap_retransmit(chan, control);
5754 } else {
5755 l2cap_retransmit(chan, control);
5756 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5757 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5758 chan->srej_save_reqseq = control->reqseq;
5759 }
5760 }
5761 }
5762 }
5763
5764 static void l2cap_handle_rej(struct l2cap_chan *chan,
5765 struct l2cap_ctrl *control)
5766 {
5767 struct sk_buff *skb;
5768
5769 BT_DBG("chan %p, control %p", chan, control);
5770
5771 if (control->reqseq == chan->next_tx_seq) {
5772 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5773 l2cap_send_disconn_req(chan, ECONNRESET);
5774 return;
5775 }
5776
5777 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5778
5779 if (chan->max_tx && skb &&
5780 bt_cb(skb)->control.retries >= chan->max_tx) {
5781 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5782 l2cap_send_disconn_req(chan, ECONNRESET);
5783 return;
5784 }
5785
5786 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5787
5788 l2cap_pass_to_tx(chan, control);
5789
5790 if (control->final) {
5791 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5792 l2cap_retransmit_all(chan, control);
5793 } else {
5794 l2cap_retransmit_all(chan, control);
5795 l2cap_ertm_send(chan);
5796 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5797 set_bit(CONN_REJ_ACT, &chan->conn_state);
5798 }
5799 }
5800
5801 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5802 {
5803 BT_DBG("chan %p, txseq %d", chan, txseq);
5804
5805 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5806 chan->expected_tx_seq);
5807
5808 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5809 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5810 chan->tx_win) {
5811 /* See notes below regarding "double poll" and
5812 * invalid packets.
5813 */
5814 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5815 BT_DBG("Invalid/Ignore - after SREJ");
5816 return L2CAP_TXSEQ_INVALID_IGNORE;
5817 } else {
5818 BT_DBG("Invalid - in window after SREJ sent");
5819 return L2CAP_TXSEQ_INVALID;
5820 }
5821 }
5822
5823 if (chan->srej_list.head == txseq) {
5824 BT_DBG("Expected SREJ");
5825 return L2CAP_TXSEQ_EXPECTED_SREJ;
5826 }
5827
5828 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5829 BT_DBG("Duplicate SREJ - txseq already stored");
5830 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5831 }
5832
5833 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5834 BT_DBG("Unexpected SREJ - not requested");
5835 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5836 }
5837 }
5838
5839 if (chan->expected_tx_seq == txseq) {
5840 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5841 chan->tx_win) {
5842 BT_DBG("Invalid - txseq outside tx window");
5843 return L2CAP_TXSEQ_INVALID;
5844 } else {
5845 BT_DBG("Expected");
5846 return L2CAP_TXSEQ_EXPECTED;
5847 }
5848 }
5849
5850 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5851 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5852 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5853 return L2CAP_TXSEQ_DUPLICATE;
5854 }
5855
5856 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5857 /* A source of invalid packets is a "double poll" condition,
5858 * where delays cause us to send multiple poll packets. If
5859 * the remote stack receives and processes both polls,
5860 * sequence numbers can wrap around in such a way that a
5861 * resent frame has a sequence number that looks like new data
5862 * with a sequence gap. This would trigger an erroneous SREJ
5863 * request.
5864 *
5865 * Fortunately, this is impossible with a tx window that's
5866 * less than half of the maximum sequence number, which allows
5867 * invalid frames to be safely ignored.
5868 *
5869 * With tx window sizes greater than half of the tx window
5870 * maximum, the frame is invalid and cannot be ignored. This
5871 * causes a disconnect.
5872 */
5873
5874 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5875 BT_DBG("Invalid/Ignore - txseq outside tx window");
5876 return L2CAP_TXSEQ_INVALID_IGNORE;
5877 } else {
5878 BT_DBG("Invalid - txseq outside tx window");
5879 return L2CAP_TXSEQ_INVALID;
5880 }
5881 } else {
5882 BT_DBG("Unexpected - txseq indicates missing frames");
5883 return L2CAP_TXSEQ_UNEXPECTED;
5884 }
5885 }
5886
5887 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5888 struct l2cap_ctrl *control,
5889 struct sk_buff *skb, u8 event)
5890 {
5891 int err = 0;
5892 bool skb_in_use = false;
5893
5894 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5895 event);
5896
5897 switch (event) {
5898 case L2CAP_EV_RECV_IFRAME:
5899 switch (l2cap_classify_txseq(chan, control->txseq)) {
5900 case L2CAP_TXSEQ_EXPECTED:
5901 l2cap_pass_to_tx(chan, control);
5902
5903 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5904 BT_DBG("Busy, discarding expected seq %d",
5905 control->txseq);
5906 break;
5907 }
5908
5909 chan->expected_tx_seq = __next_seq(chan,
5910 control->txseq);
5911
5912 chan->buffer_seq = chan->expected_tx_seq;
5913 skb_in_use = true;
5914
5915 err = l2cap_reassemble_sdu(chan, skb, control);
5916 if (err)
5917 break;
5918
5919 if (control->final) {
5920 if (!test_and_clear_bit(CONN_REJ_ACT,
5921 &chan->conn_state)) {
5922 control->final = 0;
5923 l2cap_retransmit_all(chan, control);
5924 l2cap_ertm_send(chan);
5925 }
5926 }
5927
5928 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5929 l2cap_send_ack(chan);
5930 break;
5931 case L2CAP_TXSEQ_UNEXPECTED:
5932 l2cap_pass_to_tx(chan, control);
5933
5934 /* Can't issue SREJ frames in the local busy state.
5935 * Drop this frame, it will be seen as missing
5936 * when local busy is exited.
5937 */
5938 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5939 BT_DBG("Busy, discarding unexpected seq %d",
5940 control->txseq);
5941 break;
5942 }
5943
5944 /* There was a gap in the sequence, so an SREJ
5945 * must be sent for each missing frame. The
5946 * current frame is stored for later use.
5947 */
5948 skb_queue_tail(&chan->srej_q, skb);
5949 skb_in_use = true;
5950 BT_DBG("Queued %p (queue len %d)", skb,
5951 skb_queue_len(&chan->srej_q));
5952
5953 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5954 l2cap_seq_list_clear(&chan->srej_list);
5955 l2cap_send_srej(chan, control->txseq);
5956
5957 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5958 break;
5959 case L2CAP_TXSEQ_DUPLICATE:
5960 l2cap_pass_to_tx(chan, control);
5961 break;
5962 case L2CAP_TXSEQ_INVALID_IGNORE:
5963 break;
5964 case L2CAP_TXSEQ_INVALID:
5965 default:
5966 l2cap_send_disconn_req(chan, ECONNRESET);
5967 break;
5968 }
5969 break;
5970 case L2CAP_EV_RECV_RR:
5971 l2cap_pass_to_tx(chan, control);
5972 if (control->final) {
5973 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5974
5975 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5976 !__chan_is_moving(chan)) {
5977 control->final = 0;
5978 l2cap_retransmit_all(chan, control);
5979 }
5980
5981 l2cap_ertm_send(chan);
5982 } else if (control->poll) {
5983 l2cap_send_i_or_rr_or_rnr(chan);
5984 } else {
5985 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5986 &chan->conn_state) &&
5987 chan->unacked_frames)
5988 __set_retrans_timer(chan);
5989
5990 l2cap_ertm_send(chan);
5991 }
5992 break;
5993 case L2CAP_EV_RECV_RNR:
5994 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5995 l2cap_pass_to_tx(chan, control);
5996 if (control && control->poll) {
5997 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5998 l2cap_send_rr_or_rnr(chan, 0);
5999 }
6000 __clear_retrans_timer(chan);
6001 l2cap_seq_list_clear(&chan->retrans_list);
6002 break;
6003 case L2CAP_EV_RECV_REJ:
6004 l2cap_handle_rej(chan, control);
6005 break;
6006 case L2CAP_EV_RECV_SREJ:
6007 l2cap_handle_srej(chan, control);
6008 break;
6009 default:
6010 break;
6011 }
6012
6013 if (skb && !skb_in_use) {
6014 BT_DBG("Freeing %p", skb);
6015 kfree_skb(skb);
6016 }
6017
6018 return err;
6019 }
6020
6021 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6022 struct l2cap_ctrl *control,
6023 struct sk_buff *skb, u8 event)
6024 {
6025 int err = 0;
6026 u16 txseq = control->txseq;
6027 bool skb_in_use = false;
6028
6029 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6030 event);
6031
6032 switch (event) {
6033 case L2CAP_EV_RECV_IFRAME:
6034 switch (l2cap_classify_txseq(chan, txseq)) {
6035 case L2CAP_TXSEQ_EXPECTED:
6036 /* Keep frame for reassembly later */
6037 l2cap_pass_to_tx(chan, control);
6038 skb_queue_tail(&chan->srej_q, skb);
6039 skb_in_use = true;
6040 BT_DBG("Queued %p (queue len %d)", skb,
6041 skb_queue_len(&chan->srej_q));
6042
6043 chan->expected_tx_seq = __next_seq(chan, txseq);
6044 break;
6045 case L2CAP_TXSEQ_EXPECTED_SREJ:
6046 l2cap_seq_list_pop(&chan->srej_list);
6047
6048 l2cap_pass_to_tx(chan, control);
6049 skb_queue_tail(&chan->srej_q, skb);
6050 skb_in_use = true;
6051 BT_DBG("Queued %p (queue len %d)", skb,
6052 skb_queue_len(&chan->srej_q));
6053
6054 err = l2cap_rx_queued_iframes(chan);
6055 if (err)
6056 break;
6057
6058 break;
6059 case L2CAP_TXSEQ_UNEXPECTED:
6060 /* Got a frame that can't be reassembled yet.
6061 * Save it for later, and send SREJs to cover
6062 * the missing frames.
6063 */
6064 skb_queue_tail(&chan->srej_q, skb);
6065 skb_in_use = true;
6066 BT_DBG("Queued %p (queue len %d)", skb,
6067 skb_queue_len(&chan->srej_q));
6068
6069 l2cap_pass_to_tx(chan, control);
6070 l2cap_send_srej(chan, control->txseq);
6071 break;
6072 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6073 /* This frame was requested with an SREJ, but
6074 * some expected retransmitted frames are
6075 * missing. Request retransmission of missing
6076 * SREJ'd frames.
6077 */
6078 skb_queue_tail(&chan->srej_q, skb);
6079 skb_in_use = true;
6080 BT_DBG("Queued %p (queue len %d)", skb,
6081 skb_queue_len(&chan->srej_q));
6082
6083 l2cap_pass_to_tx(chan, control);
6084 l2cap_send_srej_list(chan, control->txseq);
6085 break;
6086 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6087 /* We've already queued this frame. Drop this copy. */
6088 l2cap_pass_to_tx(chan, control);
6089 break;
6090 case L2CAP_TXSEQ_DUPLICATE:
6091 /* Expecting a later sequence number, so this frame
6092 * was already received. Ignore it completely.
6093 */
6094 break;
6095 case L2CAP_TXSEQ_INVALID_IGNORE:
6096 break;
6097 case L2CAP_TXSEQ_INVALID:
6098 default:
6099 l2cap_send_disconn_req(chan, ECONNRESET);
6100 break;
6101 }
6102 break;
6103 case L2CAP_EV_RECV_RR:
6104 l2cap_pass_to_tx(chan, control);
6105 if (control->final) {
6106 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6107
6108 if (!test_and_clear_bit(CONN_REJ_ACT,
6109 &chan->conn_state)) {
6110 control->final = 0;
6111 l2cap_retransmit_all(chan, control);
6112 }
6113
6114 l2cap_ertm_send(chan);
6115 } else if (control->poll) {
6116 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6117 &chan->conn_state) &&
6118 chan->unacked_frames) {
6119 __set_retrans_timer(chan);
6120 }
6121
6122 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6123 l2cap_send_srej_tail(chan);
6124 } else {
6125 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6126 &chan->conn_state) &&
6127 chan->unacked_frames)
6128 __set_retrans_timer(chan);
6129
6130 l2cap_send_ack(chan);
6131 }
6132 break;
6133 case L2CAP_EV_RECV_RNR:
6134 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6135 l2cap_pass_to_tx(chan, control);
6136 if (control->poll) {
6137 l2cap_send_srej_tail(chan);
6138 } else {
6139 struct l2cap_ctrl rr_control;
6140 memset(&rr_control, 0, sizeof(rr_control));
6141 rr_control.sframe = 1;
6142 rr_control.super = L2CAP_SUPER_RR;
6143 rr_control.reqseq = chan->buffer_seq;
6144 l2cap_send_sframe(chan, &rr_control);
6145 }
6146
6147 break;
6148 case L2CAP_EV_RECV_REJ:
6149 l2cap_handle_rej(chan, control);
6150 break;
6151 case L2CAP_EV_RECV_SREJ:
6152 l2cap_handle_srej(chan, control);
6153 break;
6154 }
6155
6156 if (skb && !skb_in_use) {
6157 BT_DBG("Freeing %p", skb);
6158 kfree_skb(skb);
6159 }
6160
6161 return err;
6162 }
6163
6164 static int l2cap_finish_move(struct l2cap_chan *chan)
6165 {
6166 BT_DBG("chan %p", chan);
6167
6168 chan->rx_state = L2CAP_RX_STATE_RECV;
6169
6170 if (chan->hs_hcon)
6171 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6172 else
6173 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6174
6175 return l2cap_resegment(chan);
6176 }
6177
6178 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6179 struct l2cap_ctrl *control,
6180 struct sk_buff *skb, u8 event)
6181 {
6182 int err;
6183
6184 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6185 event);
6186
6187 if (!control->poll)
6188 return -EPROTO;
6189
6190 l2cap_process_reqseq(chan, control->reqseq);
6191
6192 if (!skb_queue_empty(&chan->tx_q))
6193 chan->tx_send_head = skb_peek(&chan->tx_q);
6194 else
6195 chan->tx_send_head = NULL;
6196
6197 /* Rewind next_tx_seq to the point expected
6198 * by the receiver.
6199 */
6200 chan->next_tx_seq = control->reqseq;
6201 chan->unacked_frames = 0;
6202
6203 err = l2cap_finish_move(chan);
6204 if (err)
6205 return err;
6206
6207 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6208 l2cap_send_i_or_rr_or_rnr(chan);
6209
6210 if (event == L2CAP_EV_RECV_IFRAME)
6211 return -EPROTO;
6212
6213 return l2cap_rx_state_recv(chan, control, NULL, event);
6214 }
6215
6216 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6217 struct l2cap_ctrl *control,
6218 struct sk_buff *skb, u8 event)
6219 {
6220 int err;
6221
6222 if (!control->final)
6223 return -EPROTO;
6224
6225 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6226
6227 chan->rx_state = L2CAP_RX_STATE_RECV;
6228 l2cap_process_reqseq(chan, control->reqseq);
6229
6230 if (!skb_queue_empty(&chan->tx_q))
6231 chan->tx_send_head = skb_peek(&chan->tx_q);
6232 else
6233 chan->tx_send_head = NULL;
6234
6235 /* Rewind next_tx_seq to the point expected
6236 * by the receiver.
6237 */
6238 chan->next_tx_seq = control->reqseq;
6239 chan->unacked_frames = 0;
6240
6241 if (chan->hs_hcon)
6242 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6243 else
6244 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6245
6246 err = l2cap_resegment(chan);
6247
6248 if (!err)
6249 err = l2cap_rx_state_recv(chan, control, skb, event);
6250
6251 return err;
6252 }
6253
6254 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6255 {
6256 /* Make sure reqseq is for a packet that has been sent but not acked */
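/* For example, with next_tx_seq = 5 and expected_ack_seq = 2 there are
 * three unacked frames, so reqseq values 2-5 are accepted and anything
 * else is rejected (all arithmetic is modulo the sequence space).
 */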
6257 u16 unacked;
6258
6259 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6260 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6261 }
6262
6263 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6264 struct sk_buff *skb, u8 event)
6265 {
6266 int err = 0;
6267
6268 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6269 control, skb, event, chan->rx_state);
6270
6271 if (__valid_reqseq(chan, control->reqseq)) {
6272 switch (chan->rx_state) {
6273 case L2CAP_RX_STATE_RECV:
6274 err = l2cap_rx_state_recv(chan, control, skb, event);
6275 break;
6276 case L2CAP_RX_STATE_SREJ_SENT:
6277 err = l2cap_rx_state_srej_sent(chan, control, skb,
6278 event);
6279 break;
6280 case L2CAP_RX_STATE_WAIT_P:
6281 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6282 break;
6283 case L2CAP_RX_STATE_WAIT_F:
6284 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6285 break;
6286 default:
6287 /* shut it down */
6288 break;
6289 }
6290 } else {
6291 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6292 control->reqseq, chan->next_tx_seq,
6293 chan->expected_ack_seq);
6294 l2cap_send_disconn_req(chan, ECONNRESET);
6295 }
6296
6297 return err;
6298 }
6299
6300 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6301 struct sk_buff *skb)
6302 {
6303 int err = 0;
6304
6305 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6306 chan->rx_state);
6307
6308 if (l2cap_classify_txseq(chan, control->txseq) ==
6309 L2CAP_TXSEQ_EXPECTED) {
6310 l2cap_pass_to_tx(chan, control);
6311
6312 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6313 __next_seq(chan, chan->buffer_seq));
6314
6315 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6316
6317 l2cap_reassemble_sdu(chan, skb, control);
6318 } else {
6319 if (chan->sdu) {
6320 kfree_skb(chan->sdu);
6321 chan->sdu = NULL;
6322 }
6323 chan->sdu_last_frag = NULL;
6324 chan->sdu_len = 0;
6325
6326 if (skb) {
6327 BT_DBG("Freeing %p", skb);
6328 kfree_skb(skb);
6329 }
6330 }
6331
6332 chan->last_acked_seq = control->txseq;
6333 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6334
6335 return err;
6336 }
6337
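/* Common entry point for ERTM and streaming mode data: verify the FCS
 * and payload length, then feed I-frames and S-frames as events into
 * the appropriate receive state machine, disconnecting the channel on
 * protocol errors.
 */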
6338 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6339 {
6340 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6341 u16 len;
6342 u8 event;
6343
6344 __unpack_control(chan, skb);
6345
6346 len = skb->len;
6347
6348 /*
6349 * We can simply drop a corrupted I-frame here.
6350 * The receive state machine will notice the missing
6351 * sequence number, start the normal recovery procedure
6352 * and request retransmission.
6353 */
6353 if (l2cap_check_fcs(chan, skb))
6354 goto drop;
6355
6356 if (!control->sframe && control->sar == L2CAP_SAR_START)
6357 len -= L2CAP_SDULEN_SIZE;
6358
6359 if (chan->fcs == L2CAP_FCS_CRC16)
6360 len -= L2CAP_FCS_SIZE;
6361
6362 if (len > chan->mps) {
6363 l2cap_send_disconn_req(chan, ECONNRESET);
6364 goto drop;
6365 }
6366
6367 if (!control->sframe) {
6368 int err;
6369
6370 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6371 control->sar, control->reqseq, control->final,
6372 control->txseq);
6373
6374 /* Validate F-bit - F=0 always valid, F=1 only
6375 * valid in TX WAIT_F
6376 */
6377 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6378 goto drop;
6379
6380 if (chan->mode != L2CAP_MODE_STREAMING) {
6381 event = L2CAP_EV_RECV_IFRAME;
6382 err = l2cap_rx(chan, control, skb, event);
6383 } else {
6384 err = l2cap_stream_rx(chan, control, skb);
6385 }
6386
6387 if (err)
6388 l2cap_send_disconn_req(chan, ECONNRESET);
6389 } else {
6390 const u8 rx_func_to_event[4] = {
6391 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6392 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6393 };
6394
6395 /* Only I-frames are expected in streaming mode */
6396 if (chan->mode == L2CAP_MODE_STREAMING)
6397 goto drop;
6398
6399 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6400 control->reqseq, control->final, control->poll,
6401 control->super);
6402
6403 if (len != 0) {
6404 BT_ERR("Trailing bytes: %d in sframe", len);
6405 l2cap_send_disconn_req(chan, ECONNRESET);
6406 goto drop;
6407 }
6408
6409 /* Validate F and P bits */
6410 if (control->final && (control->poll ||
6411 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6412 goto drop;
6413
6414 event = rx_func_to_event[control->super];
6415 if (l2cap_rx(chan, control, skb, event))
6416 l2cap_send_disconn_req(chan, ECONNRESET);
6417 }
6418
6419 return 0;
6420
6421 drop:
6422 kfree_skb(skb);
6423 return 0;
6424 }
6425
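/* Route an incoming data frame to the channel identified by its CID,
 * creating the A2MP channel on demand for L2CAP_CID_A2MP. Frames for
 * unknown CIDs or channels that are not connected are dropped.
 */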
6426 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6427 struct sk_buff *skb)
6428 {
6429 struct l2cap_chan *chan;
6430
6431 chan = l2cap_get_chan_by_scid(conn, cid);
6432 if (!chan) {
6433 if (cid == L2CAP_CID_A2MP) {
6434 chan = a2mp_channel_create(conn, skb);
6435 if (!chan) {
6436 kfree_skb(skb);
6437 return;
6438 }
6439
6440 l2cap_chan_lock(chan);
6441 } else {
6442 BT_DBG("unknown cid 0x%4.4x", cid);
6443 /* Drop packet and return */
6444 kfree_skb(skb);
6445 return;
6446 }
6447 }
6448
6449 BT_DBG("chan %p, len %d", chan, skb->len);
6450
6451 if (chan->state != BT_CONNECTED)
6452 goto drop;
6453
6454 switch (chan->mode) {
6455 case L2CAP_MODE_BASIC:
6456 /* If the socket recv buffer overflows we drop data here,
6457 * which is *bad* because L2CAP has to be reliable.
6458 * But we have no other choice: L2CAP doesn't provide
6459 * a flow control mechanism. */
6460
6461 if (chan->imtu < skb->len)
6462 goto drop;
6463
6464 if (!chan->ops->recv(chan, skb))
6465 goto done;
6466 break;
6467
6468 case L2CAP_MODE_ERTM:
6469 case L2CAP_MODE_STREAMING:
6470 l2cap_data_rcv(chan, skb);
6471 goto done;
6472
6473 default:
6474 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6475 break;
6476 }
6477
6478 drop:
6479 kfree_skb(skb);
6480
6481 done:
6482 l2cap_chan_unlock(chan);
6483 }
6484
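/* Deliver a connectionless (G-frame) payload to the channel bound to
 * the given PSM on a BR/EDR link; the remote address and PSM are saved
 * in the skb so they can be reported in msg_name.
 */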
6485 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6486 struct sk_buff *skb)
6487 {
6488 struct hci_conn *hcon = conn->hcon;
6489 struct l2cap_chan *chan;
6490
6491 if (hcon->type != ACL_LINK)
6492 goto drop;
6493
6494 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6495 ACL_LINK);
6496 if (!chan)
6497 goto drop;
6498
6499 BT_DBG("chan %p, len %d", chan, skb->len);
6500
6501 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6502 goto drop;
6503
6504 if (chan->imtu < skb->len)
6505 goto drop;
6506
6507 /* Store remote BD_ADDR and PSM for msg_name */
6508 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6509 bt_cb(skb)->psm = psm;
6510
6511 if (!chan->ops->recv(chan, skb))
6512 return;
6513
6514 drop:
6515 kfree_skb(skb);
6516 }
6517
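/* Deliver data received on the LE ATT fixed channel, dropping it if the
 * remote device is blacklisted or the payload exceeds the channel MTU.
 */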
6518 static void l2cap_att_channel(struct l2cap_conn *conn,
6519 struct sk_buff *skb)
6520 {
6521 struct hci_conn *hcon = conn->hcon;
6522 struct l2cap_chan *chan;
6523
6524 if (hcon->type != LE_LINK)
6525 goto drop;
6526
6527 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6528 &hcon->src, &hcon->dst);
6529 if (!chan)
6530 goto drop;
6531
6532 BT_DBG("chan %p, len %d", chan, skb->len);
6533
6534 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
6535 goto drop;
6536
6537 if (chan->imtu < skb->len)
6538 goto drop;
6539
6540 if (!chan->ops->recv(chan, skb))
6541 return;
6542
6543 drop:
6544 kfree_skb(skb);
6545 }
6546
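/* Demultiplex a complete L2CAP frame by CID: signalling, connectionless,
 * ATT, LE signalling and SMP traffic use fixed channels; everything else
 * is handled as connection-oriented data. Frames whose length does not
 * match the header are discarded.
 */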
6547 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6548 {
6549 struct l2cap_hdr *lh = (void *) skb->data;
6550 u16 cid, len;
6551 __le16 psm;
6552
6553 skb_pull(skb, L2CAP_HDR_SIZE);
6554 cid = __le16_to_cpu(lh->cid);
6555 len = __le16_to_cpu(lh->len);
6556
6557 if (len != skb->len) {
6558 kfree_skb(skb);
6559 return;
6560 }
6561
6562 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6563
6564 switch (cid) {
6565 case L2CAP_CID_SIGNALING:
6566 l2cap_sig_channel(conn, skb);
6567 break;
6568
6569 case L2CAP_CID_CONN_LESS:
6570 psm = get_unaligned((__le16 *) skb->data);
6571 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6572 l2cap_conless_channel(conn, psm, skb);
6573 break;
6574
6575 case L2CAP_CID_ATT:
6576 l2cap_att_channel(conn, skb);
6577 break;
6578
6579 case L2CAP_CID_LE_SIGNALING:
6580 l2cap_le_sig_channel(conn, skb);
6581 break;
6582
6583 case L2CAP_CID_SMP:
6584 if (smp_sig_channel(conn, skb))
6585 l2cap_conn_del(conn->hcon, EACCES);
6586 break;
6587
6588 default:
6589 l2cap_data_channel(conn, cid, skb);
6590 break;
6591 }
6592 }
6593
6594 /* ---- L2CAP interface with lower layer (HCI) ---- */
6595
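/* Called by the HCI core for an incoming ACL connection request: report
 * whether any listening channel is willing to accept it and whether a
 * role switch to master is allowed.
 */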
6596 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6597 {
6598 int exact = 0, lm1 = 0, lm2 = 0;
6599 struct l2cap_chan *c;
6600
6601 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6602
6603 /* Find listening sockets and check their link_mode */
6604 read_lock(&chan_list_lock);
6605 list_for_each_entry(c, &chan_list, global_l) {
6606 if (c->state != BT_LISTEN)
6607 continue;
6608
6609 if (!bacmp(&c->src, &hdev->bdaddr)) {
6610 lm1 |= HCI_LM_ACCEPT;
6611 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6612 lm1 |= HCI_LM_MASTER;
6613 exact++;
6614 } else if (!bacmp(&c->src, BDADDR_ANY)) {
6615 lm2 |= HCI_LM_ACCEPT;
6616 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6617 lm2 |= HCI_LM_MASTER;
6618 }
6619 }
6620 read_unlock(&chan_list_lock);
6621
6622 return exact ? lm1 : lm2;
6623 }
6624
6625 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6626 {
6627 struct l2cap_conn *conn;
6628
6629 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6630
6631 if (!status) {
6632 conn = l2cap_conn_add(hcon);
6633 if (conn)
6634 l2cap_conn_ready(conn);
6635 } else {
6636 l2cap_conn_del(hcon, bt_to_errno(status));
6637 }
6638 }
6639
6640 int l2cap_disconn_ind(struct hci_conn *hcon)
6641 {
6642 struct l2cap_conn *conn = hcon->l2cap_data;
6643
6644 BT_DBG("hcon %p", hcon);
6645
6646 if (!conn)
6647 return HCI_ERROR_REMOTE_USER_TERM;
6648 return conn->disc_reason;
6649 }
6650
6651 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6652 {
6653 BT_DBG("hcon %p reason %d", hcon, reason);
6654
6655 l2cap_conn_del(hcon, bt_to_errno(reason));
6656 }
6657
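/* React to an encryption change on a connection-oriented channel: when
 * encryption is lost, arm the encryption timer or close the channel
 * depending on its security level; when it comes back, clear the timer.
 */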
6658 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6659 {
6660 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6661 return;
6662
6663 if (encrypt == 0x00) {
6664 if (chan->sec_level == BT_SECURITY_MEDIUM)
6665 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6666 else if (chan->sec_level == BT_SECURITY_HIGH)
6667 l2cap_chan_close(chan, ECONNREFUSED);
6668 } else {
6669 if (chan->sec_level == BT_SECURITY_MEDIUM)
6670 __clear_chan_timer(chan);
6671 }
6672 }
6673
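/* Called when the authentication/encryption status of the link changes:
 * resume channels that were waiting for security, answer deferred
 * connection requests and schedule disconnects for channels whose
 * security requirements could not be met.
 */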
6674 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6675 {
6676 struct l2cap_conn *conn = hcon->l2cap_data;
6677 struct l2cap_chan *chan;
6678
6679 if (!conn)
6680 return 0;
6681
6682 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6683
6684 if (hcon->type == LE_LINK) {
6685 if (!status && encrypt)
6686 smp_distribute_keys(conn, 0);
6687 cancel_delayed_work(&conn->security_timer);
6688 }
6689
6690 mutex_lock(&conn->chan_lock);
6691
6692 list_for_each_entry(chan, &conn->chan_l, list) {
6693 l2cap_chan_lock(chan);
6694
6695 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6696 state_to_string(chan->state));
6697
6698 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6699 l2cap_chan_unlock(chan);
6700 continue;
6701 }
6702
6703 if (chan->scid == L2CAP_CID_ATT) {
6704 if (!status && encrypt) {
6705 chan->sec_level = hcon->sec_level;
6706 l2cap_chan_ready(chan);
6707 }
6708
6709 l2cap_chan_unlock(chan);
6710 continue;
6711 }
6712
6713 if (!__l2cap_no_conn_pending(chan)) {
6714 l2cap_chan_unlock(chan);
6715 continue;
6716 }
6717
6718 if (!status && (chan->state == BT_CONNECTED ||
6719 chan->state == BT_CONFIG)) {
6720 chan->ops->resume(chan);
6721 l2cap_check_encryption(chan, encrypt);
6722 l2cap_chan_unlock(chan);
6723 continue;
6724 }
6725
6726 if (chan->state == BT_CONNECT) {
6727 if (!status)
6728 l2cap_start_connection(chan);
6729 else
6730 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6731 } else if (chan->state == BT_CONNECT2) {
6732 struct l2cap_conn_rsp rsp;
6733 __u16 res, stat;
6734
6735 if (!status) {
6736 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6737 res = L2CAP_CR_PEND;
6738 stat = L2CAP_CS_AUTHOR_PEND;
6739 chan->ops->defer(chan);
6740 } else {
6741 l2cap_state_change(chan, BT_CONFIG);
6742 res = L2CAP_CR_SUCCESS;
6743 stat = L2CAP_CS_NO_INFO;
6744 }
6745 } else {
6746 l2cap_state_change(chan, BT_DISCONN);
6747 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6748 res = L2CAP_CR_SEC_BLOCK;
6749 stat = L2CAP_CS_NO_INFO;
6750 }
6751
6752 rsp.scid = cpu_to_le16(chan->dcid);
6753 rsp.dcid = cpu_to_le16(chan->scid);
6754 rsp.result = cpu_to_le16(res);
6755 rsp.status = cpu_to_le16(stat);
6756 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
6757 sizeof(rsp), &rsp);
6758
6759 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6760 res == L2CAP_CR_SUCCESS) {
6761 char buf[128];
6762 set_bit(CONF_REQ_SENT, &chan->conf_state);
6763 l2cap_send_cmd(conn, l2cap_get_ident(conn),
6764 L2CAP_CONF_REQ,
6765 l2cap_build_conf_req(chan, buf),
6766 buf);
6767 chan->num_conf_req++;
6768 }
6769 }
6770
6771 l2cap_chan_unlock(chan);
6772 }
6773
6774 mutex_unlock(&conn->chan_lock);
6775
6776 return 0;
6777 }
6778
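/* Reassemble incoming ACL fragments into complete L2CAP frames before
 * passing them to l2cap_recv_frame(). Malformed fragment sequences mark
 * the connection unreliable and the data is dropped.
 */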
6779 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6780 {
6781 struct l2cap_conn *conn = hcon->l2cap_data;
6782 struct l2cap_hdr *hdr;
6783 int len;
6784
6785 /* For an AMP controller, do not create an l2cap conn */
6786 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6787 goto drop;
6788
6789 if (!conn)
6790 conn = l2cap_conn_add(hcon);
6791
6792 if (!conn)
6793 goto drop;
6794
6795 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6796
6797 switch (flags) {
6798 case ACL_START:
6799 case ACL_START_NO_FLUSH:
6800 case ACL_COMPLETE:
6801 if (conn->rx_len) {
6802 BT_ERR("Unexpected start frame (len %d)", skb->len);
6803 kfree_skb(conn->rx_skb);
6804 conn->rx_skb = NULL;
6805 conn->rx_len = 0;
6806 l2cap_conn_unreliable(conn, ECOMM);
6807 }
6808
6809 /* A start fragment always begins with the Basic L2CAP header */
6810 if (skb->len < L2CAP_HDR_SIZE) {
6811 BT_ERR("Frame is too short (len %d)", skb->len);
6812 l2cap_conn_unreliable(conn, ECOMM);
6813 goto drop;
6814 }
6815
6816 hdr = (struct l2cap_hdr *) skb->data;
6817 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6818
6819 if (len == skb->len) {
6820 /* Complete frame received */
6821 l2cap_recv_frame(conn, skb);
6822 return 0;
6823 }
6824
6825 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6826
6827 if (skb->len > len) {
6828 BT_ERR("Frame is too long (len %d, expected len %d)",
6829 skb->len, len);
6830 l2cap_conn_unreliable(conn, ECOMM);
6831 goto drop;
6832 }
6833
6834 /* Allocate skb for the complete frame (with header) */
6835 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6836 if (!conn->rx_skb)
6837 goto drop;
6838
6839 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6840 skb->len);
6841 conn->rx_len = len - skb->len;
6842 break;
6843
6844 case ACL_CONT:
6845 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6846
6847 if (!conn->rx_len) {
6848 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6849 l2cap_conn_unreliable(conn, ECOMM);
6850 goto drop;
6851 }
6852
6853 if (skb->len > conn->rx_len) {
6854 BT_ERR("Fragment is too long (len %d, expected %d)",
6855 skb->len, conn->rx_len);
6856 kfree_skb(conn->rx_skb);
6857 conn->rx_skb = NULL;
6858 conn->rx_len = 0;
6859 l2cap_conn_unreliable(conn, ECOMM);
6860 goto drop;
6861 }
6862
6863 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6864 skb->len);
6865 conn->rx_len -= skb->len;
6866
6867 if (!conn->rx_len) {
6868 /* Complete frame received. l2cap_recv_frame
6869 * takes ownership of the skb, so clear the
6870 * connection's rx_skb pointer first.
6871 */
6872 struct sk_buff *rx_skb = conn->rx_skb;
6873 conn->rx_skb = NULL;
6874 l2cap_recv_frame(conn, rx_skb);
6875 }
6876 break;
6877 }
6878
6879 drop:
6880 kfree_skb(skb);
6881 return 0;
6882 }
6883
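/* Dump one line per registered channel (addresses, state, PSM, CIDs,
 * MTUs, security level and mode) for the l2cap debugfs file.
 */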
6884 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6885 {
6886 struct l2cap_chan *c;
6887
6888 read_lock(&chan_list_lock);
6889
6890 list_for_each_entry(c, &chan_list, global_l) {
6891 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6892 &c->src, &c->dst,
6893 c->state, __le16_to_cpu(c->psm),
6894 c->scid, c->dcid, c->imtu, c->omtu,
6895 c->sec_level, c->mode);
6896 }
6897
6898 read_unlock(&chan_list_lock);
6899
6900 return 0;
6901 }
6902
6903 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6904 {
6905 return single_open(file, l2cap_debugfs_show, inode->i_private);
6906 }
6907
6908 static const struct file_operations l2cap_debugfs_fops = {
6909 .open = l2cap_debugfs_open,
6910 .read = seq_read,
6911 .llseek = seq_lseek,
6912 .release = single_release,
6913 };
6914
6915 static struct dentry *l2cap_debugfs;
6916
6917 int __init l2cap_init(void)
6918 {
6919 int err;
6920
6921 err = l2cap_init_sockets();
6922 if (err < 0)
6923 return err;
6924
6925 if (IS_ERR_OR_NULL(bt_debugfs))
6926 return 0;
6927
6928 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6929 NULL, &l2cap_debugfs_fops);
6930
6931 return 0;
6932 }
6933
6934 void l2cap_exit(void)
6935 {
6936 debugfs_remove(l2cap_debugfs);
6937 l2cap_cleanup_sockets();
6938 }
6939
6940 module_param(disable_ertm, bool, 0644);
6941 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");