Bluetooth: improve readability of l2cap_seq_list code
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
51 #include <net/sock.h>
52
53 #include <asm/unaligned.h>
54
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
59
60 bool disable_ertm;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
67
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76 /* ---- L2CAP channels ---- */
77
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
79 {
80 struct l2cap_chan *c;
81
82 list_for_each_entry(c, &conn->chan_l, list) {
83 if (c->dcid == cid)
84 return c;
85 }
86 return NULL;
87 }
88
89 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
90 {
91 struct l2cap_chan *c;
92
93 list_for_each_entry(c, &conn->chan_l, list) {
94 if (c->scid == cid)
95 return c;
96 }
97 return NULL;
98 }
99
/* Find the channel on @conn with the given source CID.
 *
 * NOTE(review): the previous comment claimed "Returns locked socket",
 * but conn->chan_lock is released before returning and no socket or
 * channel lock is held by the caller afterwards — the returned pointer
 * is unlocked (or NULL).  Confirm callers re-validate as needed.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	/* chan_lock protects the conn->chan_l walk in the helper */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	mutex_unlock(&conn->chan_lock);

	return c;
}
112
113 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
114 {
115 struct l2cap_chan *c;
116
117 list_for_each_entry(c, &conn->chan_l, list) {
118 if (c->ident == ident)
119 return c;
120 }
121 return NULL;
122 }
123
124 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
125 {
126 struct l2cap_chan *c;
127
128 list_for_each_entry(c, &chan_list, global_l) {
129 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
130 return c;
131 }
132 return NULL;
133 }
134
/* Bind @chan to @psm on source address @src.
 *
 * A non-zero @psm is used verbatim after an in-use check; @psm == 0
 * requests automatic assignment from 0x1001..0x10ff, stepping by 2 so
 * only odd values are tried.
 *
 * Returns 0 on success, -EADDRINUSE if the requested PSM is already
 * bound on @src, or -EINVAL when the automatic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	/* chan_list_lock serialises the lookup and the bind */
	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
167
168 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
169 {
170 write_lock(&chan_list_lock);
171
172 chan->scid = scid;
173
174 write_unlock(&chan_list_lock);
175
176 return 0;
177 }
178
179 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
180 {
181 u16 cid = L2CAP_CID_DYN_START;
182
183 for (; cid < L2CAP_CID_DYN_END; cid++) {
184 if (!__l2cap_get_chan_by_scid(conn, cid))
185 return cid;
186 }
187
188 return 0;
189 }
190
/* Set the channel state and notify the owner through the state_change
 * callback.  Callers serialise via the socket lock (see the locked
 * wrapper l2cap_state_change()).
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
						state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan->data, state);
}
199
200 static void l2cap_state_change(struct l2cap_chan *chan, int state)
201 {
202 struct sock *sk = chan->sk;
203
204 lock_sock(sk);
205 __l2cap_state_change(chan, state);
206 release_sock(sk);
207 }
208
/* Record @err on the owning socket; caller holds the socket lock. */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
215
216 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
217 {
218 struct sock *sk = chan->sk;
219
220 lock_sock(sk);
221 __l2cap_chan_set_err(chan, err);
222 release_sock(sk);
223 }
224
225 /* ---- L2CAP sequence number lists ---- */
226
227 /* For ERTM, ordered lists of sequence numbers must be tracked for
228 * SREJ requests that are received and for frames that are to be
229 * retransmitted. These seq_list functions implement a singly-linked
230 * list in an array, where membership in the list can also be checked
231 * in constant time. Items can also be added to the tail of the list
232 * and removed from the head in constant time, without further memory
233 * allocs or frees.
234 */
235
/* Initialise a sequence-number list sized for a @size-entry window.
 *
 * Returns 0 on success or -ENOMEM if the backing array cannot be
 * allocated.  The array must later be released with
 * l2cap_seq_list_free().
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* mask doubles as the index mask (alloc_size is a power of 2) */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
258
/* Release the backing array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
263
264 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
265 u16 seq)
266 {
267 /* Constant-time check for list membership */
268 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
269 }
270
/* Remove @seq from the list, returning it on success.
 *
 * Removing the head is O(1); removing an interior element walks the
 * singly-linked chain to find the predecessor.  Returns
 * L2CAP_SEQ_LIST_CLEAR when the list is empty or @seq is not linked.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removing the last element empties the list entirely */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
304
/* Pop the head of the list in constant time; returns the popped
 * sequence number, or L2CAP_SEQ_LIST_CLEAR if the list is empty. */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
310
/* Empty the list: reset every slot and mark head/tail clear.
 * Cheap no-op when the list is already empty. */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	/* mask is alloc_size - 1, so this touches every slot once */
	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
324
/* Append @seq to the tail of the list in constant time.
 * Duplicate appends are silently ignored (slot already linked). */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
342
/* Delayed-work handler for the channel timer (chan_timer).
 *
 * Maps the state at expiry to a close reason: connected/configuring,
 * or connecting above SDP security level, becomes ECONNREFUSED;
 * everything else ETIMEDOUT.  Drops the reference the timer held on
 * the channel before returning.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock first, then the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
					chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* close() callback runs without the channel lock held */
	chan->ops->close(chan->data);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
372
/* Allocate and initialise a new channel object.
 *
 * The channel starts in BT_OPEN with a single reference and is linked
 * onto the global chan_list.  Returns NULL on allocation failure.
 *
 * NOTE(review): allocation uses GFP_ATOMIC — presumably because some
 * callers cannot sleep; confirm before relaxing to GFP_KERNEL.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	atomic_set(&chan->refcnt, 1);

	BT_DBG("chan %p", chan);

	return chan;
}
397
/* Unlink @chan from the global list and drop the creation reference.
 * The object is freed once the last reference is put. */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	l2cap_chan_put(chan);
}
406
/* Install default ERTM and security parameters on a fresh channel. */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	/* Traffic on this channel keeps the ACL link active by default */
	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
417
/* Attach @chan to @conn: pick CIDs/MTU by channel type, set best-effort
 * flow-spec defaults, take a channel reference for the list, and link
 * the channel onto conn->chan_l.  Caller holds conn->chan_lock (see
 * l2cap_chan_add()).
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			__le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until told otherwise */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort flow spec defaults for the local side */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;

	/* Reference owned by conn->chan_l; dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
466
/* Locked wrapper: attach @chan to @conn under conn->chan_lock. */
static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
473
/* Detach @chan from its connection and mark the socket dead.
 *
 * Unlinks the channel from conn->chan_l (dropping the list's ref and
 * the hci_conn ref), moves the state to BT_CLOSED, records @err on the
 * socket, and wakes either the accept parent or the socket itself.
 * Once configuration had completed in both directions, pending
 * transmit/ERTM state (queues, timers, SREJ bookkeeping) is purged.
 */
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	lock_sock(sk);

	__l2cap_state_change(chan, BT_CLOSED);
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		__l2cap_chan_set_err(chan, err);

	if (parent) {
		/* Not yet accepted: detach from the listener's queue */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	release_sock(sk);

	/* Only purge data state once both config directions completed */
	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
			test_bit(CONF_INPUT_DONE, &chan->conf_state)))
		return;

	skb_queue_purge(&chan->tx_q);

	if (chan->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
533
/* Close every not-yet-accepted channel queued on listening socket
 * @parent, invoking each channel's close() callback after the channel
 * lock is released.
 */
static void l2cap_chan_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
		struct l2cap_chan *chan = l2cap_pi(sk)->chan;

		l2cap_chan_lock(chan);
		__clear_chan_timer(chan);
		l2cap_chan_close(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		chan->ops->close(chan->data);
	}
}
552
/* Close @chan with @reason, driving the appropriate shutdown path for
 * its current state:
 *  - BT_LISTEN: tear down pending accepts and zap the socket.
 *  - BT_CONNECTED/BT_CONFIG on ACL: send a disconnect request and arm
 *    the channel timer; otherwise detach immediately.
 *  - BT_CONNECT2 on ACL: reject the pending connect request (security
 *    block when setup was deferred, bad PSM otherwise), then detach.
 *  - BT_CONNECT/BT_DISCONN: detach immediately.
 *  - anything else: just zap the socket.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan,
			state_to_string(chan->state), sk);

	switch (chan->state) {
	case BT_LISTEN:
		lock_sock(sk);
		l2cap_chan_cleanup_listen(sk);

		__l2cap_state_change(chan, BT_CLOSED);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			/* scid/dcid are swapped in the response */
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		lock_sock(sk);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;
	}
}
616
617 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
618 {
619 if (chan->chan_type == L2CAP_CHAN_RAW) {
620 switch (chan->sec_level) {
621 case BT_SECURITY_HIGH:
622 return HCI_AT_DEDICATED_BONDING_MITM;
623 case BT_SECURITY_MEDIUM:
624 return HCI_AT_DEDICATED_BONDING;
625 default:
626 return HCI_AT_NO_BONDING;
627 }
628 } else if (chan->psm == cpu_to_le16(0x0001)) {
629 if (chan->sec_level == BT_SECURITY_LOW)
630 chan->sec_level = BT_SECURITY_SDP;
631
632 if (chan->sec_level == BT_SECURITY_HIGH)
633 return HCI_AT_NO_BONDING_MITM;
634 else
635 return HCI_AT_NO_BONDING;
636 } else {
637 switch (chan->sec_level) {
638 case BT_SECURITY_HIGH:
639 return HCI_AT_GENERAL_BONDING_MITM;
640 case BT_SECURITY_MEDIUM:
641 return HCI_AT_GENERAL_BONDING;
642 default:
643 return HCI_AT_NO_BONDING;
644 }
645 }
646 }
647
648 /* Service level security */
649 int l2cap_chan_check_security(struct l2cap_chan *chan)
650 {
651 struct l2cap_conn *conn = chan->conn;
652 __u8 auth_type;
653
654 auth_type = l2cap_get_auth_type(chan);
655
656 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
657 }
658
/* Allocate the next signalling command identifier for @conn,
 * cycling through 1..128 under conn->lock.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
680
/* Build and transmit an L2CAP signalling command on @conn's ACL link.
 * The command is silently dropped if the skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Prefer a non-flushable packet when the controller supports it */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling is sent force-active and at the highest priority */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
701
702 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
703 {
704 struct hci_conn *hcon = chan->conn->hcon;
705 u16 flags;
706
707 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
708 skb->priority);
709
710 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
711 lmp_no_flush_capable(hcon->hdev))
712 flags = ACL_START_NO_FLUSH;
713 else
714 flags = ACL_START;
715
716 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
717 hci_send_acl(chan->conn->hchan, skb, flags);
718 }
719
720 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
721 {
722 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
723 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
724
725 if (enh & L2CAP_CTRL_FRAME_TYPE) {
726 /* S-Frame */
727 control->sframe = 1;
728 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
729 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
730
731 control->sar = 0;
732 control->txseq = 0;
733 } else {
734 /* I-Frame */
735 control->sframe = 0;
736 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
737 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
738
739 control->poll = 0;
740 control->super = 0;
741 }
742 }
743
744 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
745 {
746 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
747 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
748
749 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
750 /* S-Frame */
751 control->sframe = 1;
752 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
753 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
754
755 control->sar = 0;
756 control->txseq = 0;
757 } else {
758 /* I-Frame */
759 control->sframe = 0;
760 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
761 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
762
763 control->poll = 0;
764 control->super = 0;
765 }
766 }
767
768 static inline void __unpack_control(struct l2cap_chan *chan,
769 struct sk_buff *skb)
770 {
771 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
772 __unpack_extended_control(get_unaligned_le32(skb->data),
773 &bt_cb(skb)->control);
774 } else {
775 __unpack_enhanced_control(get_unaligned_le16(skb->data),
776 &bt_cb(skb)->control);
777 }
778 }
779
780 static u32 __pack_extended_control(struct l2cap_ctrl *control)
781 {
782 u32 packed;
783
784 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
785 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
786
787 if (control->sframe) {
788 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
789 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
790 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
791 } else {
792 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
793 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
794 }
795
796 return packed;
797 }
798
799 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
800 {
801 u16 packed;
802
803 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
804 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
805
806 if (control->sframe) {
807 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
808 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
809 packed |= L2CAP_CTRL_FRAME_TYPE;
810 } else {
811 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
812 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
813 }
814
815 return packed;
816 }
817
818 static inline void __pack_control(struct l2cap_chan *chan,
819 struct l2cap_ctrl *control,
820 struct sk_buff *skb)
821 {
822 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
823 put_unaligned_le32(__pack_extended_control(control),
824 skb->data + L2CAP_HDR_SIZE);
825 } else {
826 put_unaligned_le16(__pack_enhanced_control(control),
827 skb->data + L2CAP_HDR_SIZE);
828 }
829 }
830
/* Build and send an ERTM supervisory frame carrying @control.
 *
 * No-op unless the channel is BT_CONNECTED.  Pending final/poll bits
 * (CONN_SEND_FBIT / CONN_SEND_PBIT) are consumed into the control
 * field, and an FCS trailer is appended when CRC16 is in use.
 * Allocation failure silently drops the frame.
 */
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = chan->conn;
	int count, hlen;

	if (chan->state != BT_CONNECTED)
		return;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	BT_DBG("chan %p, control 0x%8.8x", chan, control);

	/* S-frames carry no payload; cap at the connection MTU anyway */
	count = min_t(unsigned int, conn->mtu, hlen);

	control |= __set_sframe(chan);

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
		control |= __set_ctrl_final(chan);

	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
		control |= __set_ctrl_poll(chan);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	__put_control(chan, control, skb_put(skb, __ctrl_size(chan)));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers the header and control, not itself */
		u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	l2cap_do_send(chan, skb);
}
879
880 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
881 {
882 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
883 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
884 set_bit(CONN_RNR_SENT, &chan->conn_state);
885 } else
886 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
887
888 control |= __set_reqseq(chan, chan->buffer_seq);
889
890 l2cap_send_sframe(chan, control);
891 }
892
893 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
894 {
895 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
896 }
897
/* Send an L2CAP Connection Request for @chan.  The allocated ident is
 * stored on the channel so the response can be matched, and
 * CONF_CONNECT_PEND marks the request as outstanding (see
 * __l2cap_no_conn_pending()).
 */
static void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
912
/* Move @chan to BT_CONNECTED: clear configuration state and the channel
 * timer, then wake the socket (and the accept parent, if any).
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	struct sock *parent;

	lock_sock(sk);

	parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	__l2cap_state_change(chan, BT_CONNECTED);
	sk->sk_state_change(sk);

	if (parent)
		parent->sk_data_ready(parent, 0);

	release_sock(sk);
}
935
/* Begin channel establishment on an up connection.
 *
 * LE links skip the BR/EDR signalling entirely and go straight to
 * ready.  On BR/EDR the connect request is only sent once the remote
 * feature mask has been exchanged and security is satisfied with no
 * request already pending; if the info exchange has not started yet it
 * is kicked off here (with a timeout) and the connect is deferred
 * until the response or timeout arrives (see l2cap_conn_start()).
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
				__l2cap_no_conn_pending(chan))
			l2cap_send_conn_req(chan);
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
965
966 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
967 {
968 u32 local_feat_mask = l2cap_feat_mask;
969 if (!disable_ertm)
970 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
971
972 switch (mode) {
973 case L2CAP_MODE_ERTM:
974 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
975 case L2CAP_MODE_STREAMING:
976 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
977 default:
978 return 0x00;
979 }
980 }
981
/* Send a Disconnect Request for @chan, stop any running ERTM timers,
 * move the channel to BT_DISCONN and record @err on the socket.
 * No-op when @conn is NULL.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1006
1007 /* ---- L2CAP connections ---- */
/* Drive all connection-oriented channels on @conn forward after the
 * feature-mask exchange completes (or security state changes).
 *
 * BT_CONNECT channels get their connect request sent once security and
 * mode support allow (channels demanding an unsupported mode on a
 * state-2 device are closed instead).  BT_CONNECT2 channels answer
 * their pending incoming connect request: success (or authorization /
 * authentication pending) is reported as appropriate, and on success a
 * first configuration request is sent.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe: l2cap_chan_close() below can unlink entries */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Required mode unsupported on a state-2 device:
			 * give up on this channel entirely */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_send_conn_req(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (bt_sk(sk)->defer_setup) {
					/* Wait for userspace authorization */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Kick off configuration for the accepted channel */
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1089
/* Find a channel in @state with source CID @cid and source/destination
 * addresses.  An exact src/dst match wins immediately; otherwise the
 * closest wildcard (BDADDR_ANY) match found is returned.
 *
 * NOTE(review): the old comment said "locked" — no lock is held on the
 * returned channel; chan_list_lock is released before returning.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						bdaddr_t *src,
						bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
							(src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1132
/* Accept an incoming LE connection: find a listener on the LE data CID,
 * spawn a child channel, attach it to @conn and mark it connected.
 * Bails out silently when there is no listener, the backlog is full,
 * or the child cannot be created.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto clean;

	sk = chan->sk;

	/* Reference dropped when the channel is deleted */
	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	l2cap_chan_add(conn, chan);

	__set_chan_timer(chan, sk->sk_sndtimeo);

	__l2cap_state_change(chan, BT_CONNECTED);
	parent->sk_data_ready(parent, 0);

	/* Success also falls through here: only the parent is unlocked */
clean:
	release_sock(parent);
}
1179
/* Handle a link coming up: accept incoming LE connections, elevate LE
 * security on outgoing links, then walk every channel and either mark
 * it connected (LE with satisfied security, or connectionless/raw
 * channels) or start the BR/EDR connect sequence for BT_CONNECT ones.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (conn->hcon->type == LE_LINK) {
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw: ready as soon as the link is */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1218
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		/* Only channels that demanded reliable delivery get the
		 * error; everything else keeps working best-effort.
		 */
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			__l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1235
/* The information request to the remote timed out: mark the feature
 * mask exchange as done and proceed with connection setup anyway.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1246
/* Tear down the L2CAP state of an HCI connection: kill every channel
 * (propagating err to them), cancel pending timers, release the HCI
 * channel and free the conn object.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Extra ref so the channel survives l2cap_chan_del until
		 * ops->close has run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan->data);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1289
/* SMP security procedure timed out: drop the whole LE connection */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	l2cap_conn_del(conn->hcon, ETIMEDOUT);
}
1297
/* Allocate and initialise L2CAP layer state for an HCI connection.
 * Returns the already-attached conn if one exists (or NULL when
 * status is non-zero and nothing is attached), NULL on allocation
 * failure, otherwise the new conn.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up, or the link failed to come up */
	if (conn || status)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE controllers may advertise a dedicated LE MTU; otherwise
	 * fall back to the ACL MTU.
	 */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* Only the timer relevant to the link type is initialised:
	 * SMP security timeout for LE, info-request timeout for BR/EDR.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1346
1347 /* ---- Socket interface ---- */
1348
/* Find socket with psm and source / destination bdaddr.
 * Returns closest match: an exact (src, dst) match wins immediately;
 * otherwise the last channel whose non-wildcard addresses agree with
 * the requested ones (BDADDR_ANY acts as a wildcard).
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		/* state == 0 matches channels in any state */
		if (state && c->state != state)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1391
/* Initiate an outgoing L2CAP connection on chan to dst: psm for
 * connection-oriented channels, cid for fixed channels (e.g. LE data).
 * Creates the underlying HCI link if needed and arms the channel
 * timer. Returns 0 on success or when already connecting, a negative
 * error otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(chan->psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a fixed CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	lock_sock(sk);

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		release_sock(sk);
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		release_sock(sk);
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		release_sock(sk);
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, dst);

	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* Only a single channel is allowed on an LE link */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* Drop the chan lock around l2cap_chan_add, which takes the conn
	 * lock - NOTE(review): presumably to preserve lock ordering;
	 * confirm against l2cap_chan_add.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Raw/connectionless channels skip the L2CAP
			 * handshake once security is satisfied.
			 */
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1535
/* Block until every outstanding ERTM frame is acknowledged, the
 * channel loses its connection, a socket error occurs or a signal
 * arrives. Called with the socket locked; the lock is dropped around
 * each sleep. Returns 0 or a negative error.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* Re-arm the poll interval if the previous sleep used it up */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1567
/* ERTM monitor timer expired: the remote did not answer our poll.
 * Disconnect after remote_max_tx unanswered polls, otherwise poll
 * again with the P-bit set.
 * NOTE(review): the final l2cap_chan_put balances a reference
 * presumably taken when the timer was armed - confirm.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (chan->retry_count >= chan->remote_max_tx) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	chan->retry_count++;
	__set_monitor_timer(chan);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1591
/* ERTM retransmission timer expired: no acknowledgement arrived in
 * time. Poll the remote (P-bit) and hand over to the monitor timer.
 * NOTE(review): the final l2cap_chan_put balances a reference
 * presumably taken when the timer was armed - confirm.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	chan->retry_count = 1;
	__set_monitor_timer(chan);

	set_bit(CONN_WAIT_F, &chan->conn_state);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1611
/* Free acknowledged frames from the head of the tx queue, i.e.
 * everything before expected_ack_seq, and stop the retransmission
 * timer once nothing is outstanding.
 */
static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(&chan->tx_q)) &&
	       chan->unacked_frames) {
		/* Stop at the first frame that is still unacknowledged */
		if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
			break;

		skb = skb_dequeue(&chan->tx_q);
		kfree_skb(skb);

		chan->unacked_frames--;
	}

	if (!chan->unacked_frames)
		__clear_retrans_timer(chan);
}
1630
/* Transmit and drain the whole tx queue in streaming mode: stamp each
 * frame with the next tx sequence number and its SAR bits, fill in
 * the FCS when CRC16 is enabled, and send. Streaming mode never
 * retransmits, so frames are dequeued for good.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u32 control;
	u16 fcs;

	while ((skb = skb_dequeue(&chan->tx_q))) {
		control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
		control |= __set_txseq(chan, chan->next_tx_seq);
		control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
		__put_control(chan, control, skb->data + L2CAP_HDR_SIZE);

		/* FCS covers everything except its own trailing 2 bytes */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			fcs = crc16(0, (u8 *)skb->data,
				    skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs,
					   skb->data + skb->len - L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, skb);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
	}
}
1655
1656 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1657 {
1658 struct sk_buff *skb, *tx_skb;
1659 u16 fcs;
1660 u32 control;
1661
1662 skb = skb_peek(&chan->tx_q);
1663 if (!skb)
1664 return;
1665
1666 while (bt_cb(skb)->control.txseq != tx_seq) {
1667 if (skb_queue_is_last(&chan->tx_q, skb))
1668 return;
1669
1670 skb = skb_queue_next(&chan->tx_q, skb);
1671 }
1672
1673 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1674 chan->remote_max_tx) {
1675 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1676 return;
1677 }
1678
1679 tx_skb = skb_clone(skb, GFP_ATOMIC);
1680 bt_cb(skb)->control.retries++;
1681
1682 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1683 control &= __get_sar_mask(chan);
1684
1685 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1686 control |= __set_ctrl_final(chan);
1687
1688 control |= __set_reqseq(chan, chan->buffer_seq);
1689 control |= __set_txseq(chan, tx_seq);
1690
1691 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1692
1693 if (chan->fcs == L2CAP_FCS_CRC16) {
1694 fcs = crc16(0, (u8 *)tx_skb->data,
1695 tx_skb->len - L2CAP_FCS_SIZE);
1696 put_unaligned_le16(fcs,
1697 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1698 }
1699
1700 l2cap_do_send(chan, tx_skb);
1701 }
1702
1703 static int l2cap_ertm_send(struct l2cap_chan *chan)
1704 {
1705 struct sk_buff *skb, *tx_skb;
1706 u16 fcs;
1707 u32 control;
1708 int nsent = 0;
1709
1710 if (chan->state != BT_CONNECTED)
1711 return -ENOTCONN;
1712
1713 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1714 return 0;
1715
1716 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1717
1718 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1719 chan->remote_max_tx) {
1720 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1721 break;
1722 }
1723
1724 tx_skb = skb_clone(skb, GFP_ATOMIC);
1725
1726 bt_cb(skb)->control.retries++;
1727
1728 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1729 control &= __get_sar_mask(chan);
1730
1731 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1732 control |= __set_ctrl_final(chan);
1733
1734 control |= __set_reqseq(chan, chan->buffer_seq);
1735 control |= __set_txseq(chan, chan->next_tx_seq);
1736 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1737
1738 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1739
1740 if (chan->fcs == L2CAP_FCS_CRC16) {
1741 fcs = crc16(0, (u8 *)skb->data,
1742 tx_skb->len - L2CAP_FCS_SIZE);
1743 put_unaligned_le16(fcs, skb->data +
1744 tx_skb->len - L2CAP_FCS_SIZE);
1745 }
1746
1747 l2cap_do_send(chan, tx_skb);
1748
1749 __set_retrans_timer(chan);
1750
1751 bt_cb(skb)->control.txseq = chan->next_tx_seq;
1752
1753 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1754
1755 if (bt_cb(skb)->control.retries == 1) {
1756 chan->unacked_frames++;
1757
1758 if (!nsent++)
1759 __clear_ack_timer(chan);
1760 }
1761
1762 chan->frames_sent++;
1763
1764 if (skb_queue_is_last(&chan->tx_q, skb))
1765 chan->tx_send_head = NULL;
1766 else
1767 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1768 }
1769
1770 return nsent;
1771 }
1772
1773 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1774 {
1775 int ret;
1776
1777 if (!skb_queue_empty(&chan->tx_q))
1778 chan->tx_send_head = chan->tx_q.next;
1779
1780 chan->next_tx_seq = chan->expected_ack_seq;
1781 ret = l2cap_ertm_send(chan);
1782 return ret;
1783 }
1784
/* Acknowledge received I-frames: send RNR when locally busy,
 * otherwise try to piggyback the ack on pending I-frames, falling
 * back to an RR S-frame when nothing was transmitted.
 */
static void __l2cap_send_ack(struct l2cap_chan *chan)
{
	u32 control = 0;

	control |= __set_reqseq(chan, chan->buffer_seq);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
		l2cap_send_sframe(chan, control);
		return;
	}

	/* I-frames carry reqseq, so sending data acks implicitly */
	if (l2cap_ertm_send(chan) > 0)
		return;

	control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
	l2cap_send_sframe(chan, control);
}
1804
/* Send an acknowledgement now and cancel the delayed-ack timer */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	__clear_ack_timer(chan);
	__l2cap_send_ack(chan);
}
1810
/* Send an SREJ S-frame with the F-bit set, requesting the sequence
 * number stored in the last entry of the srej list.
 */
static void l2cap_send_srejtail(struct l2cap_chan *chan)
{
	struct srej_list *tail;
	u32 control;

	control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
	control |= __set_ctrl_final(chan);

	/* ->prev of the list head is the tail element */
	tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
	control |= __set_reqseq(chan, tail->tx_seq);

	l2cap_send_sframe(chan, control);
}
1824
/* Copy len bytes of payload from msg into skb: the first count bytes
 * go into the space already reserved in skb, the remainder into
 * fragment skbs chained on frag_list, each capped at the connection
 * MTU. Returns bytes copied or a negative error.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Chain the fragment before copying so the caller's
		 * kfree_skb(skb) releases it on a later error.
		 */
		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the head skb's accounting in sync with the chain */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
1869
/* Build a connectionless-channel PDU: L2CAP header plus PSM, followed
 * by payload copied from msg (chained into fragments when larger than
 * the MTU). Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1903
/* Build a basic-mode PDU: plain L2CAP header followed by payload
 * copied from msg (chained into fragments when larger than the MTU).
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
					      u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1936
/* Build an ERTM/streaming I-frame: L2CAP header, zeroed control field
 * (enhanced or extended, filled in at transmit time), optional SDU
 * length for start-of-SDU fragments, payload, and an FCS placeholder.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on whether extended control is in use */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control field is stamped with real values at transmit time */
	__put_control(chan, 0, skb_put(skb, __ctrl_size(chan)));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* FCS placeholder; the real value is computed on transmit */
	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));

	bt_cb(skb)->control.retries = 0;
	return skb;
}
1991
/* Segment an SDU from msg into one or more I-frame PDUs queued on
 * seg_queue. SAR bits mark each fragment (unsegmented, start,
 * continue, end); the total SDU length is carried only in the start
 * fragment. Returns 0 or a negative error (seg_queue is purged on
 * failure).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the start fragment carries the SDU length;
			 * later fragments regain that header space.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2057
/* Send an SDU of len bytes from msg on chan according to the channel
 * mode: a single PDU for connectionless and basic modes, segmented
 * I-frames for ERTM/streaming. Returns bytes sent or a negative
 * error.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
			chan->tx_send_head = seg_queue.next;
		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		if (chan->mode == L2CAP_MODE_ERTM)
			err = l2cap_ertm_send(chan);
		else
			l2cap_streaming_send(chan);

		if (err >= 0)
			err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2142
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* recv takes ownership on success; free the clone if
		 * the channel rejected it.
		 */
		if (chan->ops->recv(chan->data, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2171
/* ---- L2CAP signalling commands ---- */

/* Build a signalling-channel command skb: L2CAP header, command
 * header (code/ident/len), then dlen bytes of payload, fragmented at
 * the connection MTU. Returns NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
	       conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain built so far */
	kfree_skb(skb);
	return NULL;
}
2235
/* Parse one configuration option at *ptr, advancing *ptr past it.
 * Fixed-size values (1/2/4 bytes) are returned by value in *val;
 * other lengths return a pointer to the raw option bytes. Returns the
 * total size consumed.
 *
 * NOTE(review): opt->len comes from the peer and is not validated
 * here - callers must bound-check the buffer; confirm.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer instead */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2268
/* Append a configuration option to the buffer at *ptr and advance
 * *ptr past it. 1/2/4-byte values are stored little-endian; for other
 * lengths val is treated as a pointer to the data to copy.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2298
/* Append an Extended Flow Specification option for this channel.
 * ERTM uses the locally configured service parameters; streaming mode
 * uses a fixed best-effort spec; other modes add nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
2329
/* Delayed-ack timer expired: send any pending acknowledgement.
 * NOTE(review): the final l2cap_chan_put balances a reference
 * presumably taken when the timer was armed - confirm.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	__l2cap_send_ack(chan);

	l2cap_chan_unlock(chan);

	l2cap_chan_put(chan);
}
2345
/* Reset the channel's ERTM/streaming sequence counters, SDU
 * reassembly state and tx queue. For ERTM also initialise the state
 * machines, arm the delayed-work timers and allocate the SREJ and
 * retransmission sequence lists. Returns 0 or a negative error from
 * l2cap_seq_list_init.
 */
static inline int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming mode needs none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	INIT_LIST_HEAD(&chan->srej_l);
	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
}
2383
2384 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2385 {
2386 switch (mode) {
2387 case L2CAP_MODE_STREAMING:
2388 case L2CAP_MODE_ERTM:
2389 if (l2cap_mode_supported(mode, remote_feat_mask))
2390 return mode;
2391 /* fall through */
2392 default:
2393 return L2CAP_MODE_BASIC;
2394 }
2395 }
2396
2397 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2398 {
2399 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2400 }
2401
2402 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2403 {
2404 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2405 }
2406
/* Configure the transmit window: switch to the extended control field
 * when the requested window exceeds the default and extended windows
 * are supported; otherwise clamp the window to the default maximum.
 */
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
	    __l2cap_ews_supported(chan)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
	} else {
		chan->tx_win = min_t(u16, chan->tx_win,
				     L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	}
}
2420
/* Build a Configure Request payload for @chan into @data (laid out as
 * a struct l2cap_conf_req).  Returns the number of bytes written.
 *
 * On the very first configuration exchange the channel mode may still
 * be downgraded via l2cap_select_mode() based on the remote feature
 * mask; on later exchanges the already-negotiated mode is kept.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens before any request/response has
	 * been exchanged for this channel.
	 */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* CONF_STATE2_DEVICE: the mode is mandated locally and
		 * must not be renegotiated down to basic mode.
		 */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only send an MTU option when it differs from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* An explicit basic-mode RFC option is sent only when the
		 * remote supports ERTM or streaming at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;
		/* Timeouts are filled in by the responder, not the requester */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* PDU must fit in the HCI MTU after extended header,
		 * SDU length field and FCS are accounted for.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
						L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		/* Extended window size option carries the full tx_win when
		 * extended control fields were selected above.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
								chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2541
/* Parse the accumulated Configure Request options in chan->conf_req
 * and build the Configure Response into @data.  Returns the number of
 * bytes written, or a negative errno when the configuration must be
 * refused outright (caller disconnects the channel).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: pull every option out of the request buffer */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be silently ignored when unknown */
		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is accepted but not acted upon */
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended windows require high-speed support */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Echo each unknown non-hint option type back */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode negotiation only happens on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
					chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* Locally mandated mode cannot be changed by the remote */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Second mode disagreement is fatal */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* Service types must match unless one side is
			 * "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
							sizeof(efs),
							(unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* If the remote used an EWS option, the RFC window
			 * field is only a placeholder; keep the EWS value.
			 */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp remote PDU size to what fits in our MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
						le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
						le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
						le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
2761
/* Parse a Configure Response (@rsp, @len bytes of options) and build a
 * follow-up Configure Request into @data, adopting the values the
 * remote proposed where acceptable.  @result may be downgraded to
 * L2CAP_CONF_UNACCEPT for a too-small MTU.  Returns the number of
 * bytes written into @data, or a negative errno to refuse.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Below the minimum MTU we counter-propose the
			 * minimum and mark the response unacceptable.
			 */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A locally mandated mode may not be changed */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
							rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->tx_win = min_t(u16, val,
						L2CAP_DEFAULT_EXT_WINDOW);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
							chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service types must match unless one side is
			 * "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			break;
		}
	}

	/* The remote may not push a basic-mode channel into another mode */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
						le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
						le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
2860
2861 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2862 {
2863 struct l2cap_conf_rsp *rsp = data;
2864 void *ptr = rsp->data;
2865
2866 BT_DBG("chan %p", chan);
2867
2868 rsp->scid = cpu_to_le16(chan->dcid);
2869 rsp->result = cpu_to_le16(result);
2870 rsp->flags = cpu_to_le16(flags);
2871
2872 return ptr - data;
2873 }
2874
2875 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2876 {
2877 struct l2cap_conn_rsp rsp;
2878 struct l2cap_conn *conn = chan->conn;
2879 u8 buf[128];
2880
2881 rsp.scid = cpu_to_le16(chan->dcid);
2882 rsp.dcid = cpu_to_le16(chan->scid);
2883 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2884 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2885 l2cap_send_cmd(conn, chan->ident,
2886 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2887
2888 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2889 return;
2890
2891 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2892 l2cap_build_conf_req(chan, buf), buf);
2893 chan->num_conf_req++;
2894 }
2895
/* Extract the RFC option from a successful Configure Response and copy
 * its timers/PDU size into the channel.  Only meaningful for ERTM and
 * streaming channels; basic-mode channels are left untouched.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc;

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Scan the options for the (single) RFC option */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			goto done;
		}
	}

	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC option.
	 */
	rfc.mode = chan->mode;
	rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	rfc.max_pdu_size = cpu_to_le16(chan->imtu);

	BT_ERR("Expected RFC option was not found, using defaults");

done:
	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
2939
2940 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2941 {
2942 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2943
2944 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2945 return 0;
2946
2947 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2948 cmd->ident == conn->info_ident) {
2949 cancel_delayed_work(&conn->info_timer);
2950
2951 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2952 conn->info_ident = 0;
2953
2954 l2cap_conn_start(conn);
2955 }
2956
2957 return 0;
2958 }
2959
/* Handle an incoming Connection Request: find a listening channel for
 * the PSM, create a new child channel, run security checks, and send a
 * Connection Response (success, pending or an error result).
 *
 * Locking: conn->chan_lock and the parent socket lock are taken for
 * the channel-creation section only; the response is sent after both
 * are released.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	/* Our allocated source CID is the remote's destination CID */
	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must accept before we proceed */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange still outstanding: answer pending */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* A pending result with no info means the feature mask exchange
	 * has not run yet; start it now.
	 */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3089
/* Handle a Connection Response.  On success move the channel to
 * BT_CONFIG and send the first Configure Request; on a pending result
 * just mark the channel; any other result tears the channel down.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
						dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A zero scid means the peer rejected before allocating a CID;
	 * fall back to matching by the command identifier.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Avoid sending a duplicate configure request */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3157
3158 static inline void set_default_fcs(struct l2cap_chan *chan)
3159 {
3160 /* FCS is enabled only in ERTM or streaming mode, if one or both
3161 * sides request it.
3162 */
3163 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3164 chan->fcs = L2CAP_FCS_NONE;
3165 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3166 chan->fcs = L2CAP_FCS_CRC16;
3167 }
3168
/* Handle a Configure Request.  Options may arrive split over several
 * requests (continuation flag); they are accumulated in chan->conf_req
 * until a request without the continuation flag completes the set,
 * which is then parsed and answered.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	l2cap_chan_lock(chan);

	/* Configuration is only valid in BT_CONFIG or BT_CONNECT2 */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	/* Continuation flag (bit 0): more options will follow */
	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: the channel is ready */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Send our own configure request if not yet done */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
			test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(chan, rsp,
						L2CAP_CONF_SUCCESS, 0x0000), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3280
/* Handle a Configure Response.  Depending on the result, finish the
 * exchange (SUCCESS), resolve a pending EFS negotiation (PENDING),
 * retry with adjusted options (UNACCEPT, falling through to
 * disconnect when retries are exhausted), or tear the channel down.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
			result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		/* If we also answered PENDING, both sides agree now:
		 * finish our side of the negotiation.
		 */
		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* check compatibility */

			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
						l2cap_build_conf_rsp(chan, buf,
							L2CAP_CONF_SUCCESS, 0x0000), buf);
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		/* Retry with the remote's counter-proposed options while
		 * we are under the retry limit; otherwise fall through to
		 * the disconnect path below.
		 */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	/* Continuation flag set: more response options still to come */
	if (flags & 0x01)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);
		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
3389
/* Handle a Disconnection Request: acknowledge it, shut down the
 * socket, and delete the channel.  The channel is held across the
 * delete so the ops->close callback runs on a valid object.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The remote's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Keep a reference so chan survives until ops->close below */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3435
3436 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3437 {
3438 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3439 u16 dcid, scid;
3440 struct l2cap_chan *chan;
3441
3442 scid = __le16_to_cpu(rsp->scid);
3443 dcid = __le16_to_cpu(rsp->dcid);
3444
3445 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3446
3447 mutex_lock(&conn->chan_lock);
3448
3449 chan = __l2cap_get_chan_by_scid(conn, scid);
3450 if (!chan) {
3451 mutex_unlock(&conn->chan_lock);
3452 return 0;
3453 }
3454
3455 l2cap_chan_lock(chan);
3456
3457 l2cap_chan_hold(chan);
3458 l2cap_chan_del(chan, 0);
3459
3460 l2cap_chan_unlock(chan);
3461
3462 chan->ops->close(chan->data);
3463 l2cap_chan_put(chan);
3464
3465 mutex_unlock(&conn->chan_lock);
3466
3467 return 0;
3468 }
3469
/* Handle an Information Request: answer feature-mask and fixed-channel
 * queries with our capabilities, and reject any other type as
 * unsupported.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise optional features according to module params */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
						| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* A2MP fixed channel only when high speed is enabled */
		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3519
/* Handle an Information Response.  Stores the remote feature mask and
 * (optionally) the fixed-channel mask, then resumes pending channel
 * connections via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* Failed queries end the exchange with whatever we have */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* Chain a fixed-channel query when the remote supports it */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
3577
/* Handle an AMP Create Channel Request.  Currently a placeholder that
 * always rejects with L2CAP_CR_NO_MEM.  Note the check order matters:
 * a malformed length is -EPROTO even when high speed is disabled.
 */
static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!enable_hs)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);

	/* Placeholder: Always reject */
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
			sizeof(rsp), &rsp);

	return 0;
}
3608
/* An AMP Create Channel Response has the same wire layout and
 * semantics as a Connection Response, so reuse that handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
3616
3617 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3618 u16 icid, u16 result)
3619 {
3620 struct l2cap_move_chan_rsp rsp;
3621
3622 BT_DBG("icid %d, result %d", icid, result);
3623
3624 rsp.icid = cpu_to_le16(icid);
3625 rsp.result = cpu_to_le16(result);
3626
3627 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3628 }
3629
/* Send a Move Channel Confirmation carrying @icid and @result.  A new
 * signalling ident is allocated for the command and, when a channel is
 * given, also stored in chan->ident.
 */
static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
				struct l2cap_chan *chan, u16 icid, u16 result)
{
	struct l2cap_move_chan_cfm cfm;
	u8 ident;

	BT_DBG("icid %d, result %d", icid, result);

	ident = l2cap_get_ident(conn);
	if (chan)
		chan->ident = ident;

	cfm.icid = cpu_to_le16(icid);
	cfm.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
}
3647
3648 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3649 u16 icid)
3650 {
3651 struct l2cap_move_chan_cfm_rsp rsp;
3652
3653 BT_DBG("icid %d", icid);
3654
3655 rsp.icid = cpu_to_le16(icid);
3656 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3657 }
3658
3659 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3660 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3661 {
3662 struct l2cap_move_chan_req *req = data;
3663 u16 icid = 0;
3664 u16 result = L2CAP_MR_NOT_ALLOWED;
3665
3666 if (cmd_len != sizeof(*req))
3667 return -EPROTO;
3668
3669 icid = le16_to_cpu(req->icid);
3670
3671 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3672
3673 if (!enable_hs)
3674 return -EINVAL;
3675
3676 /* Placeholder: Always refuse */
3677 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3678
3679 return 0;
3680 }
3681
/* Handle a Move Channel Response.  We never initiate a move, so the
 * move is always confirmed as unconfirmed.
 */
static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
		struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
{
	struct l2cap_move_chan_rsp *rsp = data;
	u16 icid, result;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

	BT_DBG("icid %d, result %d", icid, result);

	/* Placeholder: Always unconfirmed */
	l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);

	return 0;
}
3701
/* Handle a Move Channel Confirmation.  The only required action here
 * is to acknowledge it with a Confirmation Response.
 */
static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
		struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid %d, result %d", icid, result);

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	return 0;
}
3720
/* Handle a Move Channel Confirmation Response.  Beyond validating the
 * command length there is nothing to do; the move transaction ends
 * with this command.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
		struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid %d", icid);

	return 0;
}
3736
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
						u16 to_multiplier)
{
	/* Validate a requested set of LE connection parameters.
	 *
	 * @min, @max: connection interval bounds, 6..3200 with
	 *	min <= max;
	 * @latency: slave latency, below 500 and small enough that the
	 *	supervision timeout cannot elapse between two events;
	 * @to_multiplier: supervision timeout, 10..3200.
	 *
	 * Returns 0 when the parameters are acceptable, -EINVAL
	 * otherwise.
	 */
	u16 max_latency;

	if (min < 6 || max < min || max > 3200)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* The supervision timeout must exceed the maximum interval */
	if (to_multiplier * 8 <= max)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;

	if (latency > 499)
		return -EINVAL;

	if (latency > max_latency)
		return -EINVAL;

	return 0;
}
3757
/* Handle an LE Connection Parameter Update Request.
 *
 * Only accepted when the local side is master of the link.  The
 * requested values are validated with l2cap_check_conn_param(); the
 * response reports acceptance or rejection, and accepted values are
 * passed down to the controller via hci_le_conn_update().
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Apply the accepted parameters on the LE link */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
3799
/* Dispatch a single BR/EDR signalling command to its handler.
 *
 * Echo requests are answered inline by mirroring the payload back;
 * echo responses need no action.  An unknown opcode yields -EINVAL,
 * which makes the caller send a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_create_channel_rsp(conn, cmd, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
3881
3882 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3883 struct l2cap_cmd_hdr *cmd, u8 *data)
3884 {
3885 switch (cmd->code) {
3886 case L2CAP_COMMAND_REJ:
3887 return 0;
3888
3889 case L2CAP_CONN_PARAM_UPDATE_REQ:
3890 return l2cap_conn_param_update_req(conn, cmd, data);
3891
3892 case L2CAP_CONN_PARAM_UPDATE_RSP:
3893 return 0;
3894
3895 default:
3896 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3897 return -EINVAL;
3898 }
3899 }
3900
3901 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3902 struct sk_buff *skb)
3903 {
3904 u8 *data = skb->data;
3905 int len = skb->len;
3906 struct l2cap_cmd_hdr cmd;
3907 int err;
3908
3909 l2cap_raw_recv(conn, skb);
3910
3911 while (len >= L2CAP_CMD_HDR_SIZE) {
3912 u16 cmd_len;
3913 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3914 data += L2CAP_CMD_HDR_SIZE;
3915 len -= L2CAP_CMD_HDR_SIZE;
3916
3917 cmd_len = le16_to_cpu(cmd.len);
3918
3919 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3920
3921 if (cmd_len > len || !cmd.ident) {
3922 BT_DBG("corrupted command");
3923 break;
3924 }
3925
3926 if (conn->hcon->type == LE_LINK)
3927 err = l2cap_le_sig_cmd(conn, &cmd, data);
3928 else
3929 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3930
3931 if (err) {
3932 struct l2cap_cmd_rej_unk rej;
3933
3934 BT_ERR("Wrong link type (%d)", err);
3935
3936 /* FIXME: Map err to a valid reason */
3937 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3938 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3939 }
3940
3941 data += cmd_len;
3942 len -= cmd_len;
3943 }
3944
3945 kfree_skb(skb);
3946 }
3947
/* Verify and strip the frame check sequence of a received frame.
 *
 * When the channel uses CRC16, the trailing FCS bytes are trimmed off
 * and compared against a CRC computed over the frame header (located
 * just before skb->data) plus the remaining payload.  skb_trim() only
 * shortens the recorded length, so the trimmed FCS bytes can still be
 * read from the buffer afterwards.
 *
 * Returns 0 when the FCS matches or is not in use, -EBADMSG on a
 * mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* Extended control fields make the covered header larger */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
3968
/* Respond on an ERTM channel by sending pending I-frames, preceded by
 * an RNR when we are locally busy, or followed by a plain RR when no
 * I-frame went out at all.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u32 control = 0;

	/* Counted up by l2cap_ertm_send() below */
	chan->frames_sent = 0;

	control |= __set_reqseq(chan, chan->buffer_seq);

	/* Locally busy: tell the peer with an RNR S-frame */
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
		l2cap_send_sframe(chan, control);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
	}

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* Nothing was sent and we are not busy: ack with an RR */
	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
			chan->frames_sent == 0) {
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
		l2cap_send_sframe(chan, control);
	}
}
3994
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by each frame's tx_seq offset from buffer_seq.
 *
 * Returns 0 on success or -EINVAL when a frame with the same tx_seq
 * is already queued (a duplicate).
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->control.txseq = tx_seq;
	bt_cb(skb)->control.sar = sar;

	next_skb = skb_peek(&chan->srej_q);

	tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);

	while (next_skb) {
		if (bt_cb(next_skb)->control.txseq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = __seq_offset(chan,
				bt_cb(next_skb)->control.txseq, chan->buffer_seq);

		/* First queued frame that sequences after the new one */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			next_skb = NULL;
		else
			next_skb = skb_queue_next(&chan->srej_q, next_skb);
	}

	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
4029
/* Chain @new_frag onto the fragment list of SDU head @skb.
 * @last_frag caches the current tail of the list so appending stays
 * O(1); it is updated to point at @new_frag.
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4048
/* Reassemble received I-frame payloads into SDUs according to the SAR
 * bits of @control, delivering complete SDUs via chan->ops->recv().
 *
 * On success the skb is either handed to the recv callback or parked
 * in chan->sdu while reassembly is in progress.  On error both the
 * skb and any partially assembled SDU are freed and the reassembly
 * state is reset.  Returns 0 on success or a negative error.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
{
	int err = -EINVAL;

	switch (__get_ctrl_sar(chan, control)) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A whole SDU in one frame is invalid while another
		 * SDU is still being reassembled.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan->data, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The start fragment begins with the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must not already cover the SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
						&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must leave the SDU still incomplete */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
						&chan->sdu_last_frag);
		skb = NULL;

		/* The end fragment must complete the announced length */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan->data, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4129
/* Enter the ERTM local-busy state: flag the condition, drop any
 * pending SREJ sequence numbers and arm the ack timer.
 */
static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
{
	BT_DBG("chan %p, Enter local busy", chan);

	set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
	l2cap_seq_list_clear(&chan->srej_list);

	__set_ack_timer(chan);
}
4139
/* Leave the ERTM local-busy state.  If an RNR was sent while busy,
 * poll the peer with an RR (P=1) and start the monitor timer to wait
 * for the final response (CONN_WAIT_F).
 */
static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
{
	u32 control;

	if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
		goto done;

	control = __set_reqseq(chan, chan->buffer_seq);
	control |= __set_ctrl_poll(chan);
	control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	__clear_retrans_timer(chan);
	__set_monitor_timer(chan);

	set_bit(CONN_WAIT_F, &chan->conn_state);

done:
	clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
	clear_bit(CONN_RNR_SENT, &chan->conn_state);

	BT_DBG("chan %p, Exit local busy", chan);
}
4164
4165 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4166 {
4167 if (chan->mode == L2CAP_MODE_ERTM) {
4168 if (busy)
4169 l2cap_ertm_enter_local_busy(chan);
4170 else
4171 l2cap_ertm_exit_local_busy(chan);
4172 }
4173 }
4174
/* Deliver frames from the SREJ queue that have become in-sequence,
 * starting at @tx_seq, stopping at the first gap, on a reassembly
 * error, or when we go locally busy.
 */
static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
{
	struct sk_buff *skb;
	u32 control;

	while ((skb = skb_peek(&chan->srej_q)) &&
			!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		int err;

		/* The queue is ordered; stop at the first gap */
		if (bt_cb(skb)->control.txseq != tx_seq)
			break;

		skb = skb_dequeue(&chan->srej_q);
		control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
		err = l2cap_reassemble_sdu(chan, skb, control);

		if (err < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			break;
		}

		chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
		tx_seq = __next_seq(chan, tx_seq);
	}
}
4200
/* A previously SREJ'd frame with @tx_seq arrived again: walk the SREJ
 * list, re-sending an SREJ for every entry queued before it (rotating
 * each entry to the tail), then remove the entry for @tx_seq itself.
 */
static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
{
	struct srej_list *l, *tmp;
	u32 control;

	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
		if (l->tx_seq == tx_seq) {
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
		control |= __set_reqseq(chan, l->tx_seq);
		l2cap_send_sframe(chan, control);
		list_del(&l->list);
		list_add_tail(&l->list, &chan->srej_l);
	}
}
4219
/* Send an SREJ for every missing sequence number from expected_tx_seq
 * up to (excluding) @tx_seq, recording each one on both srej_list and
 * the srej_l list.  expected_tx_seq ends up one past @tx_seq.
 *
 * Returns 0 on success or -ENOMEM when a list entry cannot be
 * allocated.
 */
static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
{
	struct srej_list *new;
	u32 control;

	while (tx_seq != chan->expected_tx_seq) {
		control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
		control |= __set_reqseq(chan, chan->expected_tx_seq);
		l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
		l2cap_send_sframe(chan, control);

		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
		if (!new)
			return -ENOMEM;

		new->tx_seq = chan->expected_tx_seq;

		chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);

		list_add_tail(&new->list, &chan->srej_l);
	}

	chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);

	return 0;
}
4246
/* Process a received ERTM I-frame.
 *
 * Acknowledged frames are dropped from the transmit queue, the frame
 * is validated against the transmit window, and depending on whether
 * it is in sequence it is delivered, queued for SREJ recovery, or
 * triggers new SREJ requests.  Invalid or duplicate frames are
 * dropped; unrecoverable conditions cause a disconnect request.
 */
static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
{
	u16 tx_seq = __get_txseq(chan, rx_control);
	u16 req_seq = __get_reqseq(chan, rx_control);
	u8 sar = __get_ctrl_sar(chan, rx_control);
	int tx_seq_offset, expected_tx_seq_offset;
	int num_to_ack = (chan->tx_win/6) + 1;
	int err = 0;

	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
							tx_seq, rx_control);

	/* A final (F=1) frame ends the wait started by our poll */
	if (__is_ctrl_final(chan, rx_control) &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	chan->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(chan);

	tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);

	/* invalid tx_seq */
	if (tx_seq_offset >= chan->tx_win) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	/* While locally busy only acknowledge, never deliver */
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
			l2cap_send_ack(chan);
		goto drop;
	}

	if (tx_seq == chan->expected_tx_seq)
		goto expected;

	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		struct srej_list *first;

		first = list_first_entry(&chan->srej_l,
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* The oldest missing frame arrived: flush the
			 * in-sequence part of the SREJ queue.
			 */
			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
			l2cap_check_srej_gap(chan, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(&chan->srej_l)) {
				chan->buffer_seq = chan->buffer_seq_srej;
				clear_bit(CONN_SREJ_SENT, &chan->conn_state);
				l2cap_send_ack(chan);
				BT_DBG("chan %p, Exit SREJ_SENT", chan);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
				goto drop;

			list_for_each_entry(l, &chan->srej_l, list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(chan, tx_seq);
					return 0;
				}
			}

			err = l2cap_send_srejframe(chan, tx_seq);
			if (err < 0) {
				l2cap_send_disconn_req(chan->conn, chan, -err);
				return err;
			}
		}
	} else {
		expected_tx_seq_offset = __seq_offset(chan,
				chan->expected_tx_seq, chan->buffer_seq);

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		/* A gap appeared: start selective-reject recovery */
		set_bit(CONN_SREJ_SENT, &chan->conn_state);

		BT_DBG("chan %p, Enter SREJ", chan);

		INIT_LIST_HEAD(&chan->srej_l);
		chan->buffer_seq_srej = chan->buffer_seq;

		__skb_queue_head_init(&chan->srej_q);
		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);

		/* Set P-bit only if there are some I-frames to ack. */
		if (__clear_ack_timer(chan))
			set_bit(CONN_SEND_PBIT, &chan->conn_state);

		err = l2cap_send_srejframe(chan, tx_seq);
		if (err < 0) {
			l2cap_send_disconn_req(chan->conn, chan, -err);
			return err;
		}
	}
	return 0;

expected:
	chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);

	/* During SREJ recovery in-sequence frames are queued, not
	 * delivered, until the missing ones have been received.
	 */
	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		bt_cb(skb)->control.txseq = tx_seq;
		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	err = l2cap_reassemble_sdu(chan, skb, rx_control);
	chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

	if (err < 0) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return err;
	}

	if (__is_ctrl_final(chan, rx_control)) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	}


	/* Ack every num_to_ack frames; otherwise delay via the timer */
	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
	if (chan->num_acked == num_to_ack - 1)
		l2cap_send_ack(chan);
	else
		__set_ack_timer(chan);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
4391
/* Process a received RR (Receiver Ready) S-frame: drop acknowledged
 * frames, answer a poll (P=1) or a final (F=1), and resume sending.
 */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
				__get_reqseq(chan, rx_control), rx_control);

	chan->expected_ack_seq = __get_reqseq(chan, rx_control);
	l2cap_drop_acked_frames(chan);

	if (__is_ctrl_poll(chan, rx_control)) {
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
			if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
					(chan->unacked_frames > 0))
				__set_retrans_timer(chan);

			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (__is_ctrl_final(chan, rx_control)) {
		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);

	} else {
		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				(chan->unacked_frames > 0))
			__set_retrans_timer(chan);

		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
4431
/* Process a received REJ S-frame: the peer asks for retransmission of
 * everything from req_seq on.  Acknowledged frames are dropped first.
 */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
{
	u16 tx_seq = __get_reqseq(chan, rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (__is_ctrl_final(chan, rx_control)) {
		/* Retransmit only if no REJ was acted upon already */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);

		if (test_bit(CONN_WAIT_F, &chan->conn_state))
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
/* Process a received SREJ S-frame: selectively retransmit the single
 * frame with sequence number req_seq, honouring P/F bits.
 */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
{
	u16 tx_seq = __get_reqseq(chan, rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (__is_ctrl_poll(chan, rx_control)) {
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	} else if (__is_ctrl_final(chan, rx_control)) {
		/* Skip the retransmit when this SREJ was already
		 * handled while waiting for the final bit.
		 */
		if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
				chan->srej_save_reqseq == tx_seq)
			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	}
}
4488
/* Process a received RNR (Receiver Not Ready) S-frame: mark the peer
 * busy, drop acknowledged frames, and answer a poll as required.
 */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
{
	u16 tx_seq = __get_reqseq(chan, rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (__is_ctrl_poll(chan, rx_control))
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		__clear_retrans_timer(chan);
		if (__is_ctrl_poll(chan, rx_control))
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	if (__is_ctrl_poll(chan, rx_control)) {
		l2cap_send_srejtail(chan);
	} else {
		rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
		l2cap_send_sframe(chan, rx_control);
	}
}
4516
/* Dispatch a received S-frame to the RR/REJ/SREJ/RNR handler.  A
 * final (F=1) frame first ends the wait started by our poll.  The skb
 * is always consumed: S-frames carry no payload to deliver.
 */
static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
{
	BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);

	if (__is_ctrl_final(chan, rx_control) &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	switch (__get_ctrl_super(chan, rx_control)) {
	case L2CAP_SUPER_RR:
		l2cap_data_channel_rrframe(chan, rx_control);
		break;

	case L2CAP_SUPER_REJ:
		l2cap_data_channel_rejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_SREJ:
		l2cap_data_channel_srejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_RNR:
		l2cap_data_channel_rnrframe(chan, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
4550
4551 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4552 {
4553 u32 control;
4554 u16 req_seq;
4555 int len, next_tx_seq_offset, req_seq_offset;
4556
4557 __unpack_control(chan, skb);
4558
4559 control = __get_control(chan, skb->data);
4560 skb_pull(skb, __ctrl_size(chan));
4561 len = skb->len;
4562
4563 /*
4564 * We can just drop the corrupted I-frame here.
4565 * Receiver will miss it and start proper recovery
4566 * procedures and ask retransmission.
4567 */
4568 if (l2cap_check_fcs(chan, skb))
4569 goto drop;
4570
4571 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4572 len -= L2CAP_SDULEN_SIZE;
4573
4574 if (chan->fcs == L2CAP_FCS_CRC16)
4575 len -= L2CAP_FCS_SIZE;
4576
4577 if (len > chan->mps) {
4578 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4579 goto drop;
4580 }
4581
4582 req_seq = __get_reqseq(chan, control);
4583
4584 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4585
4586 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4587 chan->expected_ack_seq);
4588
4589 /* check for invalid req-seq */
4590 if (req_seq_offset > next_tx_seq_offset) {
4591 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4592 goto drop;
4593 }
4594
4595 if (!__is_sframe(chan, control)) {
4596 if (len < 0) {
4597 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4598 goto drop;
4599 }
4600
4601 l2cap_data_channel_iframe(chan, control, skb);
4602 } else {
4603 if (len != 0) {
4604 BT_ERR("%d", len);
4605 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4606 goto drop;
4607 }
4608
4609 l2cap_data_channel_sframe(chan, control, skb);
4610 }
4611
4612 return 0;
4613
4614 drop:
4615 kfree_skb(skb);
4616 return 0;
4617 }
4618
/* Deliver a data frame received on dynamic channel @cid, dispatching
 * by channel mode.  Frames for unknown channels, disconnected
 * channels or oversized basic-mode payloads are dropped.  The skb is
 * always consumed.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;
	u32 control;
	u16 tx_seq;
	int len;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return 0;
	}

	l2cap_chan_lock(chan);

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		l2cap_ertm_data_rcv(chan, skb);

		goto done;

	case L2CAP_MODE_STREAMING:
		control = __get_control(chan, skb->data);
		skb_pull(skb, __ctrl_size(chan));
		len = skb->len;

		if (l2cap_check_fcs(chan, skb))
			goto drop;

		if (__is_sar_start(chan, control))
			len -= L2CAP_SDULEN_SIZE;

		if (chan->fcs == L2CAP_FCS_CRC16)
			len -= L2CAP_FCS_SIZE;

		/* Streaming mode allows I-frames only */
		if (len > chan->mps || len < 0 || __is_sframe(chan, control))
			goto drop;

		tx_seq = __get_txseq(chan, control);

		if (chan->expected_tx_seq != tx_seq) {
			/* Frame(s) missing - must discard partial SDU */
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;

			/* TODO: Notify userland of missing data */
		}

		chan->expected_tx_seq = __next_seq(chan, tx_seq);

		if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);

		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);

	return 0;
}
4709
4710 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4711 {
4712 struct l2cap_chan *chan;
4713
4714 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
4715 if (!chan)
4716 goto drop;
4717
4718 BT_DBG("chan %p, len %d", chan, skb->len);
4719
4720 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4721 goto drop;
4722
4723 if (chan->imtu < skb->len)
4724 goto drop;
4725
4726 if (!chan->ops->recv(chan->data, skb))
4727 return 0;
4728
4729 drop:
4730 kfree_skb(skb);
4731
4732 return 0;
4733 }
4734
4735 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4736 struct sk_buff *skb)
4737 {
4738 struct l2cap_chan *chan;
4739
4740 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
4741 if (!chan)
4742 goto drop;
4743
4744 BT_DBG("chan %p, len %d", chan, skb->len);
4745
4746 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4747 goto drop;
4748
4749 if (chan->imtu < skb->len)
4750 goto drop;
4751
4752 if (!chan->ops->recv(chan->data, skb))
4753 return 0;
4754
4755 drop:
4756 kfree_skb(skb);
4757
4758 return 0;
4759 }
4760
/* Entry point for a complete L2CAP frame received from HCI: strip the
 * basic header, verify the length and route the payload by CID to the
 * signalling, connectionless, ATT, SMP or dynamic channel handler.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Drop frames whose header length disagrees with the payload */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry the PSM in front */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
4804
4805 /* ---- L2CAP interface with lower layer (HCI) ---- */
4806
/* HCI callback: decide whether to accept an incoming ACL connection
 * from @bdaddr.  Returns the combined link mode of listening channels
 * bound to the local adapter address, falling back to wildcard
 * (BDADDR_ANY) listeners when no exact match exists.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
4837
4838 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4839 {
4840 struct l2cap_conn *conn;
4841
4842 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4843
4844 if (!status) {
4845 conn = l2cap_conn_add(hcon, status);
4846 if (conn)
4847 l2cap_conn_ready(conn);
4848 } else
4849 l2cap_conn_del(hcon, bt_to_errno(status));
4850
4851 return 0;
4852 }
4853
4854 int l2cap_disconn_ind(struct hci_conn *hcon)
4855 {
4856 struct l2cap_conn *conn = hcon->l2cap_data;
4857
4858 BT_DBG("hcon %p", hcon);
4859
4860 if (!conn)
4861 return HCI_ERROR_REMOTE_USER_TERM;
4862 return conn->disc_reason;
4863 }
4864
/* HCI callback: the ACL link was disconnected.  Tear down the whole
 * L2CAP connection, translating the HCI reason code to an errno.
 */
int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
	return 0;
}
4872
4873 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4874 {
4875 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4876 return;
4877
4878 if (encrypt == 0x00) {
4879 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4880 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4881 } else if (chan->sec_level == BT_SECURITY_HIGH)
4882 l2cap_chan_close(chan, ECONNREFUSED);
4883 } else {
4884 if (chan->sec_level == BT_SECURITY_MEDIUM)
4885 __clear_chan_timer(chan);
4886 }
4887 }
4888
/* HCI callback: the security (authentication/encryption) procedure on
 * the ACL link finished with @status; @encrypt is the new encryption
 * state (0 = off).  Walk every channel on the connection and advance
 * or abort its connection state machine accordingly.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	if (hcon->type == LE_LINK) {
		/* LE link now encrypted: start SMP key distribution and
		 * stop the pending security timer.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan->scid %d", chan->scid);

		if (chan->scid == L2CAP_CID_LE_DATA) {
			/* The LE ATT channel becomes ready as soon as the
			 * link is encrypted; no L2CAP connect handshake.
			 */
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			/* A connect request is already in flight; its reply
			 * path handles the security result.
			 */
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
						chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			/* Security upgrade on an established channel:
			 * resume the socket and re-check encryption needs.
			 */
			bt_sk(sk)->suspended = false;
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing channel was waiting for security: send
			 * the connect request now, or arm the disconnect
			 * timer on failure.
			 */
			if (!status) {
				l2cap_send_conn_req(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming channel waiting for security before we
			 * answer the peer's connect request.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (bt_sk(sk)->defer_setup) {
					/* Userspace must authorize first:
					 * reply "pending" and wake the
					 * listening parent socket.
					 */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: block the connection and
				 * schedule a disconnect.
				 */
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4988
/* Lower-layer (HCI) entry point for incoming ACL data.  Reassembles
 * L2CAP frames that are fragmented across several ACL packets: a
 * start packet carries the Basic L2CAP header with the total length,
 * and ACL_CONT packets carry the remainder.  Complete frames are
 * handed to l2cap_recv_frame().  Always returns 0; the input skb is
 * consumed on every path.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* First data on this link: create the L2CAP connection object. */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		/* A new start frame while reassembly is still in progress
		 * means the previous frame was truncated: drop the partial
		 * data and flag the connection unreliable.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		chan = l2cap_get_chan_by_scid(conn, cid);

		/* Reject frames whose announced payload would exceed the
		 * channel's receive MTU before allocating a buffer for them.
		 */
		if (chan && chan->sk) {
			struct sock *sk = chan->sk;
			lock_sock(sk);

			if (chan->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
						"MTU %d)", len,
						chan->imtu);
				release_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			release_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		/* Bytes still expected in continuation fragments. */
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation fragment without a pending start frame. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment overruns the announced total: abort reassembly. */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	/* The fragment's data has been copied (or rejected); free the input
	 * skb.  NOTE: this label is also the normal exit for frames still
	 * under reassembly, not only an error path.
	 */
	kfree_skb(skb);
	return 0;
}
5100
5101 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5102 {
5103 struct l2cap_chan *c;
5104
5105 read_lock(&chan_list_lock);
5106
5107 list_for_each_entry(c, &chan_list, global_l) {
5108 struct sock *sk = c->sk;
5109
5110 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5111 batostr(&bt_sk(sk)->src),
5112 batostr(&bt_sk(sk)->dst),
5113 c->state, __le16_to_cpu(c->psm),
5114 c->scid, c->dcid, c->imtu, c->omtu,
5115 c->sec_level, c->mode);
5116 }
5117
5118 read_unlock(&chan_list_lock);
5119
5120 return 0;
5121 }
5122
/* debugfs open handler: bind the single-record seq_file show routine. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5127
/* File operations for the read-only "l2cap" debugfs entry. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs entry, kept for removal in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
5136
5137 int __init l2cap_init(void)
5138 {
5139 int err;
5140
5141 err = l2cap_init_sockets();
5142 if (err < 0)
5143 return err;
5144
5145 if (bt_debugfs) {
5146 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5147 bt_debugfs, NULL, &l2cap_debugfs_fops);
5148 if (!l2cap_debugfs)
5149 BT_ERR("Failed to create L2CAP debug file");
5150 }
5151
5152 return 0;
5153 }
5154
/* Module teardown: remove the debugfs entry and unregister the sockets. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5160
/* Module parameter (rw in sysfs, mode 0644): disable Enhanced
 * Retransmission Mode support.
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
This page took 0.139393 seconds and 6 git commands to generate.