Bluetooth: L2CAP core and socket layer
[deliverable/linux.git] / net / bluetooth / l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core and sockets. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57
58 #define VERSION "2.15"
59
60 static int disable_ertm = 0;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
64
65 static const struct proto_ops l2cap_sock_ops;
66
67 static struct workqueue_struct *_busy_wq;
68
69 static struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71 };
72
73 static void l2cap_busy_work(struct work_struct *work);
74
75 static void __l2cap_sock_close(struct sock *sk, int reason);
76 static void l2cap_sock_close(struct sock *sk);
77 static void l2cap_sock_kill(struct sock *sk);
78
79 static int l2cap_build_conf_req(struct sock *sk, void *data);
80 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
81 u8 code, u8 ident, u16 dlen, void *data);
82
83 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
84
85 /* ---- L2CAP timers ---- */
/* Per-socket timer callback, armed by l2cap_sock_set_timer().
 * Runs in timer (softirq) context; 'arg' is the struct sock cast to an
 * unsigned long.  Chooses an error reason from the socket state, closes
 * the channel, and drops the reference the timer held on the socket.
 */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* l2cap_sock_kill() must run on the unlocked socket */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
110
111 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
112 {
113 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
114 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
115 }
116
/* Disarm the per-socket timer (drops the timer's socket reference). */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
122
123 /* ---- L2CAP channels ---- */
124 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
125 {
126 struct sock *s;
127 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
128 if (l2cap_pi(s)->dcid == cid)
129 break;
130 }
131 return s;
132 }
133
134 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
135 {
136 struct sock *s;
137 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
138 if (l2cap_pi(s)->scid == cid)
139 break;
140 }
141 return s;
142 }
143
144 /* Find channel with given SCID.
145 * Returns locked socket */
146 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
147 {
148 struct sock *s;
149 read_lock(&l->lock);
150 s = __l2cap_get_chan_by_scid(l, cid);
151 if (s)
152 bh_lock_sock(s);
153 read_unlock(&l->lock);
154 return s;
155 }
156
157 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
158 {
159 struct sock *s;
160 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
161 if (l2cap_pi(s)->ident == ident)
162 break;
163 }
164 return s;
165 }
166
167 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
168 {
169 struct sock *s;
170 read_lock(&l->lock);
171 s = __l2cap_get_chan_by_ident(l, ident);
172 if (s)
173 bh_lock_sock(s);
174 read_unlock(&l->lock);
175 return s;
176 }
177
178 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
179 {
180 u16 cid = L2CAP_CID_DYN_START;
181
182 for (; cid < L2CAP_CID_DYN_END; cid++) {
183 if (!__l2cap_get_chan_by_scid(l, cid))
184 return cid;
185 }
186
187 return 0;
188 }
189
/* Push @sk onto the head of the connection's doubly-linked channel
 * list.  Takes a socket reference; caller must hold the list write
 * lock (see l2cap_chan_add()). */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
201
/* Remove @sk from the connection's channel list and drop the reference
 * taken by __l2cap_chan_link().  Takes the list write lock itself. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
218
219 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
220 {
221 struct l2cap_chan_list *l = &conn->chan_list;
222
223 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
224 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
225
226 conn->disc_reason = 0x13;
227
228 l2cap_pi(sk)->conn = conn;
229
230 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
231 /* Alloc CID for connection-oriented socket */
232 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
233 } else if (sk->sk_type == SOCK_DGRAM) {
234 /* Connectionless socket */
235 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
236 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
237 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
238 } else {
239 /* Raw socket can send/recv signalling messages only */
240 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
241 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
242 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
243 }
244
245 __l2cap_chan_link(l, sk);
246
247 if (parent)
248 bt_accept_enqueue(parent, sk);
249 }
250
/* Delete channel.
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list and drop the hci_conn
		 * reference held while attached to this connection. */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Channel was never accepted: detach from the listener's
		 * accept queue and wake the listener up. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		/* Stop all ERTM timers and free every frame still queued
		 * for retransmission or reassembly. */
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));

		/* Release the pending SREJ bookkeeping entries */
		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
299
300 /* Service level security */
301 static inline int l2cap_check_security(struct sock *sk)
302 {
303 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
304 __u8 auth_type;
305
306 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
307 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
308 auth_type = HCI_AT_NO_BONDING_MITM;
309 else
310 auth_type = HCI_AT_NO_BONDING;
311
312 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
313 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
314 } else {
315 switch (l2cap_pi(sk)->sec_level) {
316 case BT_SECURITY_HIGH:
317 auth_type = HCI_AT_GENERAL_BONDING_MITM;
318 break;
319 case BT_SECURITY_MEDIUM:
320 auth_type = HCI_AT_GENERAL_BONDING;
321 break;
322 default:
323 auth_type = HCI_AT_NO_BONDING;
324 break;
325 }
326 }
327
328 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
329 auth_type);
330 }
331
/* Allocate the next signalling command identifier for @conn.
 * Serialised with conn->lock; wraps within the kernel's 1-128 range. */
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
353
354 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
355 {
356 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
357
358 BT_DBG("code 0x%2.2x", code);
359
360 if (!skb)
361 return;
362
363 hci_send_acl(conn->hcon, skb, 0);
364 }
365
/* Build and transmit an ERTM supervisory frame (S-frame) carrying the
 * given control field.  Pending F/P bits are folded into the control
 * field and cleared.  No-op unless the channel is connected. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* basic header + control */

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the trailing FCS */

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Piggy-back a pending Final bit and consume the request */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Likewise for a pending Poll bit */
	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything before it: count - 2 bytes */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	hci_send_acl(pi->conn->hcon, skb, 0);
}
411
412 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
413 {
414 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
415 control |= L2CAP_SUPER_RCV_NOT_READY;
416 pi->conn_state |= L2CAP_CONN_RNR_SENT;
417 } else
418 control |= L2CAP_SUPER_RCV_READY;
419
420 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
421
422 l2cap_send_sframe(pi, control);
423 }
424
425 static inline int __l2cap_no_conn_pending(struct sock *sk)
426 {
427 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
428 }
429
/* Advance an outgoing channel: if the remote feature mask is already
 * known, send the Connect Request (once security allows it); otherwise
 * kick off an Information Request for the feature mask first. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange started but not finished yet: the
		 * connect request will be sent from l2cap_conn_start()
		 * when the info response (or timeout) arrives. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		/* First channel on this connection: query the remote
		 * feature mask and arm the info timeout. */
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
463
464 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
465 {
466 u32 local_feat_mask = l2cap_feat_mask;
467 if (!disable_ertm)
468 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
469
470 switch (mode) {
471 case L2CAP_MODE_ERTM:
472 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
473 case L2CAP_MODE_STREAMING:
474 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
475 default:
476 return 0x00;
477 }
478 }
479
/* Send a Disconnect Request for channel @sk, flush its transmit state,
 * move it to BT_DISCONN and record @err on the socket.
 * Caller holds the socket lock. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* No more (re)transmissions once we are disconnecting */
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
503
504 /* ---- L2CAP connections ---- */
505 static void l2cap_conn_start(struct l2cap_conn *conn)
506 {
507 struct l2cap_chan_list *l = &conn->chan_list;
508 struct sock_del_list del, *tmp1, *tmp2;
509 struct sock *sk;
510
511 BT_DBG("conn %p", conn);
512
513 INIT_LIST_HEAD(&del.list);
514
515 read_lock(&l->lock);
516
517 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
518 bh_lock_sock(sk);
519
520 if (sk->sk_type != SOCK_SEQPACKET &&
521 sk->sk_type != SOCK_STREAM) {
522 bh_unlock_sock(sk);
523 continue;
524 }
525
526 if (sk->sk_state == BT_CONNECT) {
527 struct l2cap_conn_req req;
528
529 if (!l2cap_check_security(sk) ||
530 !__l2cap_no_conn_pending(sk)) {
531 bh_unlock_sock(sk);
532 continue;
533 }
534
535 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
536 conn->feat_mask)
537 && l2cap_pi(sk)->conf_state &
538 L2CAP_CONF_STATE2_DEVICE) {
539 tmp1 = kzalloc(sizeof(struct sock_del_list),
540 GFP_ATOMIC);
541 tmp1->sk = sk;
542 list_add_tail(&tmp1->list, &del.list);
543 bh_unlock_sock(sk);
544 continue;
545 }
546
547 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
548 req.psm = l2cap_pi(sk)->psm;
549
550 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
551 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
552
553 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
554 L2CAP_CONN_REQ, sizeof(req), &req);
555
556 } else if (sk->sk_state == BT_CONNECT2) {
557 struct l2cap_conn_rsp rsp;
558 char buf[128];
559 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
560 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
561
562 if (l2cap_check_security(sk)) {
563 if (bt_sk(sk)->defer_setup) {
564 struct sock *parent = bt_sk(sk)->parent;
565 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
566 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
567 parent->sk_data_ready(parent, 0);
568
569 } else {
570 sk->sk_state = BT_CONFIG;
571 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
572 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
573 }
574 } else {
575 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
576 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
577 }
578
579 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
580 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
581
582 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
583 rsp.result != L2CAP_CR_SUCCESS) {
584 bh_unlock_sock(sk);
585 continue;
586 }
587
588 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
589 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
590 l2cap_build_conf_req(sk, buf), buf);
591 l2cap_pi(sk)->num_conf_req++;
592 }
593
594 bh_unlock_sock(sk);
595 }
596
597 read_unlock(&l->lock);
598
599 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
600 bh_lock_sock(tmp1->sk);
601 __l2cap_sock_close(tmp1->sk, ECONNRESET);
602 bh_unlock_sock(tmp1->sk);
603 list_del(&tmp1->list);
604 kfree(tmp1);
605 }
606 }
607
/* The underlying ACL link is up: mark connectionless/raw channels as
 * connected immediately and start the connect sequence for channels
 * waiting in BT_CONNECT. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* No L2CAP-level handshake for these types */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
633
634 /* Notify sockets that we cannot guaranty reliability anymore */
635 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
636 {
637 struct l2cap_chan_list *l = &conn->chan_list;
638 struct sock *sk;
639
640 BT_DBG("conn %p", conn);
641
642 read_lock(&l->lock);
643
644 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
645 if (l2cap_pi(sk)->force_reliable)
646 sk->sk_err = err;
647 }
648
649 read_unlock(&l->lock);
650 }
651
/* Information Request timed out: pretend the feature exchange is done
 * (with whatever we have) and let the pending channels proceed. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
661
/* Create (or return the existing) L2CAP connection object for an ACL
 * link.  Returns NULL on allocation failure, or the existing conn when
 * one is already attached; a non-zero @status also short-circuits. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* 0x13 — default disconnect reason; NOTE(review): looks like the
	 * HCI "remote user terminated connection" code, confirm. */
	conn->disc_reason = 0x13;

	return conn;
}
694
/* Tear down the L2CAP connection attached to @hcon: kill every channel
 * with error @err, stop the info timer, and free the conn object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
721
722 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
723 {
724 struct l2cap_chan_list *l = &conn->chan_list;
725 write_lock_bh(&l->lock);
726 __l2cap_chan_add(conn, sk, parent);
727 write_unlock_bh(&l->lock);
728 }
729
730 /* ---- Socket interface ---- */
731 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
732 {
733 struct sock *sk;
734 struct hlist_node *node;
735 sk_for_each(sk, node, &l2cap_sk_list.head)
736 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
737 goto found;
738 sk = NULL;
739 found:
740 return sk;
741 }
742
/* Find socket with psm and source bdaddr.
 * Returns closest match.
 */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* node is non-NULL only when the loop broke out early, i.e. an
	 * exact match was found; otherwise fall back to the wildcard
	 * (BDADDR_ANY) candidate, which may be NULL. */
	return node ? sk : sk1;
}
767
768 /* Find socket with given address (psm, src).
769 * Returns locked socket */
770 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
771 {
772 struct sock *s;
773 read_lock(&l2cap_sk_list.lock);
774 s = __l2cap_get_sock_by_psm(state, psm, src);
775 if (s)
776 bh_lock_sock(s);
777 read_unlock(&l2cap_sk_list.lock);
778 return s;
779 }
780
781 static void l2cap_sock_destruct(struct sock *sk)
782 {
783 BT_DBG("sk %p", sk);
784
785 skb_queue_purge(&sk->sk_receive_queue);
786 skb_queue_purge(&sk->sk_write_queue);
787 }
788
/* Shut down a listening socket: close every not-yet-accepted child,
 * then mark the parent closed and zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
802
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 */
static void l2cap_sock_kill(struct sock *sk)
{
	/* Not zapped yet, or still attached to a struct socket:
	 * somebody else is responsible for the final release. */
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
818
/* Close a channel according to its state machine position.
 * Caller holds the socket lock; the socket is zapped (directly or via
 * l2cap_chan_del()) in every branch except the connected SEQPACKET/
 * STREAM case, which only starts the disconnect handshake. */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Orderly shutdown: send Disconnect Request and
			 * wait (bounded by the socket timer) for the
			 * response to finish the teardown. */
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk, reason);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* An incoming request is still pending: refuse it.
			 * defer_setup means userspace saw (and is rejecting)
			 * it, which maps to "security block". */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
872
/* Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
{
	/* Stop the timer first so it cannot race the close below */
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	/* Reap the socket if it is now a zapped orphan */
	l2cap_sock_kill(sk);
}
882
/* Initialise the L2CAP-specific part of a freshly allocated socket.
 * A child accepted from @parent inherits the parent's configuration;
 * a plain socket gets the module defaults (ERTM for SOCK_STREAM when
 * ERTM is enabled, basic mode otherwise). */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->conf_state = l2cap_pi(parent)->conf_state;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs  = l2cap_pi(parent)->fcs;
		pi->max_tx = l2cap_pi(parent)->max_tx;
		pi->tx_win = l2cap_pi(parent)->tx_win;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
			pi->mode = L2CAP_MODE_ERTM;
			/* STATE2_DEVICE: the mode is mandated locally */
			pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		} else {
			pi->mode = L2CAP_MODE_BASIC;
		}
		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
		pi->fcs  = L2CAP_FCS_CRC16;
		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	skb_queue_head_init(BUSY_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
928
/* Protocol descriptor: sizes each L2CAP socket as an l2cap_pinfo. */
static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};
934
/* Allocate and minimally initialise an L2CAP socket, link it on the
 * global socket list, and arm its timeout callback.
 * Returns NULL on allocation failure. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
959
/* socket(2) backend for PF_BLUETOOTH/L2CAP.  Validates the socket type
 * (raw sockets need CAP_NET_RAW unless created in-kernel), then
 * allocates and initialises the socket.
 * Returns 0 or a negative errno. */
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
			     int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}
985
/* bind(2) backend: record the source address and PSM on a BT_OPEN
 * socket.  PSMs below 0x1001 are privileged (CAP_NET_BIND_SERVICE);
 * binding to a named CID is not supported here.
 * Returns 0 or a negative errno. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy at most sizeof(la); shorter userspace structs are allowed */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
				!capable(CAP_NET_BIND_SERVICE)) {
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm   = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		/* SDP (0x0001) and RFCOMM (0x0003) need no security */
		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
1039
/* Establish (or reuse) the ACL link to the destination address and
 * attach this channel to it.  Picks an HCI authentication type from
 * the socket type / PSM / security level, then either completes
 * immediately (link already up) or leaves the channel in BT_CONNECT
 * with the socket timer armed.
 * Caller holds the socket lock.  Returns 0 or a negative errno. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	if (sk->sk_type == SOCK_RAW) {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		/* SDP never bonds; downgrade LOW to the SDP level */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* No handshake needed for these socket types */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1130
/* connect(2) backend: validate the destination and channel mode, start
 * the connection via l2cap_do_connect(), then wait for BT_CONNECTED
 * (bounded by the socket's send timeout / O_NONBLOCK).
 * Returns 0 or a negative errno. */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || alen < sizeof(addr->sa_family) ||
			addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets must name a PSM */
	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
			&& !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
1208
/* listen(2) backend: move a bound SEQPACKET/STREAM socket to BT_LISTEN.
 * If the socket was bound without a PSM, auto-assign the first free
 * odd PSM in the dynamic range 0x1001-0x10ff.
 * Returns 0 or a negative errno. */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
			|| sk->sk_state != BT_BOUND) {
		err = -EBADFD;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* Dynamic PSMs are odd; step by 2 through the range */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm   = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
1267
/*
 * accept() on a listening L2CAP socket.
 *
 * Sleeps (wake-one, interruptible) until an established child socket
 * can be dequeued, the timeout expires (-EAGAIN when non-blocking), a
 * signal arrives, or the socket leaves BT_LISTEN (-EBADFD).  The socket
 * lock is dropped around each sleep and re-taken afterwards.
 */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	/* Nested lock class: a child sock may be locked while its parent
	 * listener is held elsewhere. */
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* Re-check state after re-acquiring the lock: the socket
		 * may have been closed while we slept. */
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1323
1324 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1325 {
1326 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1327 struct sock *sk = sock->sk;
1328
1329 BT_DBG("sock %p, sk %p", sock, sk);
1330
1331 addr->sa_family = AF_BLUETOOTH;
1332 *len = sizeof(struct sockaddr_l2);
1333
1334 if (peer) {
1335 la->l2_psm = l2cap_pi(sk)->psm;
1336 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1337 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1338 } else {
1339 la->l2_psm = l2cap_pi(sk)->sport;
1340 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1341 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1342 }
1343
1344 return 0;
1345 }
1346
/*
 * Wait (interruptibly) until every outstanding ERTM I-frame has been
 * acknowledged or the connection goes away.  Called with the socket
 * lock held (from shutdown); the lock is released around each sleep.
 * Returns 0, a signal-derived error, or a pending socket error.
 */
static int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;	/* poll interval while frames are in flight */

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* schedule_timeout() may have returned 0; re-arm so the
		 * next sleep is bounded. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1377
/*
 * ERTM monitor timer expiry: the remote side has not answered our poll.
 * Once retry_count reaches the remote's max-transmit limit the channel
 * is torn down; otherwise poll again with an RR/RNR S-frame carrying
 * the P-bit.  Runs in timer (softirq) context, hence bh_lock_sock.
 */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1397
/*
 * ERTM retransmission timer expiry: no ack arrived in time.  Start the
 * poll sequence (retry_count = 1, monitor timer armed), note that we
 * now wait for an F-bit response, and poll the peer with RR/RNR+P.
 * Runs in timer (softirq) context, hence bh_lock_sock.
 */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1413
1414 static void l2cap_drop_acked_frames(struct sock *sk)
1415 {
1416 struct sk_buff *skb;
1417
1418 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1419 l2cap_pi(sk)->unacked_frames) {
1420 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1421 break;
1422
1423 skb = skb_dequeue(TX_QUEUE(sk));
1424 kfree_skb(skb);
1425
1426 l2cap_pi(sk)->unacked_frames--;
1427 }
1428
1429 if (!l2cap_pi(sk)->unacked_frames)
1430 del_timer(&l2cap_pi(sk)->retrans_timer);
1431 }
1432
1433 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1434 {
1435 struct l2cap_pinfo *pi = l2cap_pi(sk);
1436
1437 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1438
1439 hci_send_acl(pi->conn->hcon, skb, 0);
1440 }
1441
/*
 * Flush the transmit queue in streaming mode: stamp each queued I-frame
 * with the next TX sequence number, append the CRC16 FCS when enabled
 * and push it to HCI.  Streaming mode has no acknowledgements, so
 * frames are dequeued outright (never kept for retransmission); TX
 * sequence numbers wrap modulo 64.
 */
static void l2cap_streaming_send(struct sock *sk)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
		/* Merge the TX sequence number into the control field. */
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers the whole PDU except its own 2 bytes. */
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(sk, skb);

		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
	}
}
1463
1464 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1465 {
1466 struct l2cap_pinfo *pi = l2cap_pi(sk);
1467 struct sk_buff *skb, *tx_skb;
1468 u16 control, fcs;
1469
1470 skb = skb_peek(TX_QUEUE(sk));
1471 if (!skb)
1472 return;
1473
1474 do {
1475 if (bt_cb(skb)->tx_seq == tx_seq)
1476 break;
1477
1478 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1479 return;
1480
1481 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1482
1483 if (pi->remote_max_tx &&
1484 bt_cb(skb)->retries == pi->remote_max_tx) {
1485 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1486 return;
1487 }
1488
1489 tx_skb = skb_clone(skb, GFP_ATOMIC);
1490 bt_cb(skb)->retries++;
1491 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1492
1493 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1494 control |= L2CAP_CTRL_FINAL;
1495 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1496 }
1497
1498 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1499 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1500
1501 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1502
1503 if (pi->fcs == L2CAP_FCS_CRC16) {
1504 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1505 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1506 }
1507
1508 l2cap_do_send(sk, tx_skb);
1509 }
1510
1511 static int l2cap_ertm_send(struct sock *sk)
1512 {
1513 struct sk_buff *skb, *tx_skb;
1514 struct l2cap_pinfo *pi = l2cap_pi(sk);
1515 u16 control, fcs;
1516 int nsent = 0;
1517
1518 if (sk->sk_state != BT_CONNECTED)
1519 return -ENOTCONN;
1520
1521 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1522
1523 if (pi->remote_max_tx &&
1524 bt_cb(skb)->retries == pi->remote_max_tx) {
1525 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1526 break;
1527 }
1528
1529 tx_skb = skb_clone(skb, GFP_ATOMIC);
1530
1531 bt_cb(skb)->retries++;
1532
1533 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1534 control &= L2CAP_CTRL_SAR;
1535
1536 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1537 control |= L2CAP_CTRL_FINAL;
1538 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1539 }
1540 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1541 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1542 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1543
1544
1545 if (pi->fcs == L2CAP_FCS_CRC16) {
1546 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1547 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1548 }
1549
1550 l2cap_do_send(sk, tx_skb);
1551
1552 __mod_retrans_timer();
1553
1554 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1555 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1556
1557 pi->unacked_frames++;
1558 pi->frames_sent++;
1559
1560 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1561 sk->sk_send_head = NULL;
1562 else
1563 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1564
1565 nsent++;
1566 }
1567
1568 return nsent;
1569 }
1570
1571 static int l2cap_retransmit_frames(struct sock *sk)
1572 {
1573 struct l2cap_pinfo *pi = l2cap_pi(sk);
1574 int ret;
1575
1576 if (!skb_queue_empty(TX_QUEUE(sk)))
1577 sk->sk_send_head = TX_QUEUE(sk)->next;
1578
1579 pi->next_tx_seq = pi->expected_ack_seq;
1580 ret = l2cap_ertm_send(sk);
1581 return ret;
1582 }
1583
/*
 * Acknowledge received I-frames.  If we are locally busy, send an RNR
 * S-frame (and remember that via RNR_SENT); otherwise try to piggy-back
 * the ack on pending outgoing I-frames, falling back to an explicit RR
 * S-frame only when nothing was sent.
 */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	/* Outgoing I-frames carry the REQSEQ ack implicitly. */
	if (l2cap_ertm_send(sk) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1604
1605 static void l2cap_send_srejtail(struct sock *sk)
1606 {
1607 struct srej_list *tail;
1608 u16 control;
1609
1610 control = L2CAP_SUPER_SELECT_REJECT;
1611 control |= L2CAP_CTRL_FINAL;
1612
1613 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1614 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1615
1616 l2cap_send_sframe(l2cap_pi(sk), control);
1617 }
1618
/*
 * Copy 'len' bytes of user data from msg into skb: 'count' bytes go
 * directly into skb, the remainder is spread over MTU-sized
 * continuation fragments chained on skb's frag_list.  Returns the
 * number of bytes consumed or -EFAULT.
 *
 * NOTE(review): a failed fragment allocation also returns -EFAULT even
 * though bt_skb_send_alloc() reported a different error in 'err' —
 * consider propagating 'err' instead.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return -EFAULT;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1650
1651 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1652 {
1653 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1654 struct sk_buff *skb;
1655 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1656 struct l2cap_hdr *lh;
1657
1658 BT_DBG("sk %p len %d", sk, (int)len);
1659
1660 count = min_t(unsigned int, (conn->mtu - hlen), len);
1661 skb = bt_skb_send_alloc(sk, count + hlen,
1662 msg->msg_flags & MSG_DONTWAIT, &err);
1663 if (!skb)
1664 return ERR_PTR(-ENOMEM);
1665
1666 /* Create L2CAP header */
1667 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1668 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1669 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1670 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1671
1672 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1673 if (unlikely(err < 0)) {
1674 kfree_skb(skb);
1675 return ERR_PTR(err);
1676 }
1677 return skb;
1678 }
1679
1680 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1681 {
1682 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1683 struct sk_buff *skb;
1684 int err, count, hlen = L2CAP_HDR_SIZE;
1685 struct l2cap_hdr *lh;
1686
1687 BT_DBG("sk %p len %d", sk, (int)len);
1688
1689 count = min_t(unsigned int, (conn->mtu - hlen), len);
1690 skb = bt_skb_send_alloc(sk, count + hlen,
1691 msg->msg_flags & MSG_DONTWAIT, &err);
1692 if (!skb)
1693 return ERR_PTR(-ENOMEM);
1694
1695 /* Create L2CAP header */
1696 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1697 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1698 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1699
1700 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1701 if (unlikely(err < 0)) {
1702 kfree_skb(skb);
1703 return ERR_PTR(err);
1704 }
1705 return skb;
1706 }
1707
/*
 * Build one ERTM/streaming I-frame PDU: L2CAP header, 16-bit control
 * field, an optional SAR SDU-length field (present only in the first
 * PDU of a segmented SDU, when sdulen != 0) and room for the CRC16 FCS
 * when enabled.  The caller-supplied control word is stored as-is;
 * sequence numbers and the real FCS are filled in at transmit time —
 * the FCS slot is merely zero-reserved here.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + control */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (sdulen)
		hlen += 2;	/* SAR SDU-length field */

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* trailing FCS */

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Reserve the FCS slot; the value is computed before sending. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1752
/*
 * Segment an SDU larger than the remote MPS into a START / CONTINUE* /
 * END sequence of I-frame PDUs.  The PDUs are accumulated on a private
 * queue first, so a mid-stream allocation failure cannot leave a
 * partial SDU on the transmit queue; on success the whole run is
 * spliced onto TX_QUEUE at once.  Only the START PDU carries the total
 * SDU length.  Returns total bytes queued or a negative error.
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* First segment: MPS-sized payload, SDU length field = len. */
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Drop all segments built so far; nothing was queued
			 * on the real transmit queue yet. */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1798
1799 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1800 {
1801 struct sock *sk = sock->sk;
1802 struct l2cap_pinfo *pi = l2cap_pi(sk);
1803 struct sk_buff *skb;
1804 u16 control;
1805 int err;
1806
1807 BT_DBG("sock %p, sk %p", sock, sk);
1808
1809 err = sock_error(sk);
1810 if (err)
1811 return err;
1812
1813 if (msg->msg_flags & MSG_OOB)
1814 return -EOPNOTSUPP;
1815
1816 lock_sock(sk);
1817
1818 if (sk->sk_state != BT_CONNECTED) {
1819 err = -ENOTCONN;
1820 goto done;
1821 }
1822
1823 /* Connectionless channel */
1824 if (sk->sk_type == SOCK_DGRAM) {
1825 skb = l2cap_create_connless_pdu(sk, msg, len);
1826 if (IS_ERR(skb)) {
1827 err = PTR_ERR(skb);
1828 } else {
1829 l2cap_do_send(sk, skb);
1830 err = len;
1831 }
1832 goto done;
1833 }
1834
1835 switch (pi->mode) {
1836 case L2CAP_MODE_BASIC:
1837 /* Check outgoing MTU */
1838 if (len > pi->omtu) {
1839 err = -EMSGSIZE;
1840 goto done;
1841 }
1842
1843 /* Create a basic PDU */
1844 skb = l2cap_create_basic_pdu(sk, msg, len);
1845 if (IS_ERR(skb)) {
1846 err = PTR_ERR(skb);
1847 goto done;
1848 }
1849
1850 l2cap_do_send(sk, skb);
1851 err = len;
1852 break;
1853
1854 case L2CAP_MODE_ERTM:
1855 case L2CAP_MODE_STREAMING:
1856 /* Entire SDU fits into one PDU */
1857 if (len <= pi->remote_mps) {
1858 control = L2CAP_SDU_UNSEGMENTED;
1859 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1860 if (IS_ERR(skb)) {
1861 err = PTR_ERR(skb);
1862 goto done;
1863 }
1864 __skb_queue_tail(TX_QUEUE(sk), skb);
1865
1866 if (sk->sk_send_head == NULL)
1867 sk->sk_send_head = skb;
1868
1869 } else {
1870 /* Segment SDU into multiples PDUs */
1871 err = l2cap_sar_segment_sdu(sk, msg, len);
1872 if (err < 0)
1873 goto done;
1874 }
1875
1876 if (pi->mode == L2CAP_MODE_STREAMING) {
1877 l2cap_streaming_send(sk);
1878 } else {
1879 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1880 pi->conn_state && L2CAP_CONN_WAIT_F) {
1881 err = len;
1882 break;
1883 }
1884 err = l2cap_ertm_send(sk);
1885 }
1886
1887 if (err >= 0)
1888 err = len;
1889 break;
1890
1891 default:
1892 BT_DBG("bad state %1.1x", pi->mode);
1893 err = -EBADFD;
1894 }
1895
1896 done:
1897 release_sock(sk);
1898 return err;
1899 }
1900
/*
 * recvmsg() with deferred-setup support: the first read on a
 * BT_CONNECT2 socket that used BT_DEFER_SETUP completes the channel
 * establishment — it sends the pending connect response and, unless one
 * was already sent, the first configure request — and returns 0 without
 * any data.  All other reads go through plain bt_sock_recvmsg().
 */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;
		struct l2cap_conn *conn = l2cap_pi(sk)->conn;
		u8 buf[128];

		sk->sk_state = BT_CONFIG;

		/* Accept the deferred incoming connection. */
		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		/* Configure request already on the wire: nothing more to do. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
			release_sock(sk);
			return 0;
		}

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
1939
/*
 * Legacy SOL_L2CAP setsockopt handler.
 *
 * L2CAP_OPTIONS: the opts struct is pre-filled from the current channel
 * state before copy_from_user so a short optlen leaves the untouched
 * fields at their current values.  Not allowed once connected.
 * L2CAP_LM: link-mode flags mapped onto the security level plus the
 * role-switch and force-reliable booleans.
 *
 * NOTE(review): when opts.mode is invalid, err is set to -EINVAL but
 * pi->mode has already been overwritten and the remaining options are
 * still applied below — confirm whether that partial application is
 * intended.
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		if (sk->sk_state == BT_CONNECTED) {
			err = -EINVAL;
			break;
		}

		/* Defaults: whatever the channel currently uses. */
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = l2cap_pi(sk)->mode;
		opts.fcs      = l2cap_pi(sk)->fcs;
		opts.max_tx   = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->mode = opts.mode;
		switch (l2cap_pi(sk)->mode) {
		case L2CAP_MODE_BASIC:
			l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			if (!disable_ertm)
				break;
			/* fall through */
		default:
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->fcs  = opts.fcs;
		l2cap_pi(sk)->max_tx = opts.max_tx;
		l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Highest requested link-mode flag wins. */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2024
/*
 * setsockopt() dispatcher: SOL_L2CAP goes to the legacy handler; only
 * SOL_BLUETOOTH is handled here (BT_SECURITY and BT_DEFER_SETUP).
 *
 * BT_SECURITY is rejected for SOCK_DGRAM; a short optlen keeps the
 * BT_SECURITY_LOW default for any field the user did not supply.
 * BT_DEFER_SETUP is only meaningful before/while listening.
 */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2089
2090 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2091 {
2092 struct sock *sk = sock->sk;
2093 struct l2cap_options opts;
2094 struct l2cap_conninfo cinfo;
2095 int len, err = 0;
2096 u32 opt;
2097
2098 BT_DBG("sk %p", sk);
2099
2100 if (get_user(len, optlen))
2101 return -EFAULT;
2102
2103 lock_sock(sk);
2104
2105 switch (optname) {
2106 case L2CAP_OPTIONS:
2107 opts.imtu = l2cap_pi(sk)->imtu;
2108 opts.omtu = l2cap_pi(sk)->omtu;
2109 opts.flush_to = l2cap_pi(sk)->flush_to;
2110 opts.mode = l2cap_pi(sk)->mode;
2111 opts.fcs = l2cap_pi(sk)->fcs;
2112 opts.max_tx = l2cap_pi(sk)->max_tx;
2113 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2114
2115 len = min_t(unsigned int, len, sizeof(opts));
2116 if (copy_to_user(optval, (char *) &opts, len))
2117 err = -EFAULT;
2118
2119 break;
2120
2121 case L2CAP_LM:
2122 switch (l2cap_pi(sk)->sec_level) {
2123 case BT_SECURITY_LOW:
2124 opt = L2CAP_LM_AUTH;
2125 break;
2126 case BT_SECURITY_MEDIUM:
2127 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2128 break;
2129 case BT_SECURITY_HIGH:
2130 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2131 L2CAP_LM_SECURE;
2132 break;
2133 default:
2134 opt = 0;
2135 break;
2136 }
2137
2138 if (l2cap_pi(sk)->role_switch)
2139 opt |= L2CAP_LM_MASTER;
2140
2141 if (l2cap_pi(sk)->force_reliable)
2142 opt |= L2CAP_LM_RELIABLE;
2143
2144 if (put_user(opt, (u32 __user *) optval))
2145 err = -EFAULT;
2146 break;
2147
2148 case L2CAP_CONNINFO:
2149 if (sk->sk_state != BT_CONNECTED &&
2150 !(sk->sk_state == BT_CONNECT2 &&
2151 bt_sk(sk)->defer_setup)) {
2152 err = -ENOTCONN;
2153 break;
2154 }
2155
2156 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2157 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2158
2159 len = min_t(unsigned int, len, sizeof(cinfo));
2160 if (copy_to_user(optval, (char *) &cinfo, len))
2161 err = -EFAULT;
2162
2163 break;
2164
2165 default:
2166 err = -ENOPROTOOPT;
2167 break;
2168 }
2169
2170 release_sock(sk);
2171 return err;
2172 }
2173
2174 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2175 {
2176 struct sock *sk = sock->sk;
2177 struct bt_security sec;
2178 int len, err = 0;
2179
2180 BT_DBG("sk %p", sk);
2181
2182 if (level == SOL_L2CAP)
2183 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2184
2185 if (level != SOL_BLUETOOTH)
2186 return -ENOPROTOOPT;
2187
2188 if (get_user(len, optlen))
2189 return -EFAULT;
2190
2191 lock_sock(sk);
2192
2193 switch (optname) {
2194 case BT_SECURITY:
2195 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2196 && sk->sk_type != SOCK_RAW) {
2197 err = -EINVAL;
2198 break;
2199 }
2200
2201 sec.level = l2cap_pi(sk)->sec_level;
2202
2203 len = min_t(unsigned int, len, sizeof(sec));
2204 if (copy_to_user(optval, (char *) &sec, len))
2205 err = -EFAULT;
2206
2207 break;
2208
2209 case BT_DEFER_SETUP:
2210 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2211 err = -EINVAL;
2212 break;
2213 }
2214
2215 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2216 err = -EFAULT;
2217
2218 break;
2219
2220 default:
2221 err = -ENOPROTOOPT;
2222 break;
2223 }
2224
2225 release_sock(sk);
2226 return err;
2227 }
2228
/*
 * shutdown() both directions of an L2CAP socket.  For ERTM channels
 * first wait until all outstanding I-frames are acked, then close the
 * channel; honour SO_LINGER by waiting for BT_CLOSED up to the linger
 * time.  A pending socket error is returned if nothing else failed.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}

	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}
2259
2260 static int l2cap_sock_release(struct socket *sock)
2261 {
2262 struct sock *sk = sock->sk;
2263 int err;
2264
2265 BT_DBG("sock %p, sk %p", sock, sk);
2266
2267 if (!sk)
2268 return 0;
2269
2270 err = l2cap_sock_shutdown(sock, 2);
2271
2272 sock_orphan(sk);
2273 l2cap_sock_kill(sk);
2274 return err;
2275 }
2276
2277 static void l2cap_chan_ready(struct sock *sk)
2278 {
2279 struct sock *parent = bt_sk(sk)->parent;
2280
2281 BT_DBG("sk %p, parent %p", sk, parent);
2282
2283 l2cap_pi(sk)->conf_state = 0;
2284 l2cap_sock_clear_timer(sk);
2285
2286 if (!parent) {
2287 /* Outgoing channel.
2288 * Wake up socket sleeping on connect.
2289 */
2290 sk->sk_state = BT_CONNECTED;
2291 sk->sk_state_change(sk);
2292 } else {
2293 /* Incoming channel.
2294 * Wake up socket sleeping on accept.
2295 */
2296 parent->sk_data_ready(parent, 0);
2297 }
2298 }
2299
/* Copy frame to all raw sockets on that connection.
 * Best-effort fan-out: an OOM on clone or a full receive queue simply
 * drops the copy for that socket.  Runs under the channel-list read
 * lock, so GFP_ATOMIC is required. */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
2326
2327 /* ---- L2CAP signalling commands ---- */
/*
 * Build an L2CAP signalling PDU on the signalling CID: L2CAP header +
 * command header + dlen bytes of payload.  Payload that does not fit in
 * the connection MTU is spread over continuation fragments chained on
 * frag_list (fragments carry no L2CAP header).  Returns the skb, or
 * NULL on allocation failure (partially built chains are freed).
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Copy as much payload as fits after the two headers. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and every fragment chained so far. */
	kfree_skb(skb);
	return NULL;
}
2386
2387 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2388 {
2389 struct l2cap_conf_opt *opt = *ptr;
2390 int len;
2391
2392 len = L2CAP_CONF_OPT_SIZE + opt->len;
2393 *ptr += len;
2394
2395 *type = opt->type;
2396 *olen = opt->len;
2397
2398 switch (opt->len) {
2399 case 1:
2400 *val = *((u8 *) opt->val);
2401 break;
2402
2403 case 2:
2404 *val = __le16_to_cpu(*((__le16 *) opt->val));
2405 break;
2406
2407 case 4:
2408 *val = __le32_to_cpu(*((__le32 *) opt->val));
2409 break;
2410
2411 default:
2412 *val = (unsigned long) opt->val;
2413 break;
2414 }
2415
2416 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2417 return len;
2418 }
2419
2420 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2421 {
2422 struct l2cap_conf_opt *opt = *ptr;
2423
2424 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2425
2426 opt->type = type;
2427 opt->len = len;
2428
2429 switch (len) {
2430 case 1:
2431 *((u8 *) opt->val) = val;
2432 break;
2433
2434 case 2:
2435 *((__le16 *) opt->val) = cpu_to_le16(val);
2436 break;
2437
2438 case 4:
2439 *((__le32 *) opt->val) = cpu_to_le32(val);
2440 break;
2441
2442 default:
2443 memcpy(opt->val, (void *) val, len);
2444 break;
2445 }
2446
2447 *ptr += L2CAP_CONF_OPT_SIZE + len;
2448 }
2449
/* Ack timer expiry: acknowledge received I-frames that have not yet
 * been acked by outgoing traffic.  Timer (softirq) context. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2458
2459 static inline void l2cap_ertm_init(struct sock *sk)
2460 {
2461 l2cap_pi(sk)->expected_ack_seq = 0;
2462 l2cap_pi(sk)->unacked_frames = 0;
2463 l2cap_pi(sk)->buffer_seq = 0;
2464 l2cap_pi(sk)->num_acked = 0;
2465 l2cap_pi(sk)->frames_sent = 0;
2466
2467 setup_timer(&l2cap_pi(sk)->retrans_timer,
2468 l2cap_retrans_timeout, (unsigned long) sk);
2469 setup_timer(&l2cap_pi(sk)->monitor_timer,
2470 l2cap_monitor_timeout, (unsigned long) sk);
2471 setup_timer(&l2cap_pi(sk)->ack_timer,
2472 l2cap_ack_timeout, (unsigned long) sk);
2473
2474 __skb_queue_head_init(SREJ_QUEUE(sk));
2475 __skb_queue_head_init(BUSY_QUEUE(sk));
2476
2477 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2478
2479 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
2480 }
2481
2482 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2483 {
2484 switch (mode) {
2485 case L2CAP_MODE_STREAMING:
2486 case L2CAP_MODE_ERTM:
2487 if (l2cap_mode_supported(mode, remote_feat_mask))
2488 return mode;
2489 /* fall through */
2490 default:
2491 return L2CAP_MODE_BASIC;
2492 }
2493 }
2494
/* Build an L2CAP Configure Request for this channel into @data.
 *
 * On the first request (no requests or responses exchanged yet) the
 * channel mode may still be downgraded via l2cap_select_mode() unless
 * the mode is mandated locally (L2CAP_CONF_STATE2_DEVICE).  Options
 * (MTU, RFC, FCS) are then appended according to the chosen mode.
 *
 * Returns the number of bytes written, i.e. the request length to put
 * on the wire.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection happens only once, before any config exchange. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode is mandated by the local device: keep it as-is. */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* MTU option is only needed when differing from default. */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		/* Peer supports ERTM/streaming: explicitly request basic
		 * mode via an all-zero RFC option. */
		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = pi->tx_win;
		rfc.max_transmit    = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Clamp MPS so PDU + L2CAP/control/FCS overhead (10 bytes)
		 * fits in the link MTU. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2596
2597 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2598 {
2599 struct l2cap_pinfo *pi = l2cap_pi(sk);
2600 struct l2cap_conf_rsp *rsp = data;
2601 void *ptr = rsp->data;
2602 void *req = pi->conf_req;
2603 int len = pi->conf_len;
2604 int type, hint, olen;
2605 unsigned long val;
2606 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2607 u16 mtu = L2CAP_DEFAULT_MTU;
2608 u16 result = L2CAP_CONF_SUCCESS;
2609
2610 BT_DBG("sk %p", sk);
2611
2612 while (len >= L2CAP_CONF_OPT_SIZE) {
2613 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2614
2615 hint = type & L2CAP_CONF_HINT;
2616 type &= L2CAP_CONF_MASK;
2617
2618 switch (type) {
2619 case L2CAP_CONF_MTU:
2620 mtu = val;
2621 break;
2622
2623 case L2CAP_CONF_FLUSH_TO:
2624 pi->flush_to = val;
2625 break;
2626
2627 case L2CAP_CONF_QOS:
2628 break;
2629
2630 case L2CAP_CONF_RFC:
2631 if (olen == sizeof(rfc))
2632 memcpy(&rfc, (void *) val, olen);
2633 break;
2634
2635 case L2CAP_CONF_FCS:
2636 if (val == L2CAP_FCS_NONE)
2637 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2638
2639 break;
2640
2641 default:
2642 if (hint)
2643 break;
2644
2645 result = L2CAP_CONF_UNKNOWN;
2646 *((u8 *) ptr++) = type;
2647 break;
2648 }
2649 }
2650
2651 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2652 goto done;
2653
2654 switch (pi->mode) {
2655 case L2CAP_MODE_STREAMING:
2656 case L2CAP_MODE_ERTM:
2657 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2658 pi->mode = l2cap_select_mode(rfc.mode,
2659 pi->conn->feat_mask);
2660 break;
2661 }
2662
2663 if (pi->mode != rfc.mode)
2664 return -ECONNREFUSED;
2665
2666 break;
2667 }
2668
2669 done:
2670 if (pi->mode != rfc.mode) {
2671 result = L2CAP_CONF_UNACCEPT;
2672 rfc.mode = pi->mode;
2673
2674 if (pi->num_conf_rsp == 1)
2675 return -ECONNREFUSED;
2676
2677 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2678 sizeof(rfc), (unsigned long) &rfc);
2679 }
2680
2681
2682 if (result == L2CAP_CONF_SUCCESS) {
2683 /* Configure output options and let the other side know
2684 * which ones we don't like. */
2685
2686 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2687 result = L2CAP_CONF_UNACCEPT;
2688 else {
2689 pi->omtu = mtu;
2690 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2691 }
2692 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2693
2694 switch (rfc.mode) {
2695 case L2CAP_MODE_BASIC:
2696 pi->fcs = L2CAP_FCS_NONE;
2697 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2698 break;
2699
2700 case L2CAP_MODE_ERTM:
2701 pi->remote_tx_win = rfc.txwin_size;
2702 pi->remote_max_tx = rfc.max_transmit;
2703
2704 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2705 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2706
2707 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2708
2709 rfc.retrans_timeout =
2710 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2711 rfc.monitor_timeout =
2712 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2713
2714 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2715
2716 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2717 sizeof(rfc), (unsigned long) &rfc);
2718
2719 break;
2720
2721 case L2CAP_MODE_STREAMING:
2722 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2723 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2724
2725 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2726
2727 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2728
2729 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2730 sizeof(rfc), (unsigned long) &rfc);
2731
2732 break;
2733
2734 default:
2735 result = L2CAP_CONF_UNACCEPT;
2736
2737 memset(&rfc, 0, sizeof(rfc));
2738 rfc.mode = pi->mode;
2739 }
2740
2741 if (result == L2CAP_CONF_SUCCESS)
2742 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2743 }
2744 rsp->scid = cpu_to_le16(pi->dcid);
2745 rsp->result = cpu_to_le16(result);
2746 rsp->flags = cpu_to_le16(0x0000);
2747
2748 return ptr - data;
2749 }
2750
2751 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2752 {
2753 struct l2cap_pinfo *pi = l2cap_pi(sk);
2754 struct l2cap_conf_req *req = data;
2755 void *ptr = req->data;
2756 int type, olen;
2757 unsigned long val;
2758 struct l2cap_conf_rfc rfc;
2759
2760 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2761
2762 while (len >= L2CAP_CONF_OPT_SIZE) {
2763 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2764
2765 switch (type) {
2766 case L2CAP_CONF_MTU:
2767 if (val < L2CAP_DEFAULT_MIN_MTU) {
2768 *result = L2CAP_CONF_UNACCEPT;
2769 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
2770 } else
2771 pi->imtu = val;
2772 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2773 break;
2774
2775 case L2CAP_CONF_FLUSH_TO:
2776 pi->flush_to = val;
2777 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2778 2, pi->flush_to);
2779 break;
2780
2781 case L2CAP_CONF_RFC:
2782 if (olen == sizeof(rfc))
2783 memcpy(&rfc, (void *)val, olen);
2784
2785 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2786 rfc.mode != pi->mode)
2787 return -ECONNREFUSED;
2788
2789 pi->fcs = 0;
2790
2791 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2792 sizeof(rfc), (unsigned long) &rfc);
2793 break;
2794 }
2795 }
2796
2797 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2798 return -ECONNREFUSED;
2799
2800 pi->mode = rfc.mode;
2801
2802 if (*result == L2CAP_CONF_SUCCESS) {
2803 switch (rfc.mode) {
2804 case L2CAP_MODE_ERTM:
2805 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2806 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2807 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2808 break;
2809 case L2CAP_MODE_STREAMING:
2810 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2811 }
2812 }
2813
2814 req->dcid = cpu_to_le16(pi->dcid);
2815 req->flags = cpu_to_le16(0x0000);
2816
2817 return ptr - data;
2818 }
2819
2820 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2821 {
2822 struct l2cap_conf_rsp *rsp = data;
2823 void *ptr = rsp->data;
2824
2825 BT_DBG("sk %p", sk);
2826
2827 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2828 rsp->result = cpu_to_le16(result);
2829 rsp->flags = cpu_to_le16(flags);
2830
2831 return ptr - data;
2832 }
2833
2834 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2835 {
2836 struct l2cap_pinfo *pi = l2cap_pi(sk);
2837 int type, olen;
2838 unsigned long val;
2839 struct l2cap_conf_rfc rfc;
2840
2841 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2842
2843 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2844 return;
2845
2846 while (len >= L2CAP_CONF_OPT_SIZE) {
2847 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2848
2849 switch (type) {
2850 case L2CAP_CONF_RFC:
2851 if (olen == sizeof(rfc))
2852 memcpy(&rfc, (void *)val, olen);
2853 goto done;
2854 }
2855 }
2856
2857 done:
2858 switch (rfc.mode) {
2859 case L2CAP_MODE_ERTM:
2860 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2861 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2862 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2863 break;
2864 case L2CAP_MODE_STREAMING:
2865 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2866 }
2867 }
2868
/* Handle an incoming Command Reject.
 *
 * Only "command not understood" (reason 0x0000) is acted upon: if it
 * rejects our outstanding Information Request (matched by ident), mark
 * the feature-mask exchange as done with no features and kick off any
 * channels that were waiting on it.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;

	if (rej->reason != 0x0000)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
					cmd->ident == conn->info_ident) {
		del_timer(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2888
2889 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2890 {
2891 struct l2cap_chan_list *list = &conn->chan_list;
2892 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2893 struct l2cap_conn_rsp rsp;
2894 struct sock *parent, *uninitialized_var(sk);
2895 int result, status = L2CAP_CS_NO_INFO;
2896
2897 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2898 __le16 psm = req->psm;
2899
2900 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2901
2902 /* Check if we have socket listening on psm */
2903 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2904 if (!parent) {
2905 result = L2CAP_CR_BAD_PSM;
2906 goto sendresp;
2907 }
2908
2909 /* Check if the ACL is secure enough (if not SDP) */
2910 if (psm != cpu_to_le16(0x0001) &&
2911 !hci_conn_check_link_mode(conn->hcon)) {
2912 conn->disc_reason = 0x05;
2913 result = L2CAP_CR_SEC_BLOCK;
2914 goto response;
2915 }
2916
2917 result = L2CAP_CR_NO_MEM;
2918
2919 /* Check for backlog size */
2920 if (sk_acceptq_is_full(parent)) {
2921 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2922 goto response;
2923 }
2924
2925 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2926 if (!sk)
2927 goto response;
2928
2929 write_lock_bh(&list->lock);
2930
2931 /* Check if we already have channel with that dcid */
2932 if (__l2cap_get_chan_by_dcid(list, scid)) {
2933 write_unlock_bh(&list->lock);
2934 sock_set_flag(sk, SOCK_ZAPPED);
2935 l2cap_sock_kill(sk);
2936 goto response;
2937 }
2938
2939 hci_conn_hold(conn->hcon);
2940
2941 l2cap_sock_init(sk, parent);
2942 bacpy(&bt_sk(sk)->src, conn->src);
2943 bacpy(&bt_sk(sk)->dst, conn->dst);
2944 l2cap_pi(sk)->psm = psm;
2945 l2cap_pi(sk)->dcid = scid;
2946
2947 __l2cap_chan_add(conn, sk, parent);
2948 dcid = l2cap_pi(sk)->scid;
2949
2950 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2951
2952 l2cap_pi(sk)->ident = cmd->ident;
2953
2954 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2955 if (l2cap_check_security(sk)) {
2956 if (bt_sk(sk)->defer_setup) {
2957 sk->sk_state = BT_CONNECT2;
2958 result = L2CAP_CR_PEND;
2959 status = L2CAP_CS_AUTHOR_PEND;
2960 parent->sk_data_ready(parent, 0);
2961 } else {
2962 sk->sk_state = BT_CONFIG;
2963 result = L2CAP_CR_SUCCESS;
2964 status = L2CAP_CS_NO_INFO;
2965 }
2966 } else {
2967 sk->sk_state = BT_CONNECT2;
2968 result = L2CAP_CR_PEND;
2969 status = L2CAP_CS_AUTHEN_PEND;
2970 }
2971 } else {
2972 sk->sk_state = BT_CONNECT2;
2973 result = L2CAP_CR_PEND;
2974 status = L2CAP_CS_NO_INFO;
2975 }
2976
2977 write_unlock_bh(&list->lock);
2978
2979 response:
2980 bh_unlock_sock(parent);
2981
2982 sendresp:
2983 rsp.scid = cpu_to_le16(scid);
2984 rsp.dcid = cpu_to_le16(dcid);
2985 rsp.result = cpu_to_le16(result);
2986 rsp.status = cpu_to_le16(status);
2987 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2988
2989 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2990 struct l2cap_info_req info;
2991 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2992
2993 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2994 conn->info_ident = l2cap_get_ident(conn);
2995
2996 mod_timer(&conn->info_timer, jiffies +
2997 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2998
2999 l2cap_send_cmd(conn, conn->info_ident,
3000 L2CAP_INFO_REQ, sizeof(info), &info);
3001 }
3002
3003 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
3004 result == L2CAP_CR_SUCCESS) {
3005 u8 buf[128];
3006 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3007 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3008 l2cap_build_conf_req(sk, buf), buf);
3009 l2cap_pi(sk)->num_conf_req++;
3010 }
3011
3012 return 0;
3013 }
3014
/* Handle an incoming Connection Response for a channel we initiated.
 *
 * The channel is looked up by scid when the peer echoed one, otherwise
 * by the request ident.  On success the channel moves to BT_CONFIG and,
 * if not already done, we send our first Configure Request.  A pending
 * result just flags CONNECT_PEND; any other result tears the channel
 * down.
 *
 * NOTE(review): the lookup helpers appear to return the socket locked —
 * only bh_unlock_sock() is called here; confirm against their
 * definitions.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Configure Request already sent; nothing more to do. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
3068
3069 static inline void set_default_fcs(struct l2cap_pinfo *pi)
3070 {
3071 /* FCS is enabled only in ERTM or streaming mode, if one or both
3072 * sides request it.
3073 */
3074 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
3075 pi->fcs = L2CAP_FCS_NONE;
3076 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
3077 pi->fcs = L2CAP_FCS_CRC16;
3078 }
3079
/* Handle an incoming Configure Request.
 *
 * Option data may span multiple requests (continuation flag 0x0001);
 * fragments are accumulated in pi->conf_req until a request without the
 * flag arrives, then the whole block is parsed and answered.  When both
 * directions are configured the channel becomes BT_CONNECTED.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	/* Already disconnecting: ignore further configuration. */
	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unacceptable mode: give up and disconnect. */
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: bring the channel up. */
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
3163
3164 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3165 {
3166 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3167 u16 scid, flags, result;
3168 struct sock *sk;
3169 int len = cmd->len - sizeof(*rsp);
3170
3171 scid = __le16_to_cpu(rsp->scid);
3172 flags = __le16_to_cpu(rsp->flags);
3173 result = __le16_to_cpu(rsp->result);
3174
3175 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3176 scid, flags, result);
3177
3178 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3179 if (!sk)
3180 return 0;
3181
3182 switch (result) {
3183 case L2CAP_CONF_SUCCESS:
3184 l2cap_conf_rfc_get(sk, rsp->data, len);
3185 break;
3186
3187 case L2CAP_CONF_UNACCEPT:
3188 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3189 char req[64];
3190
3191 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3192 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3193 goto done;
3194 }
3195
3196 /* throw out any old stored conf requests */
3197 result = L2CAP_CONF_SUCCESS;
3198 len = l2cap_parse_conf_rsp(sk, rsp->data,
3199 len, req, &result);
3200 if (len < 0) {
3201 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3202 goto done;
3203 }
3204
3205 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3206 L2CAP_CONF_REQ, len, req);
3207 l2cap_pi(sk)->num_conf_req++;
3208 if (result != L2CAP_CONF_SUCCESS)
3209 goto done;
3210 break;
3211 }
3212
3213 default:
3214 sk->sk_err = ECONNRESET;
3215 l2cap_sock_set_timer(sk, HZ * 5);
3216 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3217 goto done;
3218 }
3219
3220 if (flags & 0x01)
3221 goto done;
3222
3223 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3224
3225 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3226 set_default_fcs(l2cap_pi(sk));
3227
3228 sk->sk_state = BT_CONNECTED;
3229 l2cap_pi(sk)->next_tx_seq = 0;
3230 l2cap_pi(sk)->expected_tx_seq = 0;
3231 __skb_queue_head_init(TX_QUEUE(sk));
3232 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3233 l2cap_ertm_init(sk);
3234
3235 l2cap_chan_ready(sk);
3236 }
3237
3238 done:
3239 bh_unlock_sock(sk);
3240 return 0;
3241 }
3242
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, shut the socket down and remove the channel.
 *
 * The peer addresses us by OUR scid (its dcid), so the lookup uses the
 * request's dcid field.  l2cap_sock_kill() runs after bh_unlock_sock()
 * since the channel has already been unlinked.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3271
3272 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3273 {
3274 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3275 u16 dcid, scid;
3276 struct sock *sk;
3277
3278 scid = __le16_to_cpu(rsp->scid);
3279 dcid = __le16_to_cpu(rsp->dcid);
3280
3281 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3282
3283 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3284 if (!sk)
3285 return 0;
3286
3287 l2cap_chan_del(sk, 0);
3288 bh_unlock_sock(sk);
3289
3290 l2cap_sock_kill(sk);
3291 return 0;
3292 }
3293
/* Handle an incoming Information Request.
 *
 * Answers feature-mask and fixed-channel queries with our capabilities;
 * any other info type gets a NOTSUPP response.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 8 bytes: 4-byte info_rsp header + 32-bit feature mask. */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 12 bytes: 4-byte header + 8-byte fixed-channel bitmap
		 * (copied at offset 4, right after the header). */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3333
/* Handle an incoming Information Response.
 *
 * Completes the feature-mask exchange: on a feature-mask answer we may
 * chain a fixed-channel query; once the exchange is finished (or has
 * failed) the connection's pending channels are started.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused: proceed with no extended features. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed-channel query before
			 * declaring the exchange done. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3381
/* Process the signalling channel payload of an ACL frame.
 *
 * The skb may carry several concatenated signalling commands; each is
 * dispatched to its handler in turn.  A handler returning an error
 * causes a Command Reject to be sent for that command.  The skb is
 * consumed (freed) unconditionally.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw (sniffer) sockets a copy first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Truncated payload or reserved ident 0: stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo back the request payload verbatim. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload. */
		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
3471
3472 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3473 {
3474 u16 our_fcs, rcv_fcs;
3475 int hdr_size = L2CAP_HDR_SIZE + 2;
3476
3477 if (pi->fcs == L2CAP_FCS_CRC16) {
3478 skb_trim(skb, skb->len - 2);
3479 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3480 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3481
3482 if (our_fcs != rcv_fcs)
3483 return -EBADMSG;
3484 }
3485 return 0;
3486 }
3487
/* After an F-bit/poll exchange, resume transmission: announce local
 * busy with an RNR if set, retransmit if the remote was busy, push
 * pending I-frames, and fall back to an RR if nothing was sent so the
 * peer still gets our current ReqSeq acknowledgement.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	/* Nothing went out and we are not busy: send a plain RR so the
	 * acknowledgement still reaches the peer. */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
3514
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by TxSeq relative to buffer_seq (sequence numbers are modulo
 * 64, so distances are computed as offsets from buffer_seq).
 *
 * Returns 0 on insertion, -EINVAL if a frame with the same TxSeq is
 * already queued (duplicate).
 */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Distance of the new frame from the current window base. */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame that is further from the window base
		 * than the new one: insert before it. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Largest offset seen so far: append at the tail. */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
3557
/* Reassemble an SDU from ERTM I-frames according to the SAR bits in
 * @control and deliver it to the socket receive queue.
 *
 * Returns 0 when the frame was consumed (delivered, buffered, dropped
 * or the channel was disconnected), or a negative error (-ENOMEM /
 * sock_queue_rcv_skb error) that signals local busy to the caller —
 * in that case SAR_RETRY is set so the final copy is not repeated when
 * the END frame is re-processed.
 *
 * Note: the drop label intentionally falls through into disconnect —
 * an SDU that violates the announced length tears the channel down.
 */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented frame must not arrive mid-reassembly. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two payload bytes carry the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* Skip the copy on a retry: the data was already appended
		 * before the previous delivery attempt failed. */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

disconnect:
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
3665
/* Try to drain the busy queue after a local-busy condition.
 *
 * Returns 0 when the queue is fully drained (local busy cleared and,
 * if an RNR had been sent, an RR with the poll bit is issued to resume
 * the peer); -EBUSY when the receiver is still congested, with the
 * failing skb put back at the head of the queue.
 */
static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			/* Still busy: requeue and report. */
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer we were busy: poll with RR to restart it. */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
3705
/* Workqueue handler that waits for the local-busy condition to clear.
 * It repeatedly sleeps in HZ/5 slices and retries pushing the buffered
 * frames upstream, giving up after L2CAP_LOCAL_BUSY_TRIES attempts by
 * disconnecting the channel.  Runs with the socket lock held except
 * while actually sleeping.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		/* schedule_timeout() may have returned 0; re-arm. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep without the socket lock held. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		/* Zero means the busy queue fully drained. */
		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3752
3753 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3754 {
3755 struct l2cap_pinfo *pi = l2cap_pi(sk);
3756 int sctrl, err;
3757
3758 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3759 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3760 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3761 return l2cap_try_push_rx_skb(sk);
3762
3763
3764 }
3765
3766 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3767 if (err >= 0) {
3768 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3769 return err;
3770 }
3771
3772 /* Busy Condition */
3773 BT_DBG("sk %p, Enter local busy", sk);
3774
3775 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3776 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3777 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3778
3779 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3780 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3781 l2cap_send_sframe(pi, sctrl);
3782
3783 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3784
3785 del_timer(&pi->ack_timer);
3786
3787 queue_work(_busy_wq, &pi->busy_work);
3788
3789 return err;
3790 }
3791
3792 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3793 {
3794 struct l2cap_pinfo *pi = l2cap_pi(sk);
3795 struct sk_buff *_skb;
3796 int err = -EINVAL;
3797
3798 /*
3799 * TODO: We have to notify the userland if some data is lost with the
3800 * Streaming Mode.
3801 */
3802
3803 switch (control & L2CAP_CTRL_SAR) {
3804 case L2CAP_SDU_UNSEGMENTED:
3805 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3806 kfree_skb(pi->sdu);
3807 break;
3808 }
3809
3810 err = sock_queue_rcv_skb(sk, skb);
3811 if (!err)
3812 return 0;
3813
3814 break;
3815
3816 case L2CAP_SDU_START:
3817 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3818 kfree_skb(pi->sdu);
3819 break;
3820 }
3821
3822 pi->sdu_len = get_unaligned_le16(skb->data);
3823 skb_pull(skb, 2);
3824
3825 if (pi->sdu_len > pi->imtu) {
3826 err = -EMSGSIZE;
3827 break;
3828 }
3829
3830 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3831 if (!pi->sdu) {
3832 err = -ENOMEM;
3833 break;
3834 }
3835
3836 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3837
3838 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3839 pi->partial_sdu_len = skb->len;
3840 err = 0;
3841 break;
3842
3843 case L2CAP_SDU_CONTINUE:
3844 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3845 break;
3846
3847 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3848
3849 pi->partial_sdu_len += skb->len;
3850 if (pi->partial_sdu_len > pi->sdu_len)
3851 kfree_skb(pi->sdu);
3852 else
3853 err = 0;
3854
3855 break;
3856
3857 case L2CAP_SDU_END:
3858 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3859 break;
3860
3861 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3862
3863 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3864 pi->partial_sdu_len += skb->len;
3865
3866 if (pi->partial_sdu_len > pi->imtu)
3867 goto drop;
3868
3869 if (pi->partial_sdu_len == pi->sdu_len) {
3870 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3871 err = sock_queue_rcv_skb(sk, _skb);
3872 if (err < 0)
3873 kfree_skb(_skb);
3874 }
3875 err = 0;
3876
3877 drop:
3878 kfree_skb(pi->sdu);
3879 break;
3880 }
3881
3882 kfree_skb(skb);
3883 return err;
3884 }
3885
3886 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3887 {
3888 struct sk_buff *skb;
3889 u16 control;
3890
3891 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3892 if (bt_cb(skb)->tx_seq != tx_seq)
3893 break;
3894
3895 skb = skb_dequeue(SREJ_QUEUE(sk));
3896 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3897 l2cap_ertm_reassembly_sdu(sk, skb, control);
3898 l2cap_pi(sk)->buffer_seq_srej =
3899 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3900 tx_seq = (tx_seq + 1) % 64;
3901 }
3902 }
3903
3904 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3905 {
3906 struct l2cap_pinfo *pi = l2cap_pi(sk);
3907 struct srej_list *l, *tmp;
3908 u16 control;
3909
3910 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3911 if (l->tx_seq == tx_seq) {
3912 list_del(&l->list);
3913 kfree(l);
3914 return;
3915 }
3916 control = L2CAP_SUPER_SELECT_REJECT;
3917 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3918 l2cap_send_sframe(pi, control);
3919 list_del(&l->list);
3920 list_add_tail(&l->list, SREJ_LIST(sk));
3921 }
3922 }
3923
3924 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3925 {
3926 struct l2cap_pinfo *pi = l2cap_pi(sk);
3927 struct srej_list *new;
3928 u16 control;
3929
3930 while (tx_seq != pi->expected_tx_seq) {
3931 control = L2CAP_SUPER_SELECT_REJECT;
3932 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3933 l2cap_send_sframe(pi, control);
3934
3935 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3936 new->tx_seq = pi->expected_tx_seq;
3937 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3938 list_add_tail(&new->list, SREJ_LIST(sk));
3939 }
3940 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3941 }
3942
3943 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3944 {
3945 struct l2cap_pinfo *pi = l2cap_pi(sk);
3946 u8 tx_seq = __get_txseq(rx_control);
3947 u8 req_seq = __get_reqseq(rx_control);
3948 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3949 int tx_seq_offset, expected_tx_seq_offset;
3950 int num_to_ack = (pi->tx_win/6) + 1;
3951 int err = 0;
3952
3953 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3954 rx_control);
3955
3956 if (L2CAP_CTRL_FINAL & rx_control &&
3957 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3958 del_timer(&pi->monitor_timer);
3959 if (pi->unacked_frames > 0)
3960 __mod_retrans_timer();
3961 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3962 }
3963
3964 pi->expected_ack_seq = req_seq;
3965 l2cap_drop_acked_frames(sk);
3966
3967 if (tx_seq == pi->expected_tx_seq)
3968 goto expected;
3969
3970 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3971 if (tx_seq_offset < 0)
3972 tx_seq_offset += 64;
3973
3974 /* invalid tx_seq */
3975 if (tx_seq_offset >= pi->tx_win) {
3976 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3977 goto drop;
3978 }
3979
3980 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3981 goto drop;
3982
3983 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3984 struct srej_list *first;
3985
3986 first = list_first_entry(SREJ_LIST(sk),
3987 struct srej_list, list);
3988 if (tx_seq == first->tx_seq) {
3989 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3990 l2cap_check_srej_gap(sk, tx_seq);
3991
3992 list_del(&first->list);
3993 kfree(first);
3994
3995 if (list_empty(SREJ_LIST(sk))) {
3996 pi->buffer_seq = pi->buffer_seq_srej;
3997 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3998 l2cap_send_ack(pi);
3999 BT_DBG("sk %p, Exit SREJ_SENT", sk);
4000 }
4001 } else {
4002 struct srej_list *l;
4003
4004 /* duplicated tx_seq */
4005 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
4006 goto drop;
4007
4008 list_for_each_entry(l, SREJ_LIST(sk), list) {
4009 if (l->tx_seq == tx_seq) {
4010 l2cap_resend_srejframe(sk, tx_seq);
4011 return 0;
4012 }
4013 }
4014 l2cap_send_srejframe(sk, tx_seq);
4015 }
4016 } else {
4017 expected_tx_seq_offset =
4018 (pi->expected_tx_seq - pi->buffer_seq) % 64;
4019 if (expected_tx_seq_offset < 0)
4020 expected_tx_seq_offset += 64;
4021
4022 /* duplicated tx_seq */
4023 if (tx_seq_offset < expected_tx_seq_offset)
4024 goto drop;
4025
4026 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
4027
4028 BT_DBG("sk %p, Enter SREJ", sk);
4029
4030 INIT_LIST_HEAD(SREJ_LIST(sk));
4031 pi->buffer_seq_srej = pi->buffer_seq;
4032
4033 __skb_queue_head_init(SREJ_QUEUE(sk));
4034 __skb_queue_head_init(BUSY_QUEUE(sk));
4035 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4036
4037 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
4038
4039 l2cap_send_srejframe(sk, tx_seq);
4040
4041 del_timer(&pi->ack_timer);
4042 }
4043 return 0;
4044
4045 expected:
4046 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4047
4048 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4049 bt_cb(skb)->tx_seq = tx_seq;
4050 bt_cb(skb)->sar = sar;
4051 __skb_queue_tail(SREJ_QUEUE(sk), skb);
4052 return 0;
4053 }
4054
4055 err = l2cap_push_rx_skb(sk, skb, rx_control);
4056 if (err < 0)
4057 return 0;
4058
4059 if (rx_control & L2CAP_CTRL_FINAL) {
4060 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4061 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4062 else
4063 l2cap_retransmit_frames(sk);
4064 }
4065
4066 __mod_ack_timer();
4067
4068 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4069 if (pi->num_acked == num_to_ack - 1)
4070 l2cap_send_ack(pi);
4071
4072 return 0;
4073
4074 drop:
4075 kfree_skb(skb);
4076 return 0;
4077 }
4078
/* Handle a Receiver Ready (RR) S-frame: ack outstanding frames, then
 * react according to the poll/final bits of the ERTM state machine.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: we must answer with F=1. */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Final answer to our poll. */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* Skip the retransmit if a REJ already handled it. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			l2cap_send_ack(pi);
		} else {
			l2cap_ertm_send(sk);
		}
	}
}
4123
/* Handle a Reject (REJ) S-frame: frames up to req_seq are acked, the
 * remainder are retransmitted.
 */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* REJ with F=1 answers our poll: retransmit only if a
		 * previous REJ did not already trigger it (REJ_ACT). */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		/* Remember the retransmit while a poll is in flight so
		 * the final response does not retransmit again. */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Selective Reject (SREJ) S-frame: retransmit the single frame
 * identified by req_seq, with poll/final bookkeeping (SREJ_ACT tracks a
 * retransmit performed while our poll was outstanding).
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P=1 also acknowledges up to req_seq. */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Skip the retransmit if this SREJ was already served
		 * while the poll was pending. */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
4184
/* Handle a Receiver Not Ready (RNR) S-frame: mark the peer busy, ack
 * outstanding frames, and stop retransmitting until the peer recovers.
 */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* No SREJ recovery pending: stop retransmitting and, if
		 * polled, answer immediately with F=1. */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
4211
4212 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4213 {
4214 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4215
4216 if (L2CAP_CTRL_FINAL & rx_control &&
4217 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4218 del_timer(&l2cap_pi(sk)->monitor_timer);
4219 if (l2cap_pi(sk)->unacked_frames > 0)
4220 __mod_retrans_timer();
4221 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4222 }
4223
4224 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4225 case L2CAP_SUPER_RCV_READY:
4226 l2cap_data_channel_rrframe(sk, rx_control);
4227 break;
4228
4229 case L2CAP_SUPER_REJECT:
4230 l2cap_data_channel_rejframe(sk, rx_control);
4231 break;
4232
4233 case L2CAP_SUPER_SELECT_REJECT:
4234 l2cap_data_channel_srejframe(sk, rx_control);
4235 break;
4236
4237 case L2CAP_SUPER_RCV_NOT_READY:
4238 l2cap_data_channel_rnrframe(sk, rx_control);
4239 break;
4240 }
4241
4242 kfree_skb(skb);
4243 return 0;
4244 }
4245
/* Validate and dispatch one ERTM frame: verify the FCS, compute the
 * payload length net of SAR/FCS overhead, validate req_seq against the
 * transmit window, then hand the frame to the I-frame or S-frame
 * handler.  Always returns 0; the skb is consumed or freed.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* SAR-start I-frames carry a 2-byte SDU length field. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		/* A negative length means the frame was too short to
		 * even hold its SAR/FCS fields. */
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames must carry no payload at all. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
4315
/* Dispatch an skb arriving on a data channel (looked up by source CID)
 * according to the channel mode: basic, ERTM or streaming.  The skb is
 * always consumed (queued, backlogged or freed), and the socket lock
 * taken by l2cap_get_chan_by_scid() is released before returning.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Process in softirq context unless a user context owns
		 * the socket, in which case defer via the backlog. */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* Start-of-SDU frames carry a 2-byte SDU length. */
		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* S-frames are invalid in streaming mode. */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Streaming mode never retransmits: on a sequence gap,
		 * just resync to the received number. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
4403
4404 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4405 {
4406 struct sock *sk;
4407
4408 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4409 if (!sk)
4410 goto drop;
4411
4412 BT_DBG("sk %p, len %d", sk, skb->len);
4413
4414 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4415 goto drop;
4416
4417 if (l2cap_pi(sk)->imtu < skb->len)
4418 goto drop;
4419
4420 if (!sock_queue_rcv_skb(sk, skb))
4421 goto done;
4422
4423 drop:
4424 kfree_skb(skb);
4425
4426 done:
4427 if (sk)
4428 bh_unlock_sock(sk);
4429 return 0;
4430 }
4431
4432 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4433 {
4434 struct l2cap_hdr *lh = (void *) skb->data;
4435 u16 cid, len;
4436 __le16 psm;
4437
4438 skb_pull(skb, L2CAP_HDR_SIZE);
4439 cid = __le16_to_cpu(lh->cid);
4440 len = __le16_to_cpu(lh->len);
4441
4442 if (len != skb->len) {
4443 kfree_skb(skb);
4444 return;
4445 }
4446
4447 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4448
4449 switch (cid) {
4450 case L2CAP_CID_SIGNALING:
4451 l2cap_sig_channel(conn, skb);
4452 break;
4453
4454 case L2CAP_CID_CONN_LESS:
4455 psm = get_unaligned_le16(skb->data);
4456 skb_pull(skb, 2);
4457 l2cap_conless_channel(conn, psm, skb);
4458 break;
4459
4460 default:
4461 l2cap_data_channel(conn, cid, skb);
4462 break;
4463 }
4464 }
4465
4466 /* ---- L2CAP interface with lower layer (HCI) ---- */
4467
4468 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4469 {
4470 int exact = 0, lm1 = 0, lm2 = 0;
4471 register struct sock *sk;
4472 struct hlist_node *node;
4473
4474 if (type != ACL_LINK)
4475 return -EINVAL;
4476
4477 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4478
4479 /* Find listening sockets and check their link_mode */
4480 read_lock(&l2cap_sk_list.lock);
4481 sk_for_each(sk, node, &l2cap_sk_list.head) {
4482 if (sk->sk_state != BT_LISTEN)
4483 continue;
4484
4485 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4486 lm1 |= HCI_LM_ACCEPT;
4487 if (l2cap_pi(sk)->role_switch)
4488 lm1 |= HCI_LM_MASTER;
4489 exact++;
4490 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4491 lm2 |= HCI_LM_ACCEPT;
4492 if (l2cap_pi(sk)->role_switch)
4493 lm2 |= HCI_LM_MASTER;
4494 }
4495 }
4496 read_unlock(&l2cap_sk_list.lock);
4497
4498 return exact ? lm1 : lm2;
4499 }
4500
4501 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4502 {
4503 struct l2cap_conn *conn;
4504
4505 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4506
4507 if (hcon->type != ACL_LINK)
4508 return -EINVAL;
4509
4510 if (!status) {
4511 conn = l2cap_conn_add(hcon, status);
4512 if (conn)
4513 l2cap_conn_ready(conn);
4514 } else
4515 l2cap_conn_del(hcon, bt_err(status));
4516
4517 return 0;
4518 }
4519
/* HCI disconnect-indication hook: report the HCI reason code to use
 * for the disconnect.  0x13 is "Remote User Terminated Connection",
 * used when there is no L2CAP state to consult.
 */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (hcon->type != ACL_LINK || !conn)
		return 0x13;

	return conn->disc_reason;
}
4531
/* HCI disconnect-complete hook: tear down the L2CAP connection. */
static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (hcon->type != ACL_LINK)
		return -EINVAL;

	l2cap_conn_del(hcon, bt_err(reason));

	return 0;
}
4543
4544 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4545 {
4546 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4547 return;
4548
4549 if (encrypt == 0x00) {
4550 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4551 l2cap_sock_clear_timer(sk);
4552 l2cap_sock_set_timer(sk, HZ * 5);
4553 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4554 __l2cap_sock_close(sk, ECONNREFUSED);
4555 } else {
4556 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4557 l2cap_sock_clear_timer(sk);
4558 }
4559 }
4560
/* HCI security (authentication/encryption) result callback.  Walks
 * every channel on the connection and advances its state machine:
 * established channels get an encryption check; channels waiting on
 * security either send their deferred connect request/response or are
 * scheduled for teardown.  Runs under the channel-list read lock with
 * each socket bh-locked in turn.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Skip channels whose connect request is already in flight. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: send the deferred
				 * connection request now. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: close shortly. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
4634
/* HCI ACL data input path: reassemble L2CAP PDUs that arrive split
 * across multiple ACL fragments.  A start fragment allocates a buffer
 * sized from the L2CAP basic header; continuation fragments are
 * appended until the PDU is complete, then handed to l2cap_recv_frame().
 * Any inconsistency marks the connection unreliable and drops the data.
 * Always consumes @skb.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start while a reassembly is pending means the
		 * previous PDU was truncated: discard it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Need at least the 2-byte length field of the header. */
		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* A continuation fragment without a pending start. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4722
/* debugfs dump: one line per L2CAP socket — addresses, state, PSM,
 * CIDs, MTUs and security level.
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
4745
/* debugfs open hook: bind the single-shot seq_file show handler. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4750
/* File operations for the /sys/kernel/debug/bluetooth/l2cap node. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Handle of the debugfs file, created in l2cap_init(). */
static struct dentry *l2cap_debugfs;

/* Socket-level operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};

/* Registered with the Bluetooth socket layer for BTPROTO_L2CAP. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};

/* HCI protocol hooks wiring the ACL data/event path into L2CAP. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4796
4797 static int __init l2cap_init(void)
4798 {
4799 int err;
4800
4801 err = proto_register(&l2cap_proto, 0);
4802 if (err < 0)
4803 return err;
4804
4805 _busy_wq = create_singlethread_workqueue("l2cap");
4806 if (!_busy_wq)
4807 goto error;
4808
4809 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4810 if (err < 0) {
4811 BT_ERR("L2CAP socket registration failed");
4812 goto error;
4813 }
4814
4815 err = hci_register_proto(&l2cap_hci_proto);
4816 if (err < 0) {
4817 BT_ERR("L2CAP protocol registration failed");
4818 bt_sock_unregister(BTPROTO_L2CAP);
4819 goto error;
4820 }
4821
4822 if (bt_debugfs) {
4823 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4824 bt_debugfs, NULL, &l2cap_debugfs_fops);
4825 if (!l2cap_debugfs)
4826 BT_ERR("Failed to create L2CAP debug file");
4827 }
4828
4829 BT_INFO("L2CAP ver %s", VERSION);
4830 BT_INFO("L2CAP socket layer initialized");
4831
4832 return 0;
4833
4834 error:
4835 proto_unregister(&l2cap_proto);
4836 return err;
4837 }
4838
/* Module teardown: reverse of l2cap_init().  The busy workqueue is
 * flushed before destruction so no l2cap_busy_work is left running.
 */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
4854
/* Dummy exported function whose only purpose is to trigger automatic
 * L2CAP module loading: other modules that use L2CAP sockets but no
 * other symbols from this module call it to pull in the dependency. */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
4862
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Set to true to refuse ERTM even when peers offer it (debug aid). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");
This page took 0.210118 seconds and 5 git commands to generate.