Bluetooth: Fix ERTM error reporting to the userspace
[deliverable/linux.git] / net / bluetooth / l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth L2CAP core and sockets. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
47 #include <net/sock.h>
48
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
51
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
55
#define VERSION "2.14"

/* Module parameter: ERTM/streaming channel modes are opt-in (off by default;
 * see l2cap_sock_init/connect/listen which gate on this flag). */
static int enable_ertm = 0;

/* L2CAP feature mask and fixed-channel map for this host. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };

static const struct proto_ops l2cap_sock_ops;

/* Workqueue for deferred "local busy" processing (see l2cap_busy_work). */
static struct workqueue_struct *_busy_wq;

/* Global list of all L2CAP sockets, guarded by its rwlock. */
static struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

static void l2cap_busy_work(struct work_struct *work);

static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);

/* ---- L2CAP timers ---- */
/* Socket timer expired: close the socket with an error code reflecting the
 * phase the connection was in.  Runs in timer (softirq) context, hence
 * bh_lock_sock and the kill being deferred until after the unlock. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	/* Timing out mid-setup (or in a secured connect) reads as a refusal
	 * from the peer; anything else is a plain timeout. */
	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
				l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* l2cap_sock_kill() must be called on the unlocked socket. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
105
/* Arm (or re-arm) the socket timer to fire @timeout jiffies from now. */
static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
111
/* Cancel a pending socket timer, dropping its reference if it was armed. */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
117
118 /* ---- L2CAP channels ---- */
119 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
120 {
121 struct sock *s;
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->dcid == cid)
124 break;
125 }
126 return s;
127 }
128
129 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
130 {
131 struct sock *s;
132 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
133 if (l2cap_pi(s)->scid == cid)
134 break;
135 }
136 return s;
137 }
138
/* Find channel with given SCID.
 * Returns the socket with its bh lock held (caller must bh_unlock_sock),
 * or NULL.  The lock is taken under the list read lock so the socket
 * cannot be unlinked between lookup and lock. */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
151
152 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
153 {
154 struct sock *s;
155 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
156 if (l2cap_pi(s)->ident == ident)
157 break;
158 }
159 return s;
160 }
161
/* Find channel by signalling identifier.
 * Returns the socket with its bh lock held (caller must bh_unlock_sock),
 * or NULL. */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
172
173 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
174 {
175 u16 cid = L2CAP_CID_DYN_START;
176
177 for (; cid < L2CAP_CID_DYN_END; cid++) {
178 if (!__l2cap_get_chan_by_scid(l, cid))
179 return cid;
180 }
181
182 return 0;
183 }
184
/* Insert @sk at the head of the connection's channel list.
 * Caller holds the list write lock; a reference on @sk is taken and is
 * dropped again in l2cap_chan_unlink(). */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
196
/* Remove @sk from the connection's channel list and drop the reference
 * taken by __l2cap_chan_link().  Takes the list write lock itself. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
213
/* Attach @sk to @conn: assign CIDs by socket type, link it into the
 * connection's channel list and, for incoming channels, queue it on the
 * listening @parent.  Caller holds the channel-list write lock. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* Default disconnect reason while the channel is being set up. */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
245
/* Delete channel.
 * Must be called on the locked socket.  Detaches @sk from its connection,
 * marks it closed/zapped, reports @err to userspace via sk_err, wakes the
 * waiter (or the accepting parent) and tears down all ERTM state. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	/* A not-yet-accepted child wakes its listening parent instead. */
	if (parent) {
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		/* Stop all ERTM timers and free queued frames and the
		 * outstanding SREJ bookkeeping. */
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));

		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
294
/* Service level security.
 * Map the socket's PSM and security level to an HCI authentication
 * requirement and ask the ACL link to satisfy it.  Returns nonzero when
 * the link already meets the requirement (see hci_conn_security). */
static inline int l2cap_check_security(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	__u8 auth_type;

	/* PSM 0x0001 (SDP) never requires bonding. */
	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
								auth_type);
}
326
/* Allocate the next signalling-command identifier for @conn.
 * Identifiers wrap within 1..128 under conn->lock, so concurrent callers
 * never receive the same value. */
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
348
349 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
350 {
351 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
352
353 BT_DBG("code 0x%2.2x", code);
354
355 if (!skb)
356 return;
357
358 hci_send_acl(conn->hcon, skb, 0);
359 }
360
/* Build and transmit an ERTM supervisory (S-) frame carrying @control.
 * Folds in any pending F-bit/P-bit state and appends an FCS when the
 * channel uses CRC16.  No-op unless the socket is connected. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* header + 16-bit control */

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the trailing FCS */

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Consume one-shot F-bit / P-bit requests set by the state machine. */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything before the FCS field itself. */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	hci_send_acl(pi->conn->hcon, skb, 0);
}
406
/* Send an RR or RNR supervisory frame depending on local-busy state,
 * acknowledging everything up to buffer_seq in the ReqSeq field. */
static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
{
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		/* Remember we told the peer we are busy. */
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	} else
		control |= L2CAP_SUPER_RCV_READY;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(pi, control);
}
419
420 static inline int __l2cap_no_conn_pending(struct sock *sk)
421 {
422 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
423 }
424
/* Kick off channel establishment on an up ACL link: send a Connect
 * Request once the feature-mask exchange is done and security allows it;
 * otherwise start the feature-mask Information Request first. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Wait until the info exchange finishes; l2cap_conn_start()
		 * will retry pending channels then. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the info response. */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
458
/* Send a Disconnect Request for @sk's channel, flush pending transmit
 * state (including ERTM timers) and move the socket to BT_DISCONN with
 * @err reported via sk_err. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
482
483 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its setup: issue Connect
 * Requests for sockets in BT_CONNECT, and answer (or defer) incoming
 * requests for sockets in BT_CONNECT2.  Called once the feature-mask
 * information exchange has completed (or timed out). */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Only connection-oriented sockets take part in setup. */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (l2cap_check_security(sk) &&
					__l2cap_no_conn_pending(sk)) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			if (l2cap_check_security(sk)) {
				if (bt_sk(sk)->defer_setup) {
					/* Userspace must accept() first;
					 * answer "pending" and wake parent. */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					parent->sk_data_ready(parent, 0);

				} else {
					sk->sk_state = BT_CONFIG;
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
546
/* ACL link came up: mark raw/datagram sockets connected immediately and
 * start L2CAP channel setup for connection-oriented sockets. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* No channel-level handshake needed. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
572
/* Notify sockets that we cannot guaranty reliability anymore.
 * Only sockets that asked for forced reliability get @err in sk_err. */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->force_reliable)
			sk->sk_err = err;
	}

	read_unlock(&l->lock);
}
590
591 static void l2cap_info_timeout(unsigned long arg)
592 {
593 struct l2cap_conn *conn = (void *) arg;
594
595 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
596 conn->info_ident = 0;
597
598 l2cap_conn_start(conn);
599 }
600
/* Get-or-create the L2CAP connection object for an ACL link.
 * Returns the existing conn if one is attached (or @status is nonzero),
 * a freshly initialized conn otherwise, or NULL on allocation failure. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* Default disconnect reason until setup says otherwise. */
	conn->disc_reason = 0x13;

	return conn;
}
633
/* Tear down the L2CAP connection attached to @hcon: delete every channel
 * (reporting @err to each socket), stop the info timer and free the
 * connection object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Free any partially reassembled frame. */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
660
661 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
662 {
663 struct l2cap_chan_list *l = &conn->chan_list;
664 write_lock_bh(&l->lock);
665 __l2cap_chan_add(conn, sk, parent);
666 write_unlock_bh(&l->lock);
667 }
668
669 /* ---- Socket interface ---- */
670 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
671 {
672 struct sock *sk;
673 struct hlist_node *node;
674 sk_for_each(sk, node, &l2cap_sk_list.head)
675 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
676 goto found;
677 sk = NULL;
678 found:
679 return sk;
680 }
681
/* Find socket with psm and source bdaddr.
 * Returns closest match: an exact (psm, src) hit wins, otherwise a socket
 * bound to (psm, BDADDR_ANY) is remembered as fallback.
 * Caller holds l2cap_sk_list.lock. */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* node is non-NULL only if the loop broke early (exact match). */
	return node ? sk : sk1;
}
706
/* Find socket with given address (psm, src).
 * Returns the socket with its bh lock held (caller must bh_unlock_sock),
 * or NULL. */
static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *s;
	read_lock(&l2cap_sk_list.lock);
	s = __l2cap_get_sock_by_psm(state, psm, src);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);
	return s;
}
719
/* sk_destruct callback: free anything still queued when the last socket
 * reference is dropped. */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
727
/* Close a listening socket: tear down every not-yet-accepted child, then
 * mark the parent closed and zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
741
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.  Unlinks it from the global list
 * and drops the list's reference; no-op while userspace still holds the
 * socket or it has not been zapped yet. */
static void l2cap_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
757
/* State-dependent close, called with the socket locked:
 *  - listening sockets drop their pending children;
 *  - established connection-oriented channels send a Disconnect Request;
 *  - half-open incoming channels (BT_CONNECT2) are refused with a
 *    Connect Response before being deleted;
 *  - everything else is deleted (or merely zapped) directly. */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Bound the wait for the Disconnect Response. */
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk, reason);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Deferred setup was never accepted → security
			 * block; otherwise report the PSM as bad. */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
811
/* Must be called on unlocked socket.
 * Full close: stop the timer, run the state-dependent close under the
 * socket lock, then reap the socket if it became a zapped orphan. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
821
/* Initialize the L2CAP-specific part of a new socket.  A child accepted
 * from @parent inherits its configuration; a fresh socket gets defaults
 * (ERTM for SOCK_STREAM only when the module parameter enables it). */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs  = l2cap_pi(parent)->fcs;
		pi->max_tx = l2cap_pi(parent)->max_tx;
		pi->tx_win = l2cap_pi(parent)->tx_win;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		if (enable_ertm && sk->sk_type == SOCK_STREAM)
			pi->mode = L2CAP_MODE_ERTM;
		else
			pi->mode = L2CAP_MODE_BASIC;
		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
		pi->fcs = L2CAP_FCS_CRC16;
		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	skb_queue_head_init(BUSY_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
864
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * l2cap_pinfo that wraps struct sock. */
static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};
870
/* Allocate and minimally initialize an L2CAP struct sock: destructor,
 * connect timeout, state timer, and linkage into the global socket list.
 * Returns NULL on allocation failure. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
895
/* socket(2) handler for PF_BLUETOOTH/BTPROTO_L2CAP: validate the socket
 * type (raw sockets need CAP_NET_RAW unless created in-kernel), then
 * allocate and initialize the sock. */
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
			     int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}
921
/* bind(2) handler: record source bdaddr and PSM.  Privileged PSMs
 * (< 0x1001) require CAP_NET_BIND_SERVICE; binding by CID is not
 * supported here; duplicate (psm, bdaddr) bindings are rejected. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Tolerate short sockaddrs; unsupplied fields stay zero. */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
				!capable(CAP_NET_BIND_SERVICE)) {
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm   = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		/* SDP (0x0001) and RFCOMM (0x0003) run below the security
		 * procedures. */
		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
975
/* Resolve a route to the destination, create (or reuse) the ACL link
 * with the appropriate authentication requirement, attach this socket as
 * a channel and either finish immediately (link already up) or wait for
 * l2cap_conn_ready().  Returns 0 on success or a negative errno. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	/* Pick the HCI auth requirement: dedicated bonding for raw
	 * sockets, no bonding for SDP (PSM 0x0001), general bonding
	 * otherwise — each scaled by the requested security level. */
	if (sk->sk_type == SOCK_RAW) {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* ACL already up: raw/datagram sockets are done, channel
		 * sockets start the L2CAP handshake now. */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1066
1067 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1068 {
1069 struct sock *sk = sock->sk;
1070 struct sockaddr_l2 la;
1071 int len, err = 0;
1072
1073 BT_DBG("sk %p", sk);
1074
1075 if (!addr || alen < sizeof(addr->sa_family) ||
1076 addr->sa_family != AF_BLUETOOTH)
1077 return -EINVAL;
1078
1079 memset(&la, 0, sizeof(la));
1080 len = min_t(unsigned int, sizeof(la), alen);
1081 memcpy(&la, addr, len);
1082
1083 if (la.l2_cid)
1084 return -EINVAL;
1085
1086 lock_sock(sk);
1087
1088 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1089 && !la.l2_psm) {
1090 err = -EINVAL;
1091 goto done;
1092 }
1093
1094 switch (l2cap_pi(sk)->mode) {
1095 case L2CAP_MODE_BASIC:
1096 break;
1097 case L2CAP_MODE_ERTM:
1098 case L2CAP_MODE_STREAMING:
1099 if (enable_ertm)
1100 break;
1101 /* fall through */
1102 default:
1103 err = -ENOTSUPP;
1104 goto done;
1105 }
1106
1107 switch (sk->sk_state) {
1108 case BT_CONNECT:
1109 case BT_CONNECT2:
1110 case BT_CONFIG:
1111 /* Already connecting */
1112 goto wait;
1113
1114 case BT_CONNECTED:
1115 /* Already connected */
1116 goto done;
1117
1118 case BT_OPEN:
1119 case BT_BOUND:
1120 /* Can connect */
1121 break;
1122
1123 default:
1124 err = -EBADFD;
1125 goto done;
1126 }
1127
1128 /* Set destination address and psm */
1129 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1130 l2cap_pi(sk)->psm = la.l2_psm;
1131
1132 err = l2cap_do_connect(sk);
1133 if (err)
1134 goto done;
1135
1136 wait:
1137 err = bt_sock_wait_state(sk, BT_CONNECTED,
1138 sock_sndtimeo(sk, flags & O_NONBLOCK));
1139 done:
1140 release_sock(sk);
1141 return err;
1142 }
1143
1144 static int l2cap_sock_listen(struct socket *sock, int backlog)
1145 {
1146 struct sock *sk = sock->sk;
1147 int err = 0;
1148
1149 BT_DBG("sk %p backlog %d", sk, backlog);
1150
1151 lock_sock(sk);
1152
1153 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1154 || sk->sk_state != BT_BOUND) {
1155 err = -EBADFD;
1156 goto done;
1157 }
1158
1159 switch (l2cap_pi(sk)->mode) {
1160 case L2CAP_MODE_BASIC:
1161 break;
1162 case L2CAP_MODE_ERTM:
1163 case L2CAP_MODE_STREAMING:
1164 if (enable_ertm)
1165 break;
1166 /* fall through */
1167 default:
1168 err = -ENOTSUPP;
1169 goto done;
1170 }
1171
1172 if (!l2cap_pi(sk)->psm) {
1173 bdaddr_t *src = &bt_sk(sk)->src;
1174 u16 psm;
1175
1176 err = -EINVAL;
1177
1178 write_lock_bh(&l2cap_sk_list.lock);
1179
1180 for (psm = 0x1001; psm < 0x1100; psm += 2)
1181 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1182 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1183 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1184 err = 0;
1185 break;
1186 }
1187
1188 write_unlock_bh(&l2cap_sk_list.lock);
1189
1190 if (err < 0)
1191 goto done;
1192 }
1193
1194 sk->sk_max_ack_backlog = backlog;
1195 sk->sk_ack_backlog = 0;
1196 sk->sk_state = BT_LISTEN;
1197
1198 done:
1199 release_sock(sk);
1200 return err;
1201 }
1202
/* accept(2) handler: sleep (interruptibly, honoring O_NONBLOCK and the
 * receive timeout) until a fully set up child socket can be dequeued
 * from the listening socket's accept queue. */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the lock while sleeping so the softirq path can
		 * enqueue new connections. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* Socket may have been closed while we slept. */
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1258
1259 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1260 {
1261 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1262 struct sock *sk = sock->sk;
1263
1264 BT_DBG("sock %p, sk %p", sock, sk);
1265
1266 addr->sa_family = AF_BLUETOOTH;
1267 *len = sizeof(struct sockaddr_l2);
1268
1269 if (peer) {
1270 la->l2_psm = l2cap_pi(sk)->psm;
1271 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1272 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1273 } else {
1274 la->l2_psm = l2cap_pi(sk)->sport;
1275 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1276 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1277 }
1278
1279 return 0;
1280 }
1281
static int __l2cap_wait_ack(struct sock *sk)
{
	/* Sleep until every transmitted ERTM I-frame has been acked by
	 * the peer or the connection goes away.  Called with the socket
	 * locked; the lock is dropped around each sleep.  Returns 0 on
	 * success, or a signal/socket error. */
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the polling interval after each full timeout */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		/* A pending socket error (e.g. link loss) ends the wait */
		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1312
static void l2cap_monitor_timeout(unsigned long arg)
{
	/* ERTM monitor timer: fires while waiting for the peer to answer
	 * a poll.  Tears the channel down once retry_count reaches the
	 * peer's max-transmit limit; otherwise polls again. */
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	/* Another retry: restart the monitor timer and re-poll (P-bit) */
	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1330
static void l2cap_retrans_timeout(unsigned long arg)
{
	/* ERTM retransmission timer: no acknowledgement arrived in time.
	 * Poll the peer (P-bit), mark that we are waiting for the F-bit
	 * reply, and hand over to the monitor timer. */
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1344
1345 static void l2cap_drop_acked_frames(struct sock *sk)
1346 {
1347 struct sk_buff *skb;
1348
1349 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1350 l2cap_pi(sk)->unacked_frames) {
1351 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1352 break;
1353
1354 skb = skb_dequeue(TX_QUEUE(sk));
1355 kfree_skb(skb);
1356
1357 l2cap_pi(sk)->unacked_frames--;
1358 }
1359
1360 if (!l2cap_pi(sk)->unacked_frames)
1361 del_timer(&l2cap_pi(sk)->retrans_timer);
1362 }
1363
1364 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1365 {
1366 struct l2cap_pinfo *pi = l2cap_pi(sk);
1367
1368 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1369
1370 hci_send_acl(pi->conn->hcon, skb, 0);
1371 }
1372
1373 static int l2cap_streaming_send(struct sock *sk)
1374 {
1375 struct sk_buff *skb, *tx_skb;
1376 struct l2cap_pinfo *pi = l2cap_pi(sk);
1377 u16 control, fcs;
1378
1379 while ((skb = sk->sk_send_head)) {
1380 tx_skb = skb_clone(skb, GFP_ATOMIC);
1381
1382 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1383 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1384 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1385
1386 if (pi->fcs == L2CAP_FCS_CRC16) {
1387 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1388 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1389 }
1390
1391 l2cap_do_send(sk, tx_skb);
1392
1393 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1394
1395 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1396 sk->sk_send_head = NULL;
1397 else
1398 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1399
1400 skb = skb_dequeue(TX_QUEUE(sk));
1401 kfree_skb(skb);
1402 }
1403 return 0;
1404 }
1405
1406 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1407 {
1408 struct l2cap_pinfo *pi = l2cap_pi(sk);
1409 struct sk_buff *skb, *tx_skb;
1410 u16 control, fcs;
1411
1412 skb = skb_peek(TX_QUEUE(sk));
1413 if (!skb)
1414 return;
1415
1416 do {
1417 if (bt_cb(skb)->tx_seq == tx_seq)
1418 break;
1419
1420 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1421 return;
1422
1423 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1424
1425 if (pi->remote_max_tx &&
1426 bt_cb(skb)->retries == pi->remote_max_tx) {
1427 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1428 return;
1429 }
1430
1431 tx_skb = skb_clone(skb, GFP_ATOMIC);
1432 bt_cb(skb)->retries++;
1433 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1434
1435 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1436 control |= L2CAP_CTRL_FINAL;
1437 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1438 }
1439
1440 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1441 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1442
1443 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1444
1445 if (pi->fcs == L2CAP_FCS_CRC16) {
1446 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1447 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1448 }
1449
1450 l2cap_do_send(sk, tx_skb);
1451 }
1452
1453 static int l2cap_ertm_send(struct sock *sk)
1454 {
1455 struct sk_buff *skb, *tx_skb;
1456 struct l2cap_pinfo *pi = l2cap_pi(sk);
1457 u16 control, fcs;
1458 int nsent = 0;
1459
1460 if (sk->sk_state != BT_CONNECTED)
1461 return -ENOTCONN;
1462
1463 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1464
1465 if (pi->remote_max_tx &&
1466 bt_cb(skb)->retries == pi->remote_max_tx) {
1467 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1468 break;
1469 }
1470
1471 tx_skb = skb_clone(skb, GFP_ATOMIC);
1472
1473 bt_cb(skb)->retries++;
1474
1475 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1476 control &= L2CAP_CTRL_SAR;
1477
1478 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1479 control |= L2CAP_CTRL_FINAL;
1480 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1481 }
1482 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1483 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1484 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1485
1486
1487 if (pi->fcs == L2CAP_FCS_CRC16) {
1488 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1489 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1490 }
1491
1492 l2cap_do_send(sk, tx_skb);
1493
1494 __mod_retrans_timer();
1495
1496 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1497 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1498
1499 pi->unacked_frames++;
1500 pi->frames_sent++;
1501
1502 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1503 sk->sk_send_head = NULL;
1504 else
1505 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1506
1507 nsent++;
1508 }
1509
1510 return nsent;
1511 }
1512
1513 static int l2cap_retransmit_frames(struct sock *sk)
1514 {
1515 struct l2cap_pinfo *pi = l2cap_pi(sk);
1516 int ret;
1517
1518 spin_lock_bh(&pi->send_lock);
1519
1520 if (!skb_queue_empty(TX_QUEUE(sk)))
1521 sk->sk_send_head = TX_QUEUE(sk)->next;
1522
1523 pi->next_tx_seq = pi->expected_ack_seq;
1524 ret = l2cap_ertm_send(sk);
1525
1526 spin_unlock_bh(&pi->send_lock);
1527
1528 return ret;
1529 }
1530
1531 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1532 {
1533 struct sock *sk = (struct sock *)pi;
1534 u16 control = 0;
1535 int nframes;
1536
1537 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1538
1539 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1540 control |= L2CAP_SUPER_RCV_NOT_READY;
1541 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1542 l2cap_send_sframe(pi, control);
1543 return;
1544 }
1545
1546 spin_lock_bh(&pi->send_lock);
1547 nframes = l2cap_ertm_send(sk);
1548 spin_unlock_bh(&pi->send_lock);
1549
1550 if (nframes > 0)
1551 return;
1552
1553 control |= L2CAP_SUPER_RCV_READY;
1554 l2cap_send_sframe(pi, control);
1555 }
1556
1557 static void l2cap_send_srejtail(struct sock *sk)
1558 {
1559 struct srej_list *tail;
1560 u16 control;
1561
1562 control = L2CAP_SUPER_SELECT_REJECT;
1563 control |= L2CAP_CTRL_FINAL;
1564
1565 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1566 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1567
1568 l2cap_send_sframe(l2cap_pi(sk), control);
1569 }
1570
1571 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1572 {
1573 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1574 struct sk_buff **frag;
1575 int err, sent = 0;
1576
1577 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1578 return -EFAULT;
1579
1580 sent += count;
1581 len -= count;
1582
1583 /* Continuation fragments (no L2CAP header) */
1584 frag = &skb_shinfo(skb)->frag_list;
1585 while (len) {
1586 count = min_t(unsigned int, conn->mtu, len);
1587
1588 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1589 if (!*frag)
1590 return -EFAULT;
1591 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1592 return -EFAULT;
1593
1594 sent += count;
1595 len -= count;
1596
1597 frag = &(*frag)->next;
1598 }
1599
1600 return sent;
1601 }
1602
1603 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1604 {
1605 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1606 struct sk_buff *skb;
1607 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1608 struct l2cap_hdr *lh;
1609
1610 BT_DBG("sk %p len %d", sk, (int)len);
1611
1612 count = min_t(unsigned int, (conn->mtu - hlen), len);
1613 skb = bt_skb_send_alloc(sk, count + hlen,
1614 msg->msg_flags & MSG_DONTWAIT, &err);
1615 if (!skb)
1616 return ERR_PTR(-ENOMEM);
1617
1618 /* Create L2CAP header */
1619 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1620 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1621 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1622 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1623
1624 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1625 if (unlikely(err < 0)) {
1626 kfree_skb(skb);
1627 return ERR_PTR(err);
1628 }
1629 return skb;
1630 }
1631
1632 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1633 {
1634 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1635 struct sk_buff *skb;
1636 int err, count, hlen = L2CAP_HDR_SIZE;
1637 struct l2cap_hdr *lh;
1638
1639 BT_DBG("sk %p len %d", sk, (int)len);
1640
1641 count = min_t(unsigned int, (conn->mtu - hlen), len);
1642 skb = bt_skb_send_alloc(sk, count + hlen,
1643 msg->msg_flags & MSG_DONTWAIT, &err);
1644 if (!skb)
1645 return ERR_PTR(-ENOMEM);
1646
1647 /* Create L2CAP header */
1648 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1649 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1650 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1651
1652 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1653 if (unlikely(err < 0)) {
1654 kfree_skb(skb);
1655 return ERR_PTR(err);
1656 }
1657 return skb;
1658 }
1659
1660 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1661 {
1662 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1663 struct sk_buff *skb;
1664 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1665 struct l2cap_hdr *lh;
1666
1667 BT_DBG("sk %p len %d", sk, (int)len);
1668
1669 if (!conn)
1670 return ERR_PTR(-ENOTCONN);
1671
1672 if (sdulen)
1673 hlen += 2;
1674
1675 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1676 hlen += 2;
1677
1678 count = min_t(unsigned int, (conn->mtu - hlen), len);
1679 skb = bt_skb_send_alloc(sk, count + hlen,
1680 msg->msg_flags & MSG_DONTWAIT, &err);
1681 if (!skb)
1682 return ERR_PTR(-ENOMEM);
1683
1684 /* Create L2CAP header */
1685 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1686 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1687 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1688 put_unaligned_le16(control, skb_put(skb, 2));
1689 if (sdulen)
1690 put_unaligned_le16(sdulen, skb_put(skb, 2));
1691
1692 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1693 if (unlikely(err < 0)) {
1694 kfree_skb(skb);
1695 return ERR_PTR(err);
1696 }
1697
1698 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1699 put_unaligned_le16(0, skb_put(skb, 2));
1700
1701 bt_cb(skb)->retries = 0;
1702 return skb;
1703 }
1704
1705 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1706 {
1707 struct l2cap_pinfo *pi = l2cap_pi(sk);
1708 struct sk_buff *skb;
1709 struct sk_buff_head sar_queue;
1710 u16 control;
1711 size_t size = 0;
1712
1713 skb_queue_head_init(&sar_queue);
1714 control = L2CAP_SDU_START;
1715 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1716 if (IS_ERR(skb))
1717 return PTR_ERR(skb);
1718
1719 __skb_queue_tail(&sar_queue, skb);
1720 len -= pi->remote_mps;
1721 size += pi->remote_mps;
1722
1723 while (len > 0) {
1724 size_t buflen;
1725
1726 if (len > pi->remote_mps) {
1727 control = L2CAP_SDU_CONTINUE;
1728 buflen = pi->remote_mps;
1729 } else {
1730 control = L2CAP_SDU_END;
1731 buflen = len;
1732 }
1733
1734 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1735 if (IS_ERR(skb)) {
1736 skb_queue_purge(&sar_queue);
1737 return PTR_ERR(skb);
1738 }
1739
1740 __skb_queue_tail(&sar_queue, skb);
1741 len -= buflen;
1742 size += buflen;
1743 }
1744 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1745 spin_lock_bh(&pi->send_lock);
1746 if (sk->sk_send_head == NULL)
1747 sk->sk_send_head = sar_queue.next;
1748 spin_unlock_bh(&pi->send_lock);
1749
1750 return size;
1751 }
1752
1753 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1754 {
1755 struct sock *sk = sock->sk;
1756 struct l2cap_pinfo *pi = l2cap_pi(sk);
1757 struct sk_buff *skb;
1758 u16 control;
1759 int err;
1760
1761 BT_DBG("sock %p, sk %p", sock, sk);
1762
1763 err = sock_error(sk);
1764 if (err)
1765 return err;
1766
1767 if (msg->msg_flags & MSG_OOB)
1768 return -EOPNOTSUPP;
1769
1770 lock_sock(sk);
1771
1772 if (sk->sk_state != BT_CONNECTED) {
1773 err = -ENOTCONN;
1774 goto done;
1775 }
1776
1777 /* Connectionless channel */
1778 if (sk->sk_type == SOCK_DGRAM) {
1779 skb = l2cap_create_connless_pdu(sk, msg, len);
1780 if (IS_ERR(skb)) {
1781 err = PTR_ERR(skb);
1782 } else {
1783 l2cap_do_send(sk, skb);
1784 err = len;
1785 }
1786 goto done;
1787 }
1788
1789 switch (pi->mode) {
1790 case L2CAP_MODE_BASIC:
1791 /* Check outgoing MTU */
1792 if (len > pi->omtu) {
1793 err = -EINVAL;
1794 goto done;
1795 }
1796
1797 /* Create a basic PDU */
1798 skb = l2cap_create_basic_pdu(sk, msg, len);
1799 if (IS_ERR(skb)) {
1800 err = PTR_ERR(skb);
1801 goto done;
1802 }
1803
1804 l2cap_do_send(sk, skb);
1805 err = len;
1806 break;
1807
1808 case L2CAP_MODE_ERTM:
1809 case L2CAP_MODE_STREAMING:
1810 /* Entire SDU fits into one PDU */
1811 if (len <= pi->remote_mps) {
1812 control = L2CAP_SDU_UNSEGMENTED;
1813 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1814 if (IS_ERR(skb)) {
1815 err = PTR_ERR(skb);
1816 goto done;
1817 }
1818 __skb_queue_tail(TX_QUEUE(sk), skb);
1819
1820 if (pi->mode == L2CAP_MODE_ERTM)
1821 spin_lock_bh(&pi->send_lock);
1822
1823 if (sk->sk_send_head == NULL)
1824 sk->sk_send_head = skb;
1825
1826 if (pi->mode == L2CAP_MODE_ERTM)
1827 spin_unlock_bh(&pi->send_lock);
1828 } else {
1829 /* Segment SDU into multiples PDUs */
1830 err = l2cap_sar_segment_sdu(sk, msg, len);
1831 if (err < 0)
1832 goto done;
1833 }
1834
1835 if (pi->mode == L2CAP_MODE_STREAMING) {
1836 err = l2cap_streaming_send(sk);
1837 } else {
1838 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1839 pi->conn_state && L2CAP_CONN_WAIT_F) {
1840 err = len;
1841 break;
1842 }
1843 spin_lock_bh(&pi->send_lock);
1844 err = l2cap_ertm_send(sk);
1845 spin_unlock_bh(&pi->send_lock);
1846 }
1847
1848 if (err >= 0)
1849 err = len;
1850 break;
1851
1852 default:
1853 BT_DBG("bad state %1.1x", pi->mode);
1854 err = -EINVAL;
1855 }
1856
1857 done:
1858 release_sock(sk);
1859 return err;
1860 }
1861
1862 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1863 {
1864 struct sock *sk = sock->sk;
1865
1866 lock_sock(sk);
1867
1868 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1869 struct l2cap_conn_rsp rsp;
1870
1871 sk->sk_state = BT_CONFIG;
1872
1873 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1874 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1875 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1876 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1877 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1878 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1879
1880 release_sock(sk);
1881 return 0;
1882 }
1883
1884 release_sock(sk);
1885
1886 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1887 }
1888
1889 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1890 {
1891 struct sock *sk = sock->sk;
1892 struct l2cap_options opts;
1893 int len, err = 0;
1894 u32 opt;
1895
1896 BT_DBG("sk %p", sk);
1897
1898 lock_sock(sk);
1899
1900 switch (optname) {
1901 case L2CAP_OPTIONS:
1902 opts.imtu = l2cap_pi(sk)->imtu;
1903 opts.omtu = l2cap_pi(sk)->omtu;
1904 opts.flush_to = l2cap_pi(sk)->flush_to;
1905 opts.mode = l2cap_pi(sk)->mode;
1906 opts.fcs = l2cap_pi(sk)->fcs;
1907 opts.max_tx = l2cap_pi(sk)->max_tx;
1908 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1909
1910 len = min_t(unsigned int, sizeof(opts), optlen);
1911 if (copy_from_user((char *) &opts, optval, len)) {
1912 err = -EFAULT;
1913 break;
1914 }
1915
1916 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1917 err = -EINVAL;
1918 break;
1919 }
1920
1921 l2cap_pi(sk)->mode = opts.mode;
1922 switch (l2cap_pi(sk)->mode) {
1923 case L2CAP_MODE_BASIC:
1924 break;
1925 case L2CAP_MODE_ERTM:
1926 case L2CAP_MODE_STREAMING:
1927 if (enable_ertm)
1928 break;
1929 /* fall through */
1930 default:
1931 err = -EINVAL;
1932 break;
1933 }
1934
1935 l2cap_pi(sk)->imtu = opts.imtu;
1936 l2cap_pi(sk)->omtu = opts.omtu;
1937 l2cap_pi(sk)->fcs = opts.fcs;
1938 l2cap_pi(sk)->max_tx = opts.max_tx;
1939 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1940 break;
1941
1942 case L2CAP_LM:
1943 if (get_user(opt, (u32 __user *) optval)) {
1944 err = -EFAULT;
1945 break;
1946 }
1947
1948 if (opt & L2CAP_LM_AUTH)
1949 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1950 if (opt & L2CAP_LM_ENCRYPT)
1951 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1952 if (opt & L2CAP_LM_SECURE)
1953 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1954
1955 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1956 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
1957 break;
1958
1959 default:
1960 err = -ENOPROTOOPT;
1961 break;
1962 }
1963
1964 release_sock(sk);
1965 return err;
1966 }
1967
1968 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1969 {
1970 struct sock *sk = sock->sk;
1971 struct bt_security sec;
1972 int len, err = 0;
1973 u32 opt;
1974
1975 BT_DBG("sk %p", sk);
1976
1977 if (level == SOL_L2CAP)
1978 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1979
1980 if (level != SOL_BLUETOOTH)
1981 return -ENOPROTOOPT;
1982
1983 lock_sock(sk);
1984
1985 switch (optname) {
1986 case BT_SECURITY:
1987 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
1988 && sk->sk_type != SOCK_RAW) {
1989 err = -EINVAL;
1990 break;
1991 }
1992
1993 sec.level = BT_SECURITY_LOW;
1994
1995 len = min_t(unsigned int, sizeof(sec), optlen);
1996 if (copy_from_user((char *) &sec, optval, len)) {
1997 err = -EFAULT;
1998 break;
1999 }
2000
2001 if (sec.level < BT_SECURITY_LOW ||
2002 sec.level > BT_SECURITY_HIGH) {
2003 err = -EINVAL;
2004 break;
2005 }
2006
2007 l2cap_pi(sk)->sec_level = sec.level;
2008 break;
2009
2010 case BT_DEFER_SETUP:
2011 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2012 err = -EINVAL;
2013 break;
2014 }
2015
2016 if (get_user(opt, (u32 __user *) optval)) {
2017 err = -EFAULT;
2018 break;
2019 }
2020
2021 bt_sk(sk)->defer_setup = opt;
2022 break;
2023
2024 default:
2025 err = -ENOPROTOOPT;
2026 break;
2027 }
2028
2029 release_sock(sk);
2030 return err;
2031 }
2032
2033 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2034 {
2035 struct sock *sk = sock->sk;
2036 struct l2cap_options opts;
2037 struct l2cap_conninfo cinfo;
2038 int len, err = 0;
2039 u32 opt;
2040
2041 BT_DBG("sk %p", sk);
2042
2043 if (get_user(len, optlen))
2044 return -EFAULT;
2045
2046 lock_sock(sk);
2047
2048 switch (optname) {
2049 case L2CAP_OPTIONS:
2050 opts.imtu = l2cap_pi(sk)->imtu;
2051 opts.omtu = l2cap_pi(sk)->omtu;
2052 opts.flush_to = l2cap_pi(sk)->flush_to;
2053 opts.mode = l2cap_pi(sk)->mode;
2054 opts.fcs = l2cap_pi(sk)->fcs;
2055 opts.max_tx = l2cap_pi(sk)->max_tx;
2056 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2057
2058 len = min_t(unsigned int, len, sizeof(opts));
2059 if (copy_to_user(optval, (char *) &opts, len))
2060 err = -EFAULT;
2061
2062 break;
2063
2064 case L2CAP_LM:
2065 switch (l2cap_pi(sk)->sec_level) {
2066 case BT_SECURITY_LOW:
2067 opt = L2CAP_LM_AUTH;
2068 break;
2069 case BT_SECURITY_MEDIUM:
2070 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2071 break;
2072 case BT_SECURITY_HIGH:
2073 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2074 L2CAP_LM_SECURE;
2075 break;
2076 default:
2077 opt = 0;
2078 break;
2079 }
2080
2081 if (l2cap_pi(sk)->role_switch)
2082 opt |= L2CAP_LM_MASTER;
2083
2084 if (l2cap_pi(sk)->force_reliable)
2085 opt |= L2CAP_LM_RELIABLE;
2086
2087 if (put_user(opt, (u32 __user *) optval))
2088 err = -EFAULT;
2089 break;
2090
2091 case L2CAP_CONNINFO:
2092 if (sk->sk_state != BT_CONNECTED &&
2093 !(sk->sk_state == BT_CONNECT2 &&
2094 bt_sk(sk)->defer_setup)) {
2095 err = -ENOTCONN;
2096 break;
2097 }
2098
2099 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2100 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2101
2102 len = min_t(unsigned int, len, sizeof(cinfo));
2103 if (copy_to_user(optval, (char *) &cinfo, len))
2104 err = -EFAULT;
2105
2106 break;
2107
2108 default:
2109 err = -ENOPROTOOPT;
2110 break;
2111 }
2112
2113 release_sock(sk);
2114 return err;
2115 }
2116
2117 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2118 {
2119 struct sock *sk = sock->sk;
2120 struct bt_security sec;
2121 int len, err = 0;
2122
2123 BT_DBG("sk %p", sk);
2124
2125 if (level == SOL_L2CAP)
2126 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2127
2128 if (level != SOL_BLUETOOTH)
2129 return -ENOPROTOOPT;
2130
2131 if (get_user(len, optlen))
2132 return -EFAULT;
2133
2134 lock_sock(sk);
2135
2136 switch (optname) {
2137 case BT_SECURITY:
2138 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2139 && sk->sk_type != SOCK_RAW) {
2140 err = -EINVAL;
2141 break;
2142 }
2143
2144 sec.level = l2cap_pi(sk)->sec_level;
2145
2146 len = min_t(unsigned int, len, sizeof(sec));
2147 if (copy_to_user(optval, (char *) &sec, len))
2148 err = -EFAULT;
2149
2150 break;
2151
2152 case BT_DEFER_SETUP:
2153 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2154 err = -EINVAL;
2155 break;
2156 }
2157
2158 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2159 err = -EFAULT;
2160
2161 break;
2162
2163 default:
2164 err = -ENOPROTOOPT;
2165 break;
2166 }
2167
2168 release_sock(sk);
2169 return err;
2170 }
2171
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	/* Shut the channel down (the 'how' direction is ignored; both
	 * directions are closed): drain ERTM acks, close the channel,
	 * and honor SO_LINGER by waiting for BT_CLOSED. */
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		/* In ERTM mode, give in-flight frames a chance to be
		 * acknowledged before tearing the channel down. */
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}

	/* Surface a pending socket error if nothing else failed */
	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}
2202
static int l2cap_sock_release(struct socket *sock)
{
	/* Final release: shut the channel down, detach the sock from its
	 * file, and let l2cap_sock_kill free it when it is safe. */
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}
2219
2220 static void l2cap_chan_ready(struct sock *sk)
2221 {
2222 struct sock *parent = bt_sk(sk)->parent;
2223
2224 BT_DBG("sk %p, parent %p", sk, parent);
2225
2226 l2cap_pi(sk)->conf_state = 0;
2227 l2cap_sock_clear_timer(sk);
2228
2229 if (!parent) {
2230 /* Outgoing channel.
2231 * Wake up socket sleeping on connect.
2232 */
2233 sk->sk_state = BT_CONNECTED;
2234 sk->sk_state_change(sk);
2235 } else {
2236 /* Incoming channel.
2237 * Wake up socket sleeping on accept.
2238 */
2239 parent->sk_data_ready(parent, 0);
2240 }
2241 }
2242
2243 /* Copy frame to all raw sockets on that connection */
2244 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2245 {
2246 struct l2cap_chan_list *l = &conn->chan_list;
2247 struct sk_buff *nskb;
2248 struct sock *sk;
2249
2250 BT_DBG("conn %p", conn);
2251
2252 read_lock(&l->lock);
2253 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2254 if (sk->sk_type != SOCK_RAW)
2255 continue;
2256
2257 /* Don't send frame to the socket it came from */
2258 if (skb->sk == sk)
2259 continue;
2260 nskb = skb_clone(skb, GFP_ATOMIC);
2261 if (!nskb)
2262 continue;
2263
2264 if (sock_queue_rcv_skb(sk, nskb))
2265 kfree_skb(nskb);
2266 }
2267 read_unlock(&l->lock);
2268 }
2269
2270 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command PDU.  The head skb carries the
 * L2CAP header, the command header, and as much payload as fits in the
 * connection MTU; the remainder is chained as bare continuation
 * fragments.  Returns NULL on allocation failure. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	/* Payload portion that still fits into the head skb */
	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head and every fragment already chained to it */
	kfree_skb(skb);
	return NULL;
}
2329
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	/* Decode one option from a configuration request/response
	 * stream.  Advances *ptr past the option and returns its total
	 * encoded length (header + value). */
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	/* Fixed-size values are decoded inline (little-endian on the
	 * wire); anything else is returned as a pointer into the
	 * buffer, so the caller must copy before the buffer goes away. */
	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((__le16 *) opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((__le32 *) opt->val));
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2362
2363 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2364 {
2365 struct l2cap_conf_opt *opt = *ptr;
2366
2367 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2368
2369 opt->type = type;
2370 opt->len = len;
2371
2372 switch (len) {
2373 case 1:
2374 *((u8 *) opt->val) = val;
2375 break;
2376
2377 case 2:
2378 *((__le16 *) opt->val) = cpu_to_le16(val);
2379 break;
2380
2381 case 4:
2382 *((__le32 *) opt->val) = cpu_to_le32(val);
2383 break;
2384
2385 default:
2386 memcpy(opt->val, (void *) val, len);
2387 break;
2388 }
2389
2390 *ptr += L2CAP_CONF_OPT_SIZE + len;
2391 }
2392
static void l2cap_ack_timeout(unsigned long arg)
{
	/* Delayed-ack timer: acknowledge received I-frames that have not
	 * been acked yet (piggybacked or via an explicit S-frame). */
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2401
static inline void l2cap_ertm_init(struct sock *sk)
{
	/* Reset ERTM sequence state and set up the per-channel machinery:
	 * the three ERTM timers (retransmission, monitor, delayed-ack),
	 * the SREJ/busy receive queues, the send lock, and the deferred
	 * local-busy work item. */
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));
	spin_lock_init(&l2cap_pi(sk)->send_lock);

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
}
2423
2424 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2425 {
2426 u32 local_feat_mask = l2cap_feat_mask;
2427 if (enable_ertm)
2428 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2429
2430 switch (mode) {
2431 case L2CAP_MODE_ERTM:
2432 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2433 case L2CAP_MODE_STREAMING:
2434 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2435 default:
2436 return 0x00;
2437 }
2438 }
2439
2440 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2441 {
2442 switch (mode) {
2443 case L2CAP_MODE_STREAMING:
2444 case L2CAP_MODE_ERTM:
2445 if (l2cap_mode_supported(mode, remote_feat_mask))
2446 return mode;
2447 /* fall through */
2448 default:
2449 return L2CAP_MODE_BASIC;
2450 }
2451 }
2452
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	/* Build an outgoing configuration request into 'data'.  On the
	 * first request only, settle which mode to ask for; then emit the
	 * options that apply to the chosen mode.  Returns the number of
	 * bytes written. */
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection happens only once per channel */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode was explicitly requested by the application: it is
		 * mandatory, so disconnect if the peer can't do it. */
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		break;
	default:
		/* Otherwise negotiate the best mode both sides support */
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an MTU option when it differs from default */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		/* Timeouts are filled in by the responder per spec */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Cap the PDU size so a full frame (with overhead) fits
		 * into the connection MTU */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Ask to drop the FCS when we don't want it and the
		 * peer indicated FCS is negotiable */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		/* Window/retransmit fields are unused in streaming mode */
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2540
2541 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2542 {
2543 struct l2cap_pinfo *pi = l2cap_pi(sk);
2544 struct l2cap_conf_rsp *rsp = data;
2545 void *ptr = rsp->data;
2546 void *req = pi->conf_req;
2547 int len = pi->conf_len;
2548 int type, hint, olen;
2549 unsigned long val;
2550 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2551 u16 mtu = L2CAP_DEFAULT_MTU;
2552 u16 result = L2CAP_CONF_SUCCESS;
2553
2554 BT_DBG("sk %p", sk);
2555
2556 while (len >= L2CAP_CONF_OPT_SIZE) {
2557 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2558
2559 hint = type & L2CAP_CONF_HINT;
2560 type &= L2CAP_CONF_MASK;
2561
2562 switch (type) {
2563 case L2CAP_CONF_MTU:
2564 mtu = val;
2565 break;
2566
2567 case L2CAP_CONF_FLUSH_TO:
2568 pi->flush_to = val;
2569 break;
2570
2571 case L2CAP_CONF_QOS:
2572 break;
2573
2574 case L2CAP_CONF_RFC:
2575 if (olen == sizeof(rfc))
2576 memcpy(&rfc, (void *) val, olen);
2577 break;
2578
2579 case L2CAP_CONF_FCS:
2580 if (val == L2CAP_FCS_NONE)
2581 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2582
2583 break;
2584
2585 default:
2586 if (hint)
2587 break;
2588
2589 result = L2CAP_CONF_UNKNOWN;
2590 *((u8 *) ptr++) = type;
2591 break;
2592 }
2593 }
2594
2595 if (pi->num_conf_rsp || pi->num_conf_req)
2596 goto done;
2597
2598 switch (pi->mode) {
2599 case L2CAP_MODE_STREAMING:
2600 case L2CAP_MODE_ERTM:
2601 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2602 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2603 return -ECONNREFUSED;
2604 break;
2605 default:
2606 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2607 break;
2608 }
2609
2610 done:
2611 if (pi->mode != rfc.mode) {
2612 result = L2CAP_CONF_UNACCEPT;
2613 rfc.mode = pi->mode;
2614
2615 if (pi->num_conf_rsp == 1)
2616 return -ECONNREFUSED;
2617
2618 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2619 sizeof(rfc), (unsigned long) &rfc);
2620 }
2621
2622
2623 if (result == L2CAP_CONF_SUCCESS) {
2624 /* Configure output options and let the other side know
2625 * which ones we don't like. */
2626
2627 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2628 result = L2CAP_CONF_UNACCEPT;
2629 else {
2630 pi->omtu = mtu;
2631 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2632 }
2633 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2634
2635 switch (rfc.mode) {
2636 case L2CAP_MODE_BASIC:
2637 pi->fcs = L2CAP_FCS_NONE;
2638 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2639 break;
2640
2641 case L2CAP_MODE_ERTM:
2642 pi->remote_tx_win = rfc.txwin_size;
2643 pi->remote_max_tx = rfc.max_transmit;
2644 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2645 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2646
2647 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2648
2649 rfc.retrans_timeout =
2650 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2651 rfc.monitor_timeout =
2652 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2653
2654 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2655
2656 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2657 sizeof(rfc), (unsigned long) &rfc);
2658
2659 break;
2660
2661 case L2CAP_MODE_STREAMING:
2662 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2663 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2664
2665 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2666
2667 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2668
2669 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2670 sizeof(rfc), (unsigned long) &rfc);
2671
2672 break;
2673
2674 default:
2675 result = L2CAP_CONF_UNACCEPT;
2676
2677 memset(&rfc, 0, sizeof(rfc));
2678 rfc.mode = pi->mode;
2679 }
2680
2681 if (result == L2CAP_CONF_SUCCESS)
2682 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2683 }
2684 rsp->scid = cpu_to_le16(pi->dcid);
2685 rsp->result = cpu_to_le16(result);
2686 rsp->flags = cpu_to_le16(0x0000);
2687
2688 return ptr - data;
2689 }
2690
2691 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2692 {
2693 struct l2cap_pinfo *pi = l2cap_pi(sk);
2694 struct l2cap_conf_req *req = data;
2695 void *ptr = req->data;
2696 int type, olen;
2697 unsigned long val;
2698 struct l2cap_conf_rfc rfc;
2699
2700 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2701
2702 while (len >= L2CAP_CONF_OPT_SIZE) {
2703 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2704
2705 switch (type) {
2706 case L2CAP_CONF_MTU:
2707 if (val < L2CAP_DEFAULT_MIN_MTU) {
2708 *result = L2CAP_CONF_UNACCEPT;
2709 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2710 } else
2711 pi->omtu = val;
2712 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2713 break;
2714
2715 case L2CAP_CONF_FLUSH_TO:
2716 pi->flush_to = val;
2717 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2718 2, pi->flush_to);
2719 break;
2720
2721 case L2CAP_CONF_RFC:
2722 if (olen == sizeof(rfc))
2723 memcpy(&rfc, (void *)val, olen);
2724
2725 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2726 rfc.mode != pi->mode)
2727 return -ECONNREFUSED;
2728
2729 pi->mode = rfc.mode;
2730 pi->fcs = 0;
2731
2732 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2733 sizeof(rfc), (unsigned long) &rfc);
2734 break;
2735 }
2736 }
2737
2738 if (*result == L2CAP_CONF_SUCCESS) {
2739 switch (rfc.mode) {
2740 case L2CAP_MODE_ERTM:
2741 pi->remote_tx_win = rfc.txwin_size;
2742 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2743 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2744 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2745 break;
2746 case L2CAP_MODE_STREAMING:
2747 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2748 }
2749 }
2750
2751 req->dcid = cpu_to_le16(pi->dcid);
2752 req->flags = cpu_to_le16(0x0000);
2753
2754 return ptr - data;
2755 }
2756
2757 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2758 {
2759 struct l2cap_conf_rsp *rsp = data;
2760 void *ptr = rsp->data;
2761
2762 BT_DBG("sk %p", sk);
2763
2764 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2765 rsp->result = cpu_to_le16(result);
2766 rsp->flags = cpu_to_le16(flags);
2767
2768 return ptr - data;
2769 }
2770
2771 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2772 {
2773 struct l2cap_pinfo *pi = l2cap_pi(sk);
2774 int type, olen;
2775 unsigned long val;
2776 struct l2cap_conf_rfc rfc;
2777
2778 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2779
2780 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2781 return;
2782
2783 while (len >= L2CAP_CONF_OPT_SIZE) {
2784 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2785
2786 switch (type) {
2787 case L2CAP_CONF_RFC:
2788 if (olen == sizeof(rfc))
2789 memcpy(&rfc, (void *)val, olen);
2790 goto done;
2791 }
2792 }
2793
2794 done:
2795 switch (rfc.mode) {
2796 case L2CAP_MODE_ERTM:
2797 pi->remote_tx_win = rfc.txwin_size;
2798 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2799 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2800 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2801 break;
2802 case L2CAP_MODE_STREAMING:
2803 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2804 }
2805 }
2806
2807 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2808 {
2809 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2810
2811 if (rej->reason != 0x0000)
2812 return 0;
2813
2814 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2815 cmd->ident == conn->info_ident) {
2816 del_timer(&conn->info_timer);
2817
2818 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2819 conn->info_ident = 0;
2820
2821 l2cap_conn_start(conn);
2822 }
2823
2824 return 0;
2825 }
2826
/* Handle an incoming L2CAP Connection Request: find a listening socket
 * for the PSM, allocate/initialize a child socket, and answer with a
 * Connection Response (success, pending, or an error).  When the
 * remote feature mask is still unknown, an Information Request
 * exchange is started as well.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm.
	 * NOTE(review): "parent" is unlocked at the "response" label,
	 * so it is presumably returned locked by l2cap_get_sock_by_psm
	 * -- confirm in that helper. */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Check if the ACL is secure enough (if not SDP, PSM 0x0001) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	/* Channel add assigns our local CID, echoed back as dcid. */
	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace decides accept/reject later;
				 * wake the listener. */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask still unknown: answer "pending" and
		 * request it below (L2CAP_CR_PEND + CS_NO_INFO). */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	return 0;
}
2943
/* Handle an incoming L2CAP Connection Response.  On success, move the
 * channel into BT_CONFIG and immediately send our Configure Request;
 * on "pending", just flag it; on any failure, delete the channel with
 * ECONNREFUSED so the userspace connect() fails.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* Look up the channel by our CID, or -- when the remote did not
	 * assign one yet -- by the request identifier.
	 * NOTE(review): sk is bh_unlock'ed below, so the lookup helpers
	 * presumably return it locked -- confirm there. */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return 0;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Kick off configuration right away. */
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2993
/* Handle an incoming L2CAP Configure Request.  Options may span
 * multiple requests (continuation flag 0x0001); fragments accumulate
 * in pi->conf_req until the final one arrives, then the whole set is
 * parsed and answered.  Completes the channel when both directions
 * are configured.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	/* Channel already being torn down: ignore the request. */
	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* No agreeable configuration: disconnect the channel. */
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: enable CRC16 FCS unless
		 * both sides agreed to drop it, then go live. */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
				l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* We answered the remote but haven't sent our own request yet. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
3079
3080 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3081 {
3082 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3083 u16 scid, flags, result;
3084 struct sock *sk;
3085 int len = cmd->len - sizeof(*rsp);
3086
3087 scid = __le16_to_cpu(rsp->scid);
3088 flags = __le16_to_cpu(rsp->flags);
3089 result = __le16_to_cpu(rsp->result);
3090
3091 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3092 scid, flags, result);
3093
3094 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3095 if (!sk)
3096 return 0;
3097
3098 switch (result) {
3099 case L2CAP_CONF_SUCCESS:
3100 l2cap_conf_rfc_get(sk, rsp->data, len);
3101 break;
3102
3103 case L2CAP_CONF_UNACCEPT:
3104 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3105 char req[64];
3106
3107 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3108 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3109 goto done;
3110 }
3111
3112 /* throw out any old stored conf requests */
3113 result = L2CAP_CONF_SUCCESS;
3114 len = l2cap_parse_conf_rsp(sk, rsp->data,
3115 len, req, &result);
3116 if (len < 0) {
3117 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3118 goto done;
3119 }
3120
3121 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3122 L2CAP_CONF_REQ, len, req);
3123 l2cap_pi(sk)->num_conf_req++;
3124 if (result != L2CAP_CONF_SUCCESS)
3125 goto done;
3126 break;
3127 }
3128
3129 default:
3130 sk->sk_err = ECONNRESET;
3131 l2cap_sock_set_timer(sk, HZ * 5);
3132 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3133 goto done;
3134 }
3135
3136 if (flags & 0x01)
3137 goto done;
3138
3139 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3140
3141 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3142 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3143 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3144 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3145
3146 sk->sk_state = BT_CONNECTED;
3147 l2cap_pi(sk)->next_tx_seq = 0;
3148 l2cap_pi(sk)->expected_tx_seq = 0;
3149 __skb_queue_head_init(TX_QUEUE(sk));
3150 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3151 l2cap_ertm_init(sk);
3152
3153 l2cap_chan_ready(sk);
3154 }
3155
3156 done:
3157 bh_unlock_sock(sk);
3158 return 0;
3159 }
3160
/* Handle an incoming L2CAP Disconnection Request: acknowledge it with
 * a Disconnection Response, shut the socket down, remove the channel
 * and kill the socket if it is no longer referenced.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The request's dcid names our local channel. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	/* Echo both CIDs back in the response before tearing down. */
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* ECONNRESET is reported to userspace: remote closed on us. */
	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3189
3190 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3191 {
3192 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3193 u16 dcid, scid;
3194 struct sock *sk;
3195
3196 scid = __le16_to_cpu(rsp->scid);
3197 dcid = __le16_to_cpu(rsp->dcid);
3198
3199 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3200
3201 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3202 if (!sk)
3203 return 0;
3204
3205 l2cap_chan_del(sk, 0);
3206 bh_unlock_sock(sk);
3207
3208 l2cap_sock_kill(sk);
3209 return 0;
3210 }
3211
/* Handle an incoming L2CAP Information Request.  Answers the feature
 * mask (advertising ERTM/Streaming/FCS only when enable_ertm is set)
 * and the fixed channels map; any other type gets "not supported".
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte rsp header + 4-byte feature mask. */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (enable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte rsp header + 8-byte fixed channel bitmap. */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3251
/* Handle an incoming L2CAP Information Response.  Records the remote
 * feature mask; if it advertises fixed channels, chases that with a
 * second request, otherwise the info exchange is done and pending
 * channels are started.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* Answer arrived: stop the info-request timeout. */
	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed-channels query before
			 * declaring the exchange finished. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3290
/* Process an skb received on the L2CAP signaling channel.  The skb may
 * carry several concatenated commands; each is dispatched to its
 * handler, and any handler error is answered with a Command Reject.
 * Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of the signaling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Truncated payload or identifier 0 (reserved) means a
		 * corrupted packet: stop parsing entirely. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the request payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			/* Any handler failure is reported to the peer
			 * as a Command Reject. */
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3380
/* Verify (and strip) the trailing 2-byte FCS of a received frame when
 * the channel uses CRC16.  Returns 0 on match or FCS disabled,
 * -EINVAL on mismatch.
 *
 * The CRC covers the L2CAP header + control field (hdr_size bytes in
 * front of skb->data) plus the remaining payload.
 */
static int l2cap_check_fcs(struct l2cap_pinfo *pi,  struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* skb_trim only shrinks skb->len; the two FCS bytes are
		 * still readable at skb->data + skb->len afterwards. */
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EINVAL;
	}
	return 0;
}
3396
/* After an ERTM poll/final exchange, acknowledge the current receive
 * state: send RNR if we are locally busy, retransmit if the remote was
 * busy, push pending I-frames, and fall back to an RR if nothing was
 * sent at all (so the peer still gets an acknowledgement).
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	/* Every S-frame below carries our current ReqSeq. */
	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	spin_lock_bh(&pi->send_lock);
	l2cap_ertm_send(sk);
	spin_unlock_bh(&pi->send_lock);

	/* Nothing (I-frame or RNR) went out: send a plain RR so the
	 * peer still receives an acknowledgement. */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
3426
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by each frame's distance from buffer_seq (sequence
 * numbers are modulo 64).  Returns -EINVAL for a duplicate tx_seq,
 * 0 otherwise.
 */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Distance of this frame from the receive window base. */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		/* Frame already queued: reject the duplicate. */
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame that lies beyond us: insert here. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Largest offset seen so far: append at the tail. */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
3469
/* Reassemble ERTM I-frames into SDUs (per the SAR bits in "control")
 * and deliver completed SDUs to the socket receive queue.
 *
 * Returns 0 when the frame was consumed, -ENOMEM when delivery must be
 * retried later (frame NOT consumed; SAR_RETRY flag set so the END
 * payload isn't appended twice), or another negative error from
 * sock_queue_rcv_skb.  Protocol violations (unexpected SAR state,
 * oversized SDU) disconnect the channel.
 */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* Unsegmented frame while a segmented SDU is in
		 * progress is a protocol violation. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* Total SDU length rides in the first two bytes. */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* On a retry (previous delivery failed with -ENOMEM)
		 * the END payload was already appended: skip it. */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

	/* note: "drop" intentionally falls through into "disconnect" --
	 * a dropped SDU also tears the ERTM channel down. */
disconnect:
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
3577
/* Workqueue handler for the ERTM local-busy condition.  Repeatedly
 * retries delivering the frames parked on BUSY_QUEUE to the receive
 * queue; once the queue drains (or on error/too many tries) it clears
 * the busy state and, if an RNR was sent, polls the remote with an
 * RR+P so transmission resumes.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;
	u16 control;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Bounded retries: give up and disconnect when the
		 * receiver stays congested for too long. */
		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			goto done;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			goto done;
		}

		/* Sleep (socket released) before the next attempt. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			goto done;

		/* Drain as much of the busy queue as possible; requeue
		 * the frame that fails and try again later. */
		while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
			control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
			err = l2cap_ertm_reassembly_sdu(sk, skb, control);
			if (err < 0) {
				skb_queue_head(BUSY_QUEUE(sk), skb);
				break;
			}

			pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		}

		if (!skb)
			break;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the remote we were busy (RNR): poll it with RR+P and
	 * wait for the F-bit so it resumes sending. */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3653
/* Push a received ERTM I-frame towards reassembly/delivery.  While the
 * local-busy condition holds, frames are parked on BUSY_QUEUE instead.
 * A delivery failure triggers the busy condition: frame queued, RNR
 * sent, and the busy workqueue scheduled to retry.  Returns the
 * reassembly result (>= 0 on success, negative when busy/failed).
 */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Already busy: just park the frame for the worker. */
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return -EBUSY;
	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition: delivery failed, enter local busy. */
	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	/* Tell the remote to stop sending (RNR with our ReqSeq). */
	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	/* No point acking while busy. */
	del_timer(&pi->ack_timer);

	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
3688
3689 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3690 {
3691 struct l2cap_pinfo *pi = l2cap_pi(sk);
3692 struct sk_buff *_skb;
3693 int err = -EINVAL;
3694
3695 /*
3696 * TODO: We have to notify the userland if some data is lost with the
3697 * Streaming Mode.
3698 */
3699
3700 switch (control & L2CAP_CTRL_SAR) {
3701 case L2CAP_SDU_UNSEGMENTED:
3702 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3703 kfree_skb(pi->sdu);
3704 break;
3705 }
3706
3707 err = sock_queue_rcv_skb(sk, skb);
3708 if (!err)
3709 return 0;
3710
3711 break;
3712
3713 case L2CAP_SDU_START:
3714 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3715 kfree_skb(pi->sdu);
3716 break;
3717 }
3718
3719 pi->sdu_len = get_unaligned_le16(skb->data);
3720 skb_pull(skb, 2);
3721
3722 if (pi->sdu_len > pi->imtu) {
3723 err = -EMSGSIZE;
3724 break;
3725 }
3726
3727 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3728 if (!pi->sdu) {
3729 err = -ENOMEM;
3730 break;
3731 }
3732
3733 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3734
3735 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3736 pi->partial_sdu_len = skb->len;
3737 err = 0;
3738 break;
3739
3740 case L2CAP_SDU_CONTINUE:
3741 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3742 break;
3743
3744 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3745
3746 pi->partial_sdu_len += skb->len;
3747 if (pi->partial_sdu_len > pi->sdu_len)
3748 kfree_skb(pi->sdu);
3749 else
3750 err = 0;
3751
3752 break;
3753
3754 case L2CAP_SDU_END:
3755 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3756 break;
3757
3758 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3759
3760 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3761 pi->partial_sdu_len += skb->len;
3762
3763 if (pi->partial_sdu_len > pi->imtu)
3764 goto drop;
3765
3766 if (pi->partial_sdu_len == pi->sdu_len) {
3767 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3768 err = sock_queue_rcv_skb(sk, _skb);
3769 if (err < 0)
3770 kfree_skb(_skb);
3771 }
3772 err = 0;
3773
3774 drop:
3775 kfree_skb(pi->sdu);
3776 break;
3777 }
3778
3779 kfree_skb(skb);
3780 return err;
3781 }
3782
3783 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3784 {
3785 struct sk_buff *skb;
3786 u16 control;
3787
3788 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3789 if (bt_cb(skb)->tx_seq != tx_seq)
3790 break;
3791
3792 skb = skb_dequeue(SREJ_QUEUE(sk));
3793 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3794 l2cap_ertm_reassembly_sdu(sk, skb, control);
3795 l2cap_pi(sk)->buffer_seq_srej =
3796 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3797 tx_seq = (tx_seq + 1) % 64;
3798 }
3799 }
3800
3801 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3802 {
3803 struct l2cap_pinfo *pi = l2cap_pi(sk);
3804 struct srej_list *l, *tmp;
3805 u16 control;
3806
3807 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3808 if (l->tx_seq == tx_seq) {
3809 list_del(&l->list);
3810 kfree(l);
3811 return;
3812 }
3813 control = L2CAP_SUPER_SELECT_REJECT;
3814 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3815 l2cap_send_sframe(pi, control);
3816 list_del(&l->list);
3817 list_add_tail(&l->list, SREJ_LIST(sk));
3818 }
3819 }
3820
3821 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3822 {
3823 struct l2cap_pinfo *pi = l2cap_pi(sk);
3824 struct srej_list *new;
3825 u16 control;
3826
3827 while (tx_seq != pi->expected_tx_seq) {
3828 control = L2CAP_SUPER_SELECT_REJECT;
3829 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3830 l2cap_send_sframe(pi, control);
3831
3832 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3833 new->tx_seq = pi->expected_tx_seq;
3834 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3835 list_add_tail(&new->list, SREJ_LIST(sk));
3836 }
3837 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3838 }
3839
3840 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3841 {
3842 struct l2cap_pinfo *pi = l2cap_pi(sk);
3843 u8 tx_seq = __get_txseq(rx_control);
3844 u8 req_seq = __get_reqseq(rx_control);
3845 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3846 int tx_seq_offset, expected_tx_seq_offset;
3847 int num_to_ack = (pi->tx_win/6) + 1;
3848 int err = 0;
3849
3850 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3851
3852 if (L2CAP_CTRL_FINAL & rx_control &&
3853 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3854 del_timer(&pi->monitor_timer);
3855 if (pi->unacked_frames > 0)
3856 __mod_retrans_timer();
3857 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3858 }
3859
3860 pi->expected_ack_seq = req_seq;
3861 l2cap_drop_acked_frames(sk);
3862
3863 if (tx_seq == pi->expected_tx_seq)
3864 goto expected;
3865
3866 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3867 if (tx_seq_offset < 0)
3868 tx_seq_offset += 64;
3869
3870 /* invalid tx_seq */
3871 if (tx_seq_offset >= pi->tx_win) {
3872 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3873 goto drop;
3874 }
3875
3876 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3877 goto drop;
3878
3879 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3880 struct srej_list *first;
3881
3882 first = list_first_entry(SREJ_LIST(sk),
3883 struct srej_list, list);
3884 if (tx_seq == first->tx_seq) {
3885 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3886 l2cap_check_srej_gap(sk, tx_seq);
3887
3888 list_del(&first->list);
3889 kfree(first);
3890
3891 if (list_empty(SREJ_LIST(sk))) {
3892 pi->buffer_seq = pi->buffer_seq_srej;
3893 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3894 l2cap_send_ack(pi);
3895 }
3896 } else {
3897 struct srej_list *l;
3898
3899 /* duplicated tx_seq */
3900 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3901 goto drop;
3902
3903 list_for_each_entry(l, SREJ_LIST(sk), list) {
3904 if (l->tx_seq == tx_seq) {
3905 l2cap_resend_srejframe(sk, tx_seq);
3906 return 0;
3907 }
3908 }
3909 l2cap_send_srejframe(sk, tx_seq);
3910 }
3911 } else {
3912 expected_tx_seq_offset =
3913 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3914 if (expected_tx_seq_offset < 0)
3915 expected_tx_seq_offset += 64;
3916
3917 /* duplicated tx_seq */
3918 if (tx_seq_offset < expected_tx_seq_offset)
3919 goto drop;
3920
3921 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3922
3923 INIT_LIST_HEAD(SREJ_LIST(sk));
3924 pi->buffer_seq_srej = pi->buffer_seq;
3925
3926 __skb_queue_head_init(SREJ_QUEUE(sk));
3927 __skb_queue_head_init(BUSY_QUEUE(sk));
3928 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3929
3930 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3931
3932 l2cap_send_srejframe(sk, tx_seq);
3933
3934 del_timer(&pi->ack_timer);
3935 }
3936 return 0;
3937
3938 expected:
3939 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3940
3941 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3942 bt_cb(skb)->tx_seq = tx_seq;
3943 bt_cb(skb)->sar = sar;
3944 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3945 return 0;
3946 }
3947
3948 err = l2cap_push_rx_skb(sk, skb, rx_control);
3949 if (err < 0)
3950 return 0;
3951
3952 if (rx_control & L2CAP_CTRL_FINAL) {
3953 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3954 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3955 else
3956 l2cap_retransmit_frames(sk);
3957 }
3958
3959 __mod_ack_timer();
3960
3961 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3962 if (pi->num_acked == num_to_ack - 1)
3963 l2cap_send_ack(pi);
3964
3965 return 0;
3966
3967 drop:
3968 kfree_skb(skb);
3969 return 0;
3970 }
3971
/* Handle a Receiver Ready (RR) S-frame.
 *
 * The RR acknowledges all frames up to its ReqSeq; the P/F bits select
 * the follow-up action: answer a poll, treat an F-bit as the reply to
 * our own poll, or simply resume transmission.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polled us: we must answer with the F-bit set. */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Reply to our poll: retransmit unless a REJ already
		 * triggered the retransmission (REJ_ACT). */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			l2cap_send_ack(pi);
		} else {
			/* Window may have opened: push queued I-frames. */
			spin_lock_bh(&pi->send_lock);
			l2cap_ertm_send(sk);
			spin_unlock_bh(&pi->send_lock);
		}
	}
}
4015
4016 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4017 {
4018 struct l2cap_pinfo *pi = l2cap_pi(sk);
4019 u8 tx_seq = __get_reqseq(rx_control);
4020
4021 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4022
4023 pi->expected_ack_seq = tx_seq;
4024 l2cap_drop_acked_frames(sk);
4025
4026 if (rx_control & L2CAP_CTRL_FINAL) {
4027 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4028 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4029 else
4030 l2cap_retransmit_frames(sk);
4031 } else {
4032 l2cap_retransmit_frames(sk);
4033
4034 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4035 pi->conn_state |= L2CAP_CONN_REJ_ACT;
4036 }
4037 }
/* Handle a Selective Reject (SREJ) S-frame: the peer asks for the single
 * frame with sequence number ReqSeq to be retransmitted. The P/F bits
 * decide whether the SREJ also acknowledges earlier frames and whether
 * it is the answer to one of our polls.
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P-bit acknowledges up to tx_seq as well. */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		spin_lock_bh(&pi->send_lock);
		l2cap_ertm_send(sk);
		spin_unlock_bh(&pi->send_lock);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			/* Remember which frame was retransmitted so a later
			 * SREJ+F for the same seq isn't sent twice. */
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
4074
/* Handle a Receiver Not Ready (RNR) S-frame: the peer is busy, so stop
 * retransmitting. The RNR still acknowledges frames up to its ReqSeq.
 */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	/* A poll must be answered with the F-bit set. */
	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	/* In SREJ recovery: keep asking for the missing frames. */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
4099
4100 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4101 {
4102 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4103
4104 if (L2CAP_CTRL_FINAL & rx_control &&
4105 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4106 del_timer(&l2cap_pi(sk)->monitor_timer);
4107 if (l2cap_pi(sk)->unacked_frames > 0)
4108 __mod_retrans_timer();
4109 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4110 }
4111
4112 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4113 case L2CAP_SUPER_RCV_READY:
4114 l2cap_data_channel_rrframe(sk, rx_control);
4115 break;
4116
4117 case L2CAP_SUPER_REJECT:
4118 l2cap_data_channel_rejframe(sk, rx_control);
4119 break;
4120
4121 case L2CAP_SUPER_SELECT_REJECT:
4122 l2cap_data_channel_srejframe(sk, rx_control);
4123 break;
4124
4125 case L2CAP_SUPER_RCV_NOT_READY:
4126 l2cap_data_channel_rnrframe(sk, rx_control);
4127 break;
4128 }
4129
4130 kfree_skb(skb);
4131 return 0;
4132 }
4133
/* Entry point for data arriving on a connection-oriented channel.
 *
 * Looks up the channel by source CID and dispatches according to the
 * channel mode (Basic, ERTM or Streaming). In ERTM/Streaming the 16-bit
 * control field is validated (FCS, length, ReqSeq window) before the
 * frame is handed to the I-frame or S-frame handlers.
 *
 * Always consumes @skb and returns 0; the socket found by
 * l2cap_get_chan_by_scid() is unlocked at 'done'.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq, req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask retransmission.
		 */
		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* A starting SAR fragment carries a 2-byte SDU length. */
		if (__is_sar_start(control) && __is_iframe(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		if (len > pi->mps) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		/* The ReqSeq must lie within [expected_ack_seq,
		 * next_tx_seq] modulo 64, else the peer acked frames we
		 * never sent. */
		req_seq = __get_reqseq(control);
		req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
		if (req_seq_offset < 0)
			req_seq_offset += 64;

		next_tx_seq_offset =
			(pi->next_tx_seq - pi->expected_ack_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* check for invalid req-seq */
		if (req_seq_offset > next_tx_seq_offset) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		if (__is_iframe(control)) {
			/* len went negative: header shorter than claimed. */
			if (len < 0) {
				l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
				goto drop;
			}

			l2cap_data_channel_iframe(sk, control, skb);
		} else {
			/* S-frames carry no payload at all. */
			if (len != 0) {
				l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
				goto drop;
			}

			l2cap_data_channel_sframe(sk, control, skb);
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming mode never uses S-frames. */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		/* No retransmission in streaming mode: just resync the
		 * expected sequence number on a gap. */
		tx_seq = __get_txseq(control);

		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
4269
4270 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4271 {
4272 struct sock *sk;
4273
4274 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4275 if (!sk)
4276 goto drop;
4277
4278 BT_DBG("sk %p, len %d", sk, skb->len);
4279
4280 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4281 goto drop;
4282
4283 if (l2cap_pi(sk)->imtu < skb->len)
4284 goto drop;
4285
4286 if (!sock_queue_rcv_skb(sk, skb))
4287 goto done;
4288
4289 drop:
4290 kfree_skb(skb);
4291
4292 done:
4293 if (sk)
4294 bh_unlock_sock(sk);
4295 return 0;
4296 }
4297
4298 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4299 {
4300 struct l2cap_hdr *lh = (void *) skb->data;
4301 u16 cid, len;
4302 __le16 psm;
4303
4304 skb_pull(skb, L2CAP_HDR_SIZE);
4305 cid = __le16_to_cpu(lh->cid);
4306 len = __le16_to_cpu(lh->len);
4307
4308 if (len != skb->len) {
4309 kfree_skb(skb);
4310 return;
4311 }
4312
4313 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4314
4315 switch (cid) {
4316 case L2CAP_CID_SIGNALING:
4317 l2cap_sig_channel(conn, skb);
4318 break;
4319
4320 case L2CAP_CID_CONN_LESS:
4321 psm = get_unaligned_le16(skb->data);
4322 skb_pull(skb, 2);
4323 l2cap_conless_channel(conn, psm, skb);
4324 break;
4325
4326 default:
4327 l2cap_data_channel(conn, cid, skb);
4328 break;
4329 }
4330 }
4331
4332 /* ---- L2CAP interface with lower layer (HCI) ---- */
4333
4334 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4335 {
4336 int exact = 0, lm1 = 0, lm2 = 0;
4337 register struct sock *sk;
4338 struct hlist_node *node;
4339
4340 if (type != ACL_LINK)
4341 return 0;
4342
4343 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4344
4345 /* Find listening sockets and check their link_mode */
4346 read_lock(&l2cap_sk_list.lock);
4347 sk_for_each(sk, node, &l2cap_sk_list.head) {
4348 if (sk->sk_state != BT_LISTEN)
4349 continue;
4350
4351 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4352 lm1 |= HCI_LM_ACCEPT;
4353 if (l2cap_pi(sk)->role_switch)
4354 lm1 |= HCI_LM_MASTER;
4355 exact++;
4356 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4357 lm2 |= HCI_LM_ACCEPT;
4358 if (l2cap_pi(sk)->role_switch)
4359 lm2 |= HCI_LM_MASTER;
4360 }
4361 }
4362 read_unlock(&l2cap_sk_list.lock);
4363
4364 return exact ? lm1 : lm2;
4365 }
4366
4367 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4368 {
4369 struct l2cap_conn *conn;
4370
4371 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4372
4373 if (hcon->type != ACL_LINK)
4374 return 0;
4375
4376 if (!status) {
4377 conn = l2cap_conn_add(hcon, status);
4378 if (conn)
4379 l2cap_conn_ready(conn);
4380 } else
4381 l2cap_conn_del(hcon, bt_err(status));
4382
4383 return 0;
4384 }
4385
/* HCI callback: report the disconnect reason to use for this link.
 * Falls back to 0x13 (remote user terminated connection) when no L2CAP
 * state exists for the connection.
 */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (hcon->type != ACL_LINK || !conn)
		return 0x13;

	return conn->disc_reason;
}
4397
4398 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4399 {
4400 BT_DBG("hcon %p reason %d", hcon, reason);
4401
4402 if (hcon->type != ACL_LINK)
4403 return 0;
4404
4405 l2cap_conn_del(hcon, bt_err(reason));
4406
4407 return 0;
4408 }
4409
4410 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4411 {
4412 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4413 return;
4414
4415 if (encrypt == 0x00) {
4416 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4417 l2cap_sock_clear_timer(sk);
4418 l2cap_sock_set_timer(sk, HZ * 5);
4419 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4420 __l2cap_sock_close(sk, ECONNREFUSED);
4421 } else {
4422 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4423 l2cap_sock_clear_timer(sk);
4424 }
4425 }
4426
/* HCI callback: authentication/encryption procedure finished for the
 * link. Walk every channel on the connection and advance the ones that
 * were waiting for security to complete.
 *
 * Runs with the channel list read-locked; each socket is bh-locked only
 * while its state is inspected and updated.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* A connect request is already in flight for this channel. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channels only need the encryption check. */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: send the deferred
				 * connection request. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: schedule a quick close. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming connection was waiting for security:
			 * answer the pending connect request now. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
4500
/* HCI callback: ACL data received for this connection.
 *
 * Reassembles L2CAP frames that span multiple ACL packets: an ACL_START
 * packet carries the L2CAP header (and possibly the whole frame); the
 * remainder arrives in continuation packets accumulated into
 * conn->rx_skb until conn->rx_len reaches zero. Any framing violation
 * marks the connection unreliable (ECOMM) and drops the fragment.
 *
 * Always consumes @skb; returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start frame while reassembly is pending means we
		 * lost the tail of the previous frame. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Need at least the 2-byte length field of the header. */
		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4588
/* debugfs: dump one line per L2CAP socket — addresses, socket state,
 * PSM, source/destination CIDs, MTUs and security level. */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
4611
/* debugfs open hook: bind the seq_file show routine. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4616
/* seq_file operations for the debugfs "l2cap" file. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file; created in l2cap_init(), removed in
 * l2cap_exit(). */
static struct dentry *l2cap_debugfs;
4625
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
4645
/* Registered with the Bluetooth socket layer to create L2CAP sockets. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
4651
/* Hooks registered with the HCI core for link events and ACL data. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4662
4663 static int __init l2cap_init(void)
4664 {
4665 int err;
4666
4667 err = proto_register(&l2cap_proto, 0);
4668 if (err < 0)
4669 return err;
4670
4671 _busy_wq = create_singlethread_workqueue("l2cap");
4672 if (!_busy_wq)
4673 goto error;
4674
4675 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4676 if (err < 0) {
4677 BT_ERR("L2CAP socket registration failed");
4678 goto error;
4679 }
4680
4681 err = hci_register_proto(&l2cap_hci_proto);
4682 if (err < 0) {
4683 BT_ERR("L2CAP protocol registration failed");
4684 bt_sock_unregister(BTPROTO_L2CAP);
4685 goto error;
4686 }
4687
4688 if (bt_debugfs) {
4689 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4690 bt_debugfs, NULL, &l2cap_debugfs_fops);
4691 if (!l2cap_debugfs)
4692 BT_ERR("Failed to create L2CAP debug file");
4693 }
4694
4695 BT_INFO("L2CAP ver %s", VERSION);
4696 BT_INFO("L2CAP socket layer initialized");
4697
4698 return 0;
4699
4700 error:
4701 proto_unregister(&l2cap_proto);
4702 return err;
4703 }
4704
/* Module unload: tear down everything registered by l2cap_init(). */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Make sure no busy-work is still in flight before the code and
	 * the workqueue go away. */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
4720
/* Exported no-op: referencing this symbol from another module makes the
 * kernel auto-load l2cap without needing any other export. */
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4728
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime-togglable switch for Enhanced Retransmission Mode support. */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");