Bluetooth: Actively send request for Basic Mode
[deliverable/linux.git] / net / bluetooth / l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth L2CAP core and sockets. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
47 #include <net/sock.h>
48
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
51
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
55
56 #define VERSION "2.14"
57
58 static int enable_ertm = 0;
59
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
62
63 static const struct proto_ops l2cap_sock_ops;
64
65 static struct workqueue_struct *_busy_wq;
66
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
69 };
70
71 static void l2cap_busy_work(struct work_struct *work);
72
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
76
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
79
80 /* ---- L2CAP timers ---- */
/* sk_timer callback: a socket-level timeout (connect/config/disconnect
 * guard) expired.  Runs in timer (softirq) context, hence bh_lock_sock().
 * Drops the reference taken when the timer was armed. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	/* Map the state we timed out in to the error userspace will see */
	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* Free the socket now if it is zapped and orphaned */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
105
/* Arm (or re-arm) the socket timer; sk_reset_timer takes a socket
 * reference that l2cap_sock_timeout()/sk_stop_timer() releases. */
static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
111
/* Cancel a pending socket timer and drop its reference, if armed */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
117
118 /* ---- L2CAP channels ---- */
119 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
120 {
121 struct sock *s;
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->dcid == cid)
124 break;
125 }
126 return s;
127 }
128
129 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
130 {
131 struct sock *s;
132 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
133 if (l2cap_pi(s)->scid == cid)
134 break;
135 }
136 return s;
137 }
138
/* Find channel with given SCID.
 * Returns the socket with bh_lock_sock() held, or NULL.  The socket lock
 * is taken while still under the list read lock so the channel cannot be
 * unlinked between lookup and lock. */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
151
152 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
153 {
154 struct sock *s;
155 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
156 if (l2cap_pi(s)->ident == ident)
157 break;
158 }
159 return s;
160 }
161
/* Find the channel waiting on signalling ident; returns it with
 * bh_lock_sock() held (taken under the list read lock), or NULL. */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
172
173 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
174 {
175 u16 cid = L2CAP_CID_DYN_START;
176
177 for (; cid < L2CAP_CID_DYN_END; cid++) {
178 if (!__l2cap_get_chan_by_scid(l, cid))
179 return cid;
180 }
181
182 return 0;
183 }
184
/* Push the socket onto the head of the connection's channel list.
 * Takes a reference dropped by l2cap_chan_unlink().  Caller must hold
 * the list write lock. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
196
/* Remove the socket from the connection's channel list (doubly linked)
 * and drop the reference taken by __l2cap_chan_link().  Takes the list
 * write lock itself. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	/* __sock_put: caller still holds its own reference */
	__sock_put(sk);
}
213
/* Attach a socket to a connection: assign CIDs by socket type, link it
 * into the channel list, and queue it on the listening parent (incoming
 * connections).  Caller must hold the list write lock. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* 0x13: default disconnect reason "remote user terminated" */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
245
/* Delete channel.
 * Must be called on the locked socket.  Detaches it from the connection,
 * marks it closed/zapped, wakes the owner (or the listening parent), and
 * for ERTM channels tears down timers, queues and the SREJ list. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Incoming channel not yet accepted: drop it from the
		 * accept queue and wake the listener */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));

		/* Free any outstanding selective-reject bookkeeping */
		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
294
/* Service level security */
/* Map the socket's security level to an HCI authentication requirement
 * and ask the HCI layer to enforce it.  PSM 0x0001 (SDP) never bonds,
 * and a LOW level on that PSM is downgraded to BT_SECURITY_SDP before
 * hci_conn_security() is called — keep that ordering. */
static inline int l2cap_check_security(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	__u8 auth_type;

	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
								auth_type);
}
326
327 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
328 {
329 u8 id;
330
331 /* Get next available identificator.
332 * 1 - 128 are used by kernel.
333 * 129 - 199 are reserved.
334 * 200 - 254 are used by utilities like l2ping, etc.
335 */
336
337 spin_lock_bh(&conn->lock);
338
339 if (++conn->tx_ident > 128)
340 conn->tx_ident = 1;
341
342 id = conn->tx_ident;
343
344 spin_unlock_bh(&conn->lock);
345
346 return id;
347 }
348
349 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
350 {
351 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
352
353 BT_DBG("code 0x%2.2x", code);
354
355 if (!skb)
356 return;
357
358 hci_send_acl(conn->hcon, skb, 0);
359 }
360
/* Build and transmit one ERTM supervisory (S) frame carrying 'control'.
 * Consumes any pending Final/Poll bit requests recorded in conn_state
 * and appends a CRC16 FCS when that has been negotiated. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* basic header + control */

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the FCS field */

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Piggy-back a pending Final bit, then clear the request */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Likewise for a pending Poll bit */
	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	/* FCS covers everything up to (but not including) the FCS field */
	if (pi->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	hci_send_acl(pi->conn->hcon, skb, 0);
}
406
407 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
408 {
409 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
410 control |= L2CAP_SUPER_RCV_NOT_READY;
411 pi->conn_state |= L2CAP_CONN_RNR_SENT;
412 } else
413 control |= L2CAP_SUPER_RCV_READY;
414
415 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
416
417 l2cap_send_sframe(pi, control);
418 }
419
420 static inline int __l2cap_no_conn_pending(struct sock *sk)
421 {
422 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
423 }
424
/* Drive channel establishment: once the remote feature mask is known
 * (info request/response exchange done) and security allows, send the
 * Connect Request; otherwise start the feature-mask query first and let
 * its completion (or timeout) restart us via l2cap_conn_start(). */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Query still in flight — wait for it to finish */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		/* First channel on this link: request the feature mask and
		 * arm a timeout in case the peer never answers. */
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
458
/* Send an L2CAP Disconnect Request for this channel and move the socket
 * to BT_DISCONN with 'err'.  Pending TX data is dropped and, for ERTM,
 * the retransmission machinery is stopped first. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
482
/* ---- L2CAP connections ---- */
/* Walk every channel on the link and push its state machine forward:
 * send Connect Requests for sockets in BT_CONNECT and Connect Responses
 * for incoming channels parked in BT_CONNECT2.  Called once the feature
 * mask exchange has completed (or timed out). */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Only connection-oriented sockets take part */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (l2cap_check_security(sk) &&
					__l2cap_no_conn_pending(sk)) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			/* Response carries our CIDs swapped: peer's dcid is
			 * our scid and vice versa */
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			if (l2cap_check_security(sk)) {
				if (bt_sk(sk)->defer_setup) {
					/* Userspace must accept() first;
					 * report authorization pending and
					 * wake the listener. */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					parent->sk_data_ready(parent, 0);

				} else {
					sk->sk_state = BT_CONFIG;
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
546
/* The underlying ACL link came up: raw/connectionless sockets become
 * connected immediately; connection-oriented sockets in BT_CONNECT start
 * the L2CAP-level establishment via l2cap_do_start(). */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
572
573 /* Notify sockets that we cannot guaranty reliability anymore */
574 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
575 {
576 struct l2cap_chan_list *l = &conn->chan_list;
577 struct sock *sk;
578
579 BT_DBG("conn %p", conn);
580
581 read_lock(&l->lock);
582
583 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
584 if (l2cap_pi(sk)->force_reliable)
585 sk->sk_err = err;
586 }
587
588 read_unlock(&l->lock);
589 }
590
/* Info-request timer: the peer never answered the feature-mask query.
 * Mark the exchange done anyway and let the channels proceed. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
600
/* Attach an l2cap_conn to an ACL link, allocating it on first use.
 * Returns the existing conn if one is already attached, or NULL when
 * status is non-zero or allocation fails.  GFP_ATOMIC: may be called
 * from HCI event (softirq) context. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* 0x13: default disconnect reason "remote user terminated" */
	conn->disc_reason = 0x13;

	return conn;
}
633
/* Tear down the l2cap_conn attached to an ACL link: kill every channel
 * with 'err', cancel a pending info-request timer and free the conn. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop a partially reassembled frame, if any (kfree_skb(NULL) ok) */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
660
661 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
662 {
663 struct l2cap_chan_list *l = &conn->chan_list;
664 write_lock_bh(&l->lock);
665 __l2cap_chan_add(conn, sk, parent);
666 write_unlock_bh(&l->lock);
667 }
668
669 /* ---- Socket interface ---- */
670 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
671 {
672 struct sock *sk;
673 struct hlist_node *node;
674 sk_for_each(sk, node, &l2cap_sk_list.head)
675 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
676 goto found;
677 sk = NULL;
678 found:
679 return sk;
680 }
681
/* Find socket with psm and source bdaddr.
 * Returns closest match: an exact source-address match wins, otherwise
 * a BDADDR_ANY wildcard listener is returned.  Caller must hold
 * l2cap_sk_list.lock. */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* node is non-NULL only when the loop broke on an exact match;
	 * otherwise fall back to the wildcard candidate (may be NULL) */
	return node ? sk : sk1;
}
706
/* Find socket with given address (psm, src).
 * Returns locked socket: bh_lock_sock() is taken while still holding the
 * global list read lock, or NULL when no match exists. */
static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *s;
	read_lock(&l2cap_sk_list.lock);
	s = __l2cap_get_sock_by_psm(state, psm, src);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);
	return s;
}
719
/* sk_destruct callback: release queued skbs when the socket is freed */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
727
/* Close every not-yet-accepted child of a listening socket, then mark
 * the listener itself closed and zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
741
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.  Unlinks it from the global list
 * and drops the list's reference; a socket still attached to a struct
 * socket (sk_socket) is left for the owner to release. */
static void l2cap_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
757
/* Per-state socket shutdown.  Caller holds the socket lock.
 * Established channels get a proper Disconnect Request (with a timer so
 * we don't wait forever); half-open incoming channels (BT_CONNECT2) get
 * a rejecting Connect Response; everything else is torn down locally. */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Bound the disconnect handshake */
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk, reason);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* defer_setup means userspace declined to accept */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
811
/* Must be called on unlocked socket. */
/* Full close path: stop the timer, run the per-state shutdown with
 * ECONNRESET, then free the socket if it ended up zapped and orphaned. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
821
/* Initialise a fresh L2CAP socket.  Accepted sockets inherit all channel
 * parameters from the listening parent; standalone sockets get defaults
 * (ERTM for SOCK_STREAM when the module option enables it). */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->conf_state = l2cap_pi(parent)->conf_state;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs = l2cap_pi(parent)->fcs;
		pi->max_tx = l2cap_pi(parent)->max_tx;
		pi->tx_win = l2cap_pi(parent)->tx_win;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		if (enable_ertm && sk->sk_type == SOCK_STREAM) {
			pi->mode = L2CAP_MODE_ERTM;
			/* STATE2_DEVICE: mode was chosen by this device */
			pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		} else {
			pi->mode = L2CAP_MODE_BASIC;
		}
		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
		pi->fcs = L2CAP_FCS_CRC16;
		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	skb_queue_head_init(BUSY_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
867
/* Protocol descriptor: sk_alloc() sizes sockets for struct l2cap_pinfo */
static struct proto l2cap_proto = {
	.name = "L2CAP",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct l2cap_pinfo)
};
873
/* Allocate and initialise one L2CAP socket: wire up destructor, default
 * connect timeout and the state timer, and link it into the global
 * socket list.  Returns NULL on allocation failure. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
898
/* socket(2) backend: validate the socket type (raw sockets need
 * CAP_NET_RAW unless created in-kernel), then allocate and initialise
 * the L2CAP socket. */
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
			     int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}
924
/* bind(2) backend: record source bdaddr and PSM.  Binding to an explicit
 * CID is not supported; privileged PSMs (< 0x1001) require
 * CAP_NET_BIND_SERVICE; SDP/RFCOMM PSMs get the SDP security level. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy at most sizeof(la); zero-fill guards short sockaddrs */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
				!capable(CAP_NET_BIND_SERVICE)) {
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		/* 0x0001 = SDP, 0x0003 = RFCOMM: no authentication needed */
		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
978
/* Establish the outgoing connection: pick an HCI route, map the socket's
 * security level to an authentication requirement, create/reuse the ACL
 * link, attach the channel and either finish immediately (link already
 * up) or wait for l2cap_conn_ready().  Returns 0 or a negative errno
 * (-ENOMEM also covers hci_connect()/l2cap_conn_add() failure). */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	if (sk->sk_type == SOCK_RAW) {
		/* Raw sockets use dedicated bonding */
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		/* SDP never bonds; LOW is downgraded to the SDP level */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* ACL link already up: raw/dgram sockets are done,
		 * connection-oriented ones start L2CAP establishment */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1069
/* connect(2) backend: validate the address and channel mode, record the
 * destination, kick off l2cap_do_connect() and (unless non-blocking)
 * wait for BT_CONNECTED. */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || alen < sizeof(addr->sa_family) ||
			addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy at most sizeof(la); zero-fill guards short sockaddrs */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets must name a PSM */
	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
			&& !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM/streaming only available behind the module option */
	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
1146
/* listen(2) backend: only bound connection-oriented sockets may listen;
 * a socket bound without a PSM gets the first free odd dynamic PSM
 * (0x1001..0x10ff) auto-assigned. */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
			|| sk->sk_state != BT_BOUND) {
		err = -EBADFD;
		goto done;
	}

	/* ERTM/streaming only available behind the module option */
	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* Dynamic PSMs are odd: step by 2 */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
1205
/* accept(2) backend: block (wake-one, interruptible) until a child
 * connection appears on the accept queue, honouring O_NONBLOCK and
 * re-checking the listen state after every wakeup since the socket lock
 * is dropped while sleeping. */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* Listener may have been closed while we slept */
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1261
1262 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1263 {
1264 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1265 struct sock *sk = sock->sk;
1266
1267 BT_DBG("sock %p, sk %p", sock, sk);
1268
1269 addr->sa_family = AF_BLUETOOTH;
1270 *len = sizeof(struct sockaddr_l2);
1271
1272 if (peer) {
1273 la->l2_psm = l2cap_pi(sk)->psm;
1274 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1275 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1276 } else {
1277 la->l2_psm = l2cap_pi(sk)->sport;
1278 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1279 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1280 }
1281
1282 return 0;
1283 }
1284
/* Poll (in HZ/5 slices) until every transmitted ERTM frame has been
 * acked or the connection is gone.  Called with the socket locked;
 * drops the lock while sleeping.  Returns 0, a signal errno, or a
 * pending socket error.
 */
static int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the slice once it has expired so the loop keeps
		 * polling instead of spinning with a zero timeout. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1315
/* ERTM monitor timer handler: fires while waiting for the remote to
 * answer a poll.  Disconnects after remote_max_tx retries, otherwise
 * re-arms itself and sends another RR/RNR with the poll bit set.
 * Runs in timer (softirq) context, hence bh_lock_sock().
 */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1335
/* ERTM retransmission timer handler: no ack arrived in time, so start
 * the monitor sequence - reset the retry counter, arm the monitor
 * timer, flag that we now wait for an F-bit, and poll the remote.
 * Runs in timer (softirq) context.
 */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1351
/* Free acked frames from the head of the TX queue: everything before
 * expected_ack_seq has been acknowledged by the peer.  Stops the
 * retransmission timer once no frames remain outstanding.
 */
static void l2cap_drop_acked_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk))) &&
			l2cap_pi(sk)->unacked_frames) {
		/* Head frame not yet acked - stop here. */
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
			break;

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);

		l2cap_pi(sk)->unacked_frames--;
	}

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);
}
1370
1371 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1372 {
1373 struct l2cap_pinfo *pi = l2cap_pi(sk);
1374
1375 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1376
1377 hci_send_acl(pi->conn->hcon, skb, 0);
1378 }
1379
/* Streaming-mode transmit: send every queued frame immediately.  The
 * TX sequence number (modulo 64) and the optional FCS are stamped into
 * a clone; the original is freed right away since streaming mode never
 * retransmits.  Always returns 0.
 */
static int l2cap_streaming_send(struct sock *sk)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = sk->sk_send_head)) {
		/* NOTE(review): skb_clone(GFP_ATOMIC) may return NULL and is
		 * used unchecked here - verify against upstream. */
		tx_skb = skb_clone(skb, GFP_ATOMIC);

		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

		/* FCS occupies the last two bytes of the PDU. */
		if (pi->fcs == L2CAP_FCS_CRC16) {
			fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
			put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
		}

		l2cap_do_send(sk, tx_skb);

		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;

		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			sk->sk_send_head = NULL;
		else
			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);
	}
	return 0;
}
1412
/* Retransmit the queued I-frame whose sequence number is @tx_seq (in
 * response to an SREJ, for example).  Disconnects the channel if the
 * frame has already been sent remote_max_tx times.
 */
static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb, *tx_skb;
	u16 control, fcs;

	skb = skb_peek(TX_QUEUE(sk));
	if (!skb)
		return;

	/* Walk the TX queue looking for the requested sequence number;
	 * give up silently if it is not queued. */
	do {
		if (bt_cb(skb)->tx_seq == tx_seq)
			break;

		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			return;

	} while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));

	if (pi->remote_max_tx &&
			bt_cb(skb)->retries == pi->remote_max_tx) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
		return;
	}

	/* NOTE(review): skb_clone(GFP_ATOMIC) may return NULL and is used
	 * unchecked below - verify against upstream. */
	tx_skb = skb_clone(skb, GFP_ATOMIC);
	bt_cb(skb)->retries++;
	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);

	/* Piggy-back the F-bit if one is owed to the remote. */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);

	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

	if (pi->fcs == L2CAP_FCS_CRC16) {
		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
	}

	l2cap_do_send(sk, tx_skb);
}
1459
1460 static int l2cap_ertm_send(struct sock *sk)
1461 {
1462 struct sk_buff *skb, *tx_skb;
1463 struct l2cap_pinfo *pi = l2cap_pi(sk);
1464 u16 control, fcs;
1465 int nsent = 0;
1466
1467 if (sk->sk_state != BT_CONNECTED)
1468 return -ENOTCONN;
1469
1470 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1471
1472 if (pi->remote_max_tx &&
1473 bt_cb(skb)->retries == pi->remote_max_tx) {
1474 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1475 break;
1476 }
1477
1478 tx_skb = skb_clone(skb, GFP_ATOMIC);
1479
1480 bt_cb(skb)->retries++;
1481
1482 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1483 control &= L2CAP_CTRL_SAR;
1484
1485 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1486 control |= L2CAP_CTRL_FINAL;
1487 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1488 }
1489 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1490 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1491 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1492
1493
1494 if (pi->fcs == L2CAP_FCS_CRC16) {
1495 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1496 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1497 }
1498
1499 l2cap_do_send(sk, tx_skb);
1500
1501 __mod_retrans_timer();
1502
1503 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1504 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1505
1506 pi->unacked_frames++;
1507 pi->frames_sent++;
1508
1509 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1510 sk->sk_send_head = NULL;
1511 else
1512 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1513
1514 nsent++;
1515 }
1516
1517 return nsent;
1518 }
1519
/* Rewind the send pointer to the oldest unacked frame and retransmit
 * everything from there via l2cap_ertm_send().  Takes pi->send_lock
 * itself; returns what l2cap_ertm_send() returns.
 */
static int l2cap_retransmit_frames(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int ret;

	spin_lock_bh(&pi->send_lock);

	if (!skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = TX_QUEUE(sk)->next;

	/* Restart numbering from the last sequence the peer acked. */
	pi->next_tx_seq = pi->expected_ack_seq;
	ret = l2cap_ertm_send(sk);

	spin_unlock_bh(&pi->send_lock);

	return ret;
}
1537
/* Acknowledge received I-frames.  If we are locally busy, send RNR;
 * otherwise try to piggy-back the ack on pending I-frames and only
 * fall back to an explicit RR S-frame when nothing was transmitted.
 */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;
	int nframes;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	spin_lock_bh(&pi->send_lock);
	nframes = l2cap_ertm_send(sk);
	spin_unlock_bh(&pi->send_lock);

	/* I-frames went out carrying the ReqSeq - no S-frame needed. */
	if (nframes > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1563
/* Send an SREJ S-frame (with F-bit set) for the newest entry on the
 * SREJ list, i.e. the most recently detected missing frame.
 */
static void l2cap_send_srejtail(struct sock *sk)
{
	struct srej_list *tail;
	u16 control;

	control = L2CAP_SUPER_SELECT_REJECT;
	control |= L2CAP_CTRL_FINAL;

	/* list tail == last SREJ queued */
	tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(l2cap_pi(sk), control);
}
1577
1578 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1579 {
1580 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1581 struct sk_buff **frag;
1582 int err, sent = 0;
1583
1584 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1585 return -EFAULT;
1586
1587 sent += count;
1588 len -= count;
1589
1590 /* Continuation fragments (no L2CAP header) */
1591 frag = &skb_shinfo(skb)->frag_list;
1592 while (len) {
1593 count = min_t(unsigned int, conn->mtu, len);
1594
1595 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1596 if (!*frag)
1597 return -EFAULT;
1598 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1599 return -EFAULT;
1600
1601 sent += count;
1602 len -= count;
1603
1604 frag = &(*frag)->next;
1605 }
1606
1607 return sent;
1608 }
1609
/* Build a connectionless (SOCK_DGRAM) PDU: L2CAP header plus a 2-byte
 * PSM field, followed by the user data.  Returns the skb or an
 * ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for the PSM */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* First fragment is bounded by the ACL MTU; the rest goes into
	 * continuation fragments. */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1638
/* Build a Basic Mode PDU: plain L2CAP header followed by user data.
 * Returns the skb or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1666
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control
 * field, optional 2-byte SDU length (first frame of a segmented SDU,
 * when @sdulen != 0) and optional 2-byte FCS placeholder (filled in at
 * transmit time).  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for control field */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (sdulen)
		hlen += 2;	/* SDU length field */

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* trailing FCS */

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Reserve room for the FCS; the real value is computed when the
	 * frame is actually sent. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1711
/* Segment an SDU larger than the remote MPS into a START frame (which
 * carries the total SDU length), CONTINUE frames, and a final END
 * frame, then splice them onto the TX queue.  Returns the number of
 * bytes queued or a negative errno (nothing queued on error).
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* START frame: full MPS payload plus the SDU length field. */
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* All-or-nothing: drop already-built fragments. */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	spin_lock_bh(&pi->send_lock);
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;
	spin_unlock_bh(&pi->send_lock);

	return size;
}
1759
1760 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1761 {
1762 struct sock *sk = sock->sk;
1763 struct l2cap_pinfo *pi = l2cap_pi(sk);
1764 struct sk_buff *skb;
1765 u16 control;
1766 int err;
1767
1768 BT_DBG("sock %p, sk %p", sock, sk);
1769
1770 err = sock_error(sk);
1771 if (err)
1772 return err;
1773
1774 if (msg->msg_flags & MSG_OOB)
1775 return -EOPNOTSUPP;
1776
1777 lock_sock(sk);
1778
1779 if (sk->sk_state != BT_CONNECTED) {
1780 err = -ENOTCONN;
1781 goto done;
1782 }
1783
1784 /* Connectionless channel */
1785 if (sk->sk_type == SOCK_DGRAM) {
1786 skb = l2cap_create_connless_pdu(sk, msg, len);
1787 if (IS_ERR(skb)) {
1788 err = PTR_ERR(skb);
1789 } else {
1790 l2cap_do_send(sk, skb);
1791 err = len;
1792 }
1793 goto done;
1794 }
1795
1796 switch (pi->mode) {
1797 case L2CAP_MODE_BASIC:
1798 /* Check outgoing MTU */
1799 if (len > pi->omtu) {
1800 err = -EINVAL;
1801 goto done;
1802 }
1803
1804 /* Create a basic PDU */
1805 skb = l2cap_create_basic_pdu(sk, msg, len);
1806 if (IS_ERR(skb)) {
1807 err = PTR_ERR(skb);
1808 goto done;
1809 }
1810
1811 l2cap_do_send(sk, skb);
1812 err = len;
1813 break;
1814
1815 case L2CAP_MODE_ERTM:
1816 case L2CAP_MODE_STREAMING:
1817 /* Entire SDU fits into one PDU */
1818 if (len <= pi->remote_mps) {
1819 control = L2CAP_SDU_UNSEGMENTED;
1820 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1821 if (IS_ERR(skb)) {
1822 err = PTR_ERR(skb);
1823 goto done;
1824 }
1825 __skb_queue_tail(TX_QUEUE(sk), skb);
1826
1827 if (pi->mode == L2CAP_MODE_ERTM)
1828 spin_lock_bh(&pi->send_lock);
1829
1830 if (sk->sk_send_head == NULL)
1831 sk->sk_send_head = skb;
1832
1833 if (pi->mode == L2CAP_MODE_ERTM)
1834 spin_unlock_bh(&pi->send_lock);
1835 } else {
1836 /* Segment SDU into multiples PDUs */
1837 err = l2cap_sar_segment_sdu(sk, msg, len);
1838 if (err < 0)
1839 goto done;
1840 }
1841
1842 if (pi->mode == L2CAP_MODE_STREAMING) {
1843 err = l2cap_streaming_send(sk);
1844 } else {
1845 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1846 pi->conn_state && L2CAP_CONN_WAIT_F) {
1847 err = len;
1848 break;
1849 }
1850 spin_lock_bh(&pi->send_lock);
1851 err = l2cap_ertm_send(sk);
1852 spin_unlock_bh(&pi->send_lock);
1853 }
1854
1855 if (err >= 0)
1856 err = len;
1857 break;
1858
1859 default:
1860 BT_DBG("bad state %1.1x", pi->mode);
1861 err = -EINVAL;
1862 }
1863
1864 done:
1865 release_sock(sk);
1866 return err;
1867 }
1868
/* recvmsg() for L2CAP sockets.  The first read on a deferred-setup
 * channel completes the connection (sends the pending connect
 * response) instead of returning data; subsequent reads go through the
 * generic Bluetooth receive path.
 */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;

		sk->sk_state = BT_CONFIG;

		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
1895
/* Legacy SOL_L2CAP setsockopt: L2CAP_OPTIONS (MTUs, mode, FCS, TX
 * window) and L2CAP_LM (link mode / security level mapping).
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		/* Seed with current values so a short copy_from_user only
		 * overrides the fields the caller provided. */
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = l2cap_pi(sk)->mode;
		opts.fcs      = l2cap_pi(sk)->fcs;
		opts.max_tx   = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->mode = opts.mode;
		switch (l2cap_pi(sk)->mode) {
		case L2CAP_MODE_BASIC:
			l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			if (enable_ertm)
				break;
			/* fall through */
		default:
			err = -EINVAL;
			break;
		}

		/* NOTE(review): the remaining options are applied even when
		 * the mode check above set err = -EINVAL - confirm this is
		 * intended. */
		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->fcs  = opts.fcs;
		l2cap_pi(sk)->max_tx = opts.max_tx;
		l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Highest requested flag wins: SECURE > ENCRYPT > AUTH. */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1975
/* setsockopt() entry point: SOL_L2CAP goes to the legacy handler,
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP.
 */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = BT_SECURITY_LOW;	/* default for short writes */

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		/* Only meaningful before the channel is connected. */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2040
/* Legacy SOL_L2CAP getsockopt: L2CAP_OPTIONS, L2CAP_LM (security level
 * mapped back to link-mode flags) and L2CAP_CONNINFO (HCI handle and
 * remote device class; connection must be established or in deferred
 * setup).
 */
static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	struct l2cap_conninfo cinfo;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = l2cap_pi(sk)->mode;
		opts.fcs      = l2cap_pi(sk)->fcs;
		opts.max_tx   = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, len, sizeof(opts));
		if (copy_to_user(optval, (char *) &opts, len))
			err = -EFAULT;

		break;

	case L2CAP_LM:
		/* Map the stored security level back onto the legacy
		 * link-mode bit flags. */
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_LOW:
			opt = L2CAP_LM_AUTH;
			break;
		case BT_SECURITY_MEDIUM:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
			break;
		case BT_SECURITY_HIGH:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
							L2CAP_LM_SECURE;
			break;
		default:
			opt = 0;
			break;
		}

		if (l2cap_pi(sk)->role_switch)
			opt |= L2CAP_LM_MASTER;

		if (l2cap_pi(sk)->force_reliable)
			opt |= L2CAP_LM_RELIABLE;

		if (put_user(opt, (u32 __user *) optval))
			err = -EFAULT;
		break;

	case L2CAP_CONNINFO:
		if (sk->sk_state != BT_CONNECTED &&
					!(sk->sk_state == BT_CONNECT2 &&
						bt_sk(sk)->defer_setup)) {
			err = -ENOTCONN;
			break;
		}

		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2124
/* getsockopt() entry point: SOL_L2CAP goes to the legacy handler,
 * SOL_BLUETOOTH returns BT_SECURITY and BT_DEFER_SETUP.
 */
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = l2cap_pi(sk)->sec_level;

		len = min_t(unsigned int, len, sizeof(sec));
		if (copy_to_user(optval, (char *) &sec, len))
			err = -EFAULT;

		break;

	case BT_DEFER_SETUP:
		/* Only meaningful before the channel is connected. */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2179
/* Shut down an L2CAP socket: for ERTM, first wait for outstanding
 * frames to be acked, then close the channel; honour SO_LINGER by
 * waiting for BT_CLOSED.  The @how argument is ignored - shutdown is
 * always full-duplex.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}

	/* A pending socket error takes precedence over success. */
	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}
2210
/* Release an L2CAP socket: shut the channel down, detach the sock from
 * its socket and let l2cap_sock_kill() free it when safe.
 */
static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}
2227
/* Configuration is complete: clear config state and wake whoever is
 * waiting - the connecting task for an outgoing channel, or the parent
 * (listening) socket for an incoming one.
 */
static void l2cap_chan_ready(struct sock *sk)
{
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	l2cap_pi(sk)->conf_state = 0;
	l2cap_sock_clear_timer(sk);

	if (!parent) {
		/* Outgoing channel.
		 * Wake up socket sleeping on connect.
		 */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		/* Incoming channel.
		 * Wake up socket sleeping on accept.
		 */
		parent->sk_data_ready(parent, 0);
	}
}
2250
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* Each raw socket gets its own clone; skip it on
		 * allocation failure rather than aborting the walk. */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* Clone is freed here if the receive queue rejects it. */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
2277
/* ---- L2CAP signalling commands ---- */

/* Build a signalling-channel command PDU: L2CAP header (CID 0x0001),
 * command header (@code, @ident, @dlen) and @dlen bytes of payload,
 * split into continuation fragments when it exceeds the ACL MTU.
 * Returns the skb or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Payload room left in the first fragment. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head and every fragment chained so far. */
	kfree_skb(skb);
	return NULL;
}
2337
/* Parse one type-length-value configuration option at *ptr, advancing
 * *ptr past it.  1/2/4-byte values are decoded (little-endian) into
 * *val; any other length stores a pointer to the raw bytes instead.
 * Returns the total size consumed.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((__le16 *) opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((__le32 *) opt->val));
		break;

	default:
		/* Opaque option (e.g. RFC): hand back a pointer. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2370
/* Append one type-length-value configuration option at *ptr and
 * advance *ptr past it.  1/2/4-byte values are stored little-endian;
 * any other length treats @val as a pointer to @len raw bytes.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		*((__le16 *) opt->val) = cpu_to_le16(val);
		break;

	case 4:
		*((__le32 *) opt->val) = cpu_to_le32(val);
		break;

	default:
		/* Opaque option: @val is a pointer to the bytes. */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2400
/* ERTM ack timer handler: send a (possibly piggy-backed) ack for
 * frames received but not yet acknowledged.  Timer context, hence
 * bh_lock_sock().
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2409
/* Initialize per-channel ERTM state: sequence counters, the three
 * protocol timers (retransmission, monitor, ack), the SREJ/busy
 * queues, the TX-path lock and the local-busy work item.  Called once
 * when a channel enters ERTM mode.
 */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));
	spin_lock_init(&l2cap_pi(sk)->send_lock);

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
}
2431
2432 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2433 {
2434 u32 local_feat_mask = l2cap_feat_mask;
2435 if (enable_ertm)
2436 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2437
2438 switch (mode) {
2439 case L2CAP_MODE_ERTM:
2440 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2441 case L2CAP_MODE_STREAMING:
2442 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2443 default:
2444 return 0x00;
2445 }
2446 }
2447
2448 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2449 {
2450 switch (mode) {
2451 case L2CAP_MODE_STREAMING:
2452 case L2CAP_MODE_ERTM:
2453 if (l2cap_mode_supported(mode, remote_feat_mask))
2454 return mode;
2455 /* fall through */
2456 default:
2457 return L2CAP_MODE_BASIC;
2458 }
2459 }
2460
/* Build an outgoing Configure Request for the channel into @data.
 * On the first request the mode may be renegotiated against the
 * remote's feature mask; subsequent requests keep the settled mode.
 * Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection only happens on the very first config
	 * request/response exchange. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Unless the mode was pinned by the user/device, fall back
		 * to whatever the remote supports. */
		if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
			pi->mode = l2cap_select_mode(rfc.mode,
					pi->conn->feat_mask);
			break;
		}

		/* Pinned mode the remote cannot do: tear the channel down. */
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		break;
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Only advertise the MTU when it differs from the default. */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = pi->tx_win;
		rfc.max_transmit    = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Cap the PDU size so a maximum frame (headers + FCS) still
		 * fits into the ACL MTU. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Offer to disable the FCS when we don't want it or the
		 * remote asked for no FCS. */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* RFC option is always sent, even for Basic Mode. */
	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2558
2559 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2560 {
2561 struct l2cap_pinfo *pi = l2cap_pi(sk);
2562 struct l2cap_conf_rsp *rsp = data;
2563 void *ptr = rsp->data;
2564 void *req = pi->conf_req;
2565 int len = pi->conf_len;
2566 int type, hint, olen;
2567 unsigned long val;
2568 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2569 u16 mtu = L2CAP_DEFAULT_MTU;
2570 u16 result = L2CAP_CONF_SUCCESS;
2571
2572 BT_DBG("sk %p", sk);
2573
2574 while (len >= L2CAP_CONF_OPT_SIZE) {
2575 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2576
2577 hint = type & L2CAP_CONF_HINT;
2578 type &= L2CAP_CONF_MASK;
2579
2580 switch (type) {
2581 case L2CAP_CONF_MTU:
2582 mtu = val;
2583 break;
2584
2585 case L2CAP_CONF_FLUSH_TO:
2586 pi->flush_to = val;
2587 break;
2588
2589 case L2CAP_CONF_QOS:
2590 break;
2591
2592 case L2CAP_CONF_RFC:
2593 if (olen == sizeof(rfc))
2594 memcpy(&rfc, (void *) val, olen);
2595 break;
2596
2597 case L2CAP_CONF_FCS:
2598 if (val == L2CAP_FCS_NONE)
2599 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2600
2601 break;
2602
2603 default:
2604 if (hint)
2605 break;
2606
2607 result = L2CAP_CONF_UNKNOWN;
2608 *((u8 *) ptr++) = type;
2609 break;
2610 }
2611 }
2612
2613 if (pi->num_conf_rsp || pi->num_conf_req)
2614 goto done;
2615
2616 switch (pi->mode) {
2617 case L2CAP_MODE_STREAMING:
2618 case L2CAP_MODE_ERTM:
2619 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2620 pi->mode = l2cap_select_mode(rfc.mode,
2621 pi->conn->feat_mask);
2622 break;
2623 }
2624
2625 if (pi->mode != rfc.mode)
2626 return -ECONNREFUSED;
2627
2628 break;
2629 }
2630
2631 done:
2632 if (pi->mode != rfc.mode) {
2633 result = L2CAP_CONF_UNACCEPT;
2634 rfc.mode = pi->mode;
2635
2636 if (pi->num_conf_rsp == 1)
2637 return -ECONNREFUSED;
2638
2639 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2640 sizeof(rfc), (unsigned long) &rfc);
2641 }
2642
2643
2644 if (result == L2CAP_CONF_SUCCESS) {
2645 /* Configure output options and let the other side know
2646 * which ones we don't like. */
2647
2648 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2649 result = L2CAP_CONF_UNACCEPT;
2650 else {
2651 pi->omtu = mtu;
2652 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2653 }
2654 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2655
2656 switch (rfc.mode) {
2657 case L2CAP_MODE_BASIC:
2658 pi->fcs = L2CAP_FCS_NONE;
2659 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2660 break;
2661
2662 case L2CAP_MODE_ERTM:
2663 pi->remote_tx_win = rfc.txwin_size;
2664 pi->remote_max_tx = rfc.max_transmit;
2665 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2666 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2667
2668 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2669
2670 rfc.retrans_timeout =
2671 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2672 rfc.monitor_timeout =
2673 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2674
2675 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2676
2677 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2678 sizeof(rfc), (unsigned long) &rfc);
2679
2680 break;
2681
2682 case L2CAP_MODE_STREAMING:
2683 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2684 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2685
2686 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2687
2688 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2689
2690 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2691 sizeof(rfc), (unsigned long) &rfc);
2692
2693 break;
2694
2695 default:
2696 result = L2CAP_CONF_UNACCEPT;
2697
2698 memset(&rfc, 0, sizeof(rfc));
2699 rfc.mode = pi->mode;
2700 }
2701
2702 if (result == L2CAP_CONF_SUCCESS)
2703 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2704 }
2705 rsp->scid = cpu_to_le16(pi->dcid);
2706 rsp->result = cpu_to_le16(result);
2707 rsp->flags = cpu_to_le16(0x0000);
2708
2709 return ptr - data;
2710 }
2711
2712 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2713 {
2714 struct l2cap_pinfo *pi = l2cap_pi(sk);
2715 struct l2cap_conf_req *req = data;
2716 void *ptr = req->data;
2717 int type, olen;
2718 unsigned long val;
2719 struct l2cap_conf_rfc rfc;
2720
2721 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2722
2723 while (len >= L2CAP_CONF_OPT_SIZE) {
2724 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2725
2726 switch (type) {
2727 case L2CAP_CONF_MTU:
2728 if (val < L2CAP_DEFAULT_MIN_MTU) {
2729 *result = L2CAP_CONF_UNACCEPT;
2730 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2731 } else
2732 pi->omtu = val;
2733 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2734 break;
2735
2736 case L2CAP_CONF_FLUSH_TO:
2737 pi->flush_to = val;
2738 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2739 2, pi->flush_to);
2740 break;
2741
2742 case L2CAP_CONF_RFC:
2743 if (olen == sizeof(rfc))
2744 memcpy(&rfc, (void *)val, olen);
2745
2746 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2747 rfc.mode != pi->mode)
2748 return -ECONNREFUSED;
2749
2750 pi->mode = rfc.mode;
2751 pi->fcs = 0;
2752
2753 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2754 sizeof(rfc), (unsigned long) &rfc);
2755 break;
2756 }
2757 }
2758
2759 if (*result == L2CAP_CONF_SUCCESS) {
2760 switch (rfc.mode) {
2761 case L2CAP_MODE_ERTM:
2762 pi->remote_tx_win = rfc.txwin_size;
2763 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2764 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2765 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2766 break;
2767 case L2CAP_MODE_STREAMING:
2768 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2769 }
2770 }
2771
2772 req->dcid = cpu_to_le16(pi->dcid);
2773 req->flags = cpu_to_le16(0x0000);
2774
2775 return ptr - data;
2776 }
2777
2778 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2779 {
2780 struct l2cap_conf_rsp *rsp = data;
2781 void *ptr = rsp->data;
2782
2783 BT_DBG("sk %p", sk);
2784
2785 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2786 rsp->result = cpu_to_le16(result);
2787 rsp->flags = cpu_to_le16(flags);
2788
2789 return ptr - data;
2790 }
2791
2792 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2793 {
2794 struct l2cap_pinfo *pi = l2cap_pi(sk);
2795 int type, olen;
2796 unsigned long val;
2797 struct l2cap_conf_rfc rfc;
2798
2799 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2800
2801 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2802 return;
2803
2804 while (len >= L2CAP_CONF_OPT_SIZE) {
2805 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2806
2807 switch (type) {
2808 case L2CAP_CONF_RFC:
2809 if (olen == sizeof(rfc))
2810 memcpy(&rfc, (void *)val, olen);
2811 goto done;
2812 }
2813 }
2814
2815 done:
2816 switch (rfc.mode) {
2817 case L2CAP_MODE_ERTM:
2818 pi->remote_tx_win = rfc.txwin_size;
2819 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2820 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2821 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2822 break;
2823 case L2CAP_MODE_STREAMING:
2824 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2825 }
2826 }
2827
/* Handle an incoming Command Reject.  Only reason 0x0000 ("command not
 * understood") is acted upon: if it rejects our pending Information
 * Request, treat the feature-mask exchange as done and start the
 * queued channels anyway.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;

	if (rej->reason != 0x0000)
		return 0;

	/* Match the reject against our outstanding info request. */
	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
					cmd->ident == conn->info_ident) {
		del_timer(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2847
/* Handle an incoming Connection Request: find a listening socket for
 * the PSM, enforce link security, allocate and register a child
 * channel, and send the Connection Response.  When the response is
 * "pending - no info" an Information Request for the feature mask is
 * kicked off as well.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm.
	 * NOTE(review): presumably returns with parent bh-locked — the
	 * response path does bh_unlock_sock(parent); confirm in helper. */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* Initialize the child from the listening socket and register
	 * it on the connection's channel list. */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet: answer "pending" and ask
		 * for it below. */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	return 0;
}
2964
/* Handle an incoming Connection Response.  On success move the channel
 * to BT_CONFIG and immediately send our Configure Request; on "pending"
 * just mark the channel; on any error tear the channel down.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* Locate the channel by our source CID, or — when the remote did
	 * not allocate one — by the request identifier. */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return 0;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
3014
/* Handle an incoming Configure Request.  Fragmented requests (the
 * continuation flag set) are accumulated in pi->conf_req; the complete
 * request is then parsed, responded to, and — when both directions are
 * configured — the channel is moved to BT_CONNECTED.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unrecoverable disagreement: drop the channel. */
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* FCS defaults to CRC16 unless both sides disabled it. */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
		    l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* Our own Configure Request has not gone out yet: send it now. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
3100
3101 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3102 {
3103 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3104 u16 scid, flags, result;
3105 struct sock *sk;
3106 int len = cmd->len - sizeof(*rsp);
3107
3108 scid = __le16_to_cpu(rsp->scid);
3109 flags = __le16_to_cpu(rsp->flags);
3110 result = __le16_to_cpu(rsp->result);
3111
3112 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3113 scid, flags, result);
3114
3115 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3116 if (!sk)
3117 return 0;
3118
3119 switch (result) {
3120 case L2CAP_CONF_SUCCESS:
3121 l2cap_conf_rfc_get(sk, rsp->data, len);
3122 break;
3123
3124 case L2CAP_CONF_UNACCEPT:
3125 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3126 char req[64];
3127
3128 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3129 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3130 goto done;
3131 }
3132
3133 /* throw out any old stored conf requests */
3134 result = L2CAP_CONF_SUCCESS;
3135 len = l2cap_parse_conf_rsp(sk, rsp->data,
3136 len, req, &result);
3137 if (len < 0) {
3138 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3139 goto done;
3140 }
3141
3142 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3143 L2CAP_CONF_REQ, len, req);
3144 l2cap_pi(sk)->num_conf_req++;
3145 if (result != L2CAP_CONF_SUCCESS)
3146 goto done;
3147 break;
3148 }
3149
3150 default:
3151 sk->sk_err = ECONNRESET;
3152 l2cap_sock_set_timer(sk, HZ * 5);
3153 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3154 goto done;
3155 }
3156
3157 if (flags & 0x01)
3158 goto done;
3159
3160 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3161
3162 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3163 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3164 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3165 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3166
3167 sk->sk_state = BT_CONNECTED;
3168 l2cap_pi(sk)->next_tx_seq = 0;
3169 l2cap_pi(sk)->expected_tx_seq = 0;
3170 __skb_queue_head_init(TX_QUEUE(sk));
3171 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3172 l2cap_ertm_init(sk);
3173
3174 l2cap_chan_ready(sk);
3175 }
3176
3177 done:
3178 bh_unlock_sock(sk);
3179 return 0;
3180 }
3181
/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response, shut the socket down and remove the channel.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The request's dcid is our local (source) CID. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* Unlock before killing: l2cap_sock_kill frees the socket. */
	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3210
/* Handle an incoming Disconnection Response to a disconnect we sent:
 * remove the channel and kill the socket.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* Unlock before killing: l2cap_sock_kill frees the socket. */
	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3232
3233 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3234 {
3235 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3236 u16 type;
3237
3238 type = __le16_to_cpu(req->type);
3239
3240 BT_DBG("type 0x%4.4x", type);
3241
3242 if (type == L2CAP_IT_FEAT_MASK) {
3243 u8 buf[8];
3244 u32 feat_mask = l2cap_feat_mask;
3245 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3246 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3247 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3248 if (enable_ertm)
3249 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3250 | L2CAP_FEAT_FCS;
3251 put_unaligned_le32(feat_mask, rsp->data);
3252 l2cap_send_cmd(conn, cmd->ident,
3253 L2CAP_INFO_RSP, sizeof(buf), buf);
3254 } else if (type == L2CAP_IT_FIXED_CHAN) {
3255 u8 buf[12];
3256 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3257 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3258 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3259 memcpy(buf + 4, l2cap_fixed_chan, 8);
3260 l2cap_send_cmd(conn, cmd->ident,
3261 L2CAP_INFO_RSP, sizeof(buf), buf);
3262 } else {
3263 struct l2cap_info_rsp rsp;
3264 rsp.type = cpu_to_le16(type);
3265 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3266 l2cap_send_cmd(conn, cmd->ident,
3267 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3268 }
3269
3270 return 0;
3271 }
3272
/* Handle an incoming Information Response.  After the feature mask is
 * received, a follow-up fixed-channel query is issued when the remote
 * supports it; once all info is in, queued channels are started.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Chain a fixed-channel query before declaring
			 * the info exchange done. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3311
/* Process the L2CAP signalling channel: iterate over every command in
 * the skb, dispatch it to its handler, and reply with a Command Reject
 * when a handler fails.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of all signalling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A payload longer than what remains, or a zero ident,
		 * means the frame is corrupt: stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo back the request payload unchanged. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
3401
3402 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3403 {
3404 u16 our_fcs, rcv_fcs;
3405 int hdr_size = L2CAP_HDR_SIZE + 2;
3406
3407 if (pi->fcs == L2CAP_FCS_CRC16) {
3408 skb_trim(skb, skb->len - 2);
3409 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3410 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3411
3412 if (our_fcs != rcv_fcs)
3413 return -EINVAL;
3414 }
3415 return 0;
3416 }
3417
/* After a poll/final exchange, resume transmission: advertise RNR if we
 * are locally busy, retransmit if the remote was busy, push pending
 * I-frames, and fall back to a plain RR when nothing was sent so the
 * peer still gets an acknowledgement.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	spin_lock_bh(&pi->send_lock);
	l2cap_ertm_send(sk);
	spin_unlock_bh(&pi->send_lock);

	/* Nothing went out and we are not busy: send an explicit RR so
	 * the ack is not lost. */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
3446
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq distance from buffer_seq (modulo-64 sequence space).
 * Returns -EINVAL if a frame with the same tx_seq is already queued.
 */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Normalize sequence numbers relative to buffer_seq so the
	 * modulo-64 wraparound orders correctly. */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		/* Duplicate frame: reject. */
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* Found the first queued frame that sorts after us:
		 * insert in front of it. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Sorts after everything currently queued. */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
3489
/* Reassemble an ERTM SDU from its SAR fragments and deliver it to the
 * socket receive queue.  SAR protocol violations tear the channel down
 * (the "drop" path intentionally falls through into "disconnect").
 * On receive-queue pressure the END fragment sets SAR_RETRY so the SDU
 * delivery can be retried later without re-appending the payload.
 */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented frame while a segmented SDU is in
		 * progress is a protocol violation. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two payload bytes carry the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* On a retry the payload was already appended; skip the
		 * length checks and copy. */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

	/* fall through: a dropped SDU also disconnects the channel */
disconnect:
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
3597
/* Workqueue handler that drains the local-busy queue.  It repeatedly
 * retries delivering the queued frames to the receive buffer, giving
 * up (and disconnecting) after L2CAP_LOCAL_BUSY_TRIES attempts.  Once
 * the queue empties it clears the busy condition and, if an RNR had
 * been sent, polls the remote side with an RR to resume the stream.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;
	u16 control;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			goto done;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			goto done;
		}

		/* Sleep without the socket lock so the reader can make
		 * room in the receive buffer. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			goto done;

		/* Retry delivery; requeue at the head on failure so
		 * ordering is preserved. */
		while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
			control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
			err = l2cap_ertm_reassembly_sdu(sk, skb, control);
			if (err < 0) {
				skb_queue_head(BUSY_QUEUE(sk), skb);
				break;
			}

			pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		}

		if (!skb)
			break;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the remote "not ready" earlier: poll it with RR so it
	 * starts sending again, and arm the monitor timer. */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3675
/* Push a received I-frame towards the socket.  While locally busy the
 * frame is simply queued.  If delivery fails, enter the local-busy
 * state: queue the frame, send RNR to the remote, and hand the backlog
 * to the busy workqueue for retry.
 */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return -EBUSY;
	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("sk %p, Enter local busy", sk);

	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	/* Tell the remote side to stop sending. */
	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	del_timer(&pi->ack_timer);

	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
3712
3713 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3714 {
3715 struct l2cap_pinfo *pi = l2cap_pi(sk);
3716 struct sk_buff *_skb;
3717 int err = -EINVAL;
3718
3719 /*
3720 * TODO: We have to notify the userland if some data is lost with the
3721 * Streaming Mode.
3722 */
3723
3724 switch (control & L2CAP_CTRL_SAR) {
3725 case L2CAP_SDU_UNSEGMENTED:
3726 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3727 kfree_skb(pi->sdu);
3728 break;
3729 }
3730
3731 err = sock_queue_rcv_skb(sk, skb);
3732 if (!err)
3733 return 0;
3734
3735 break;
3736
3737 case L2CAP_SDU_START:
3738 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3739 kfree_skb(pi->sdu);
3740 break;
3741 }
3742
3743 pi->sdu_len = get_unaligned_le16(skb->data);
3744 skb_pull(skb, 2);
3745
3746 if (pi->sdu_len > pi->imtu) {
3747 err = -EMSGSIZE;
3748 break;
3749 }
3750
3751 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3752 if (!pi->sdu) {
3753 err = -ENOMEM;
3754 break;
3755 }
3756
3757 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3758
3759 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3760 pi->partial_sdu_len = skb->len;
3761 err = 0;
3762 break;
3763
3764 case L2CAP_SDU_CONTINUE:
3765 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3766 break;
3767
3768 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3769
3770 pi->partial_sdu_len += skb->len;
3771 if (pi->partial_sdu_len > pi->sdu_len)
3772 kfree_skb(pi->sdu);
3773 else
3774 err = 0;
3775
3776 break;
3777
3778 case L2CAP_SDU_END:
3779 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3780 break;
3781
3782 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3783
3784 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3785 pi->partial_sdu_len += skb->len;
3786
3787 if (pi->partial_sdu_len > pi->imtu)
3788 goto drop;
3789
3790 if (pi->partial_sdu_len == pi->sdu_len) {
3791 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3792 err = sock_queue_rcv_skb(sk, _skb);
3793 if (err < 0)
3794 kfree_skb(_skb);
3795 }
3796 err = 0;
3797
3798 drop:
3799 kfree_skb(pi->sdu);
3800 break;
3801 }
3802
3803 kfree_skb(skb);
3804 return err;
3805 }
3806
3807 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3808 {
3809 struct sk_buff *skb;
3810 u16 control;
3811
3812 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3813 if (bt_cb(skb)->tx_seq != tx_seq)
3814 break;
3815
3816 skb = skb_dequeue(SREJ_QUEUE(sk));
3817 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3818 l2cap_ertm_reassembly_sdu(sk, skb, control);
3819 l2cap_pi(sk)->buffer_seq_srej =
3820 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3821 tx_seq = (tx_seq + 1) % 64;
3822 }
3823 }
3824
3825 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3826 {
3827 struct l2cap_pinfo *pi = l2cap_pi(sk);
3828 struct srej_list *l, *tmp;
3829 u16 control;
3830
3831 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3832 if (l->tx_seq == tx_seq) {
3833 list_del(&l->list);
3834 kfree(l);
3835 return;
3836 }
3837 control = L2CAP_SUPER_SELECT_REJECT;
3838 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3839 l2cap_send_sframe(pi, control);
3840 list_del(&l->list);
3841 list_add_tail(&l->list, SREJ_LIST(sk));
3842 }
3843 }
3844
3845 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3846 {
3847 struct l2cap_pinfo *pi = l2cap_pi(sk);
3848 struct srej_list *new;
3849 u16 control;
3850
3851 while (tx_seq != pi->expected_tx_seq) {
3852 control = L2CAP_SUPER_SELECT_REJECT;
3853 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3854 l2cap_send_sframe(pi, control);
3855
3856 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3857 new->tx_seq = pi->expected_tx_seq;
3858 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3859 list_add_tail(&new->list, SREJ_LIST(sk));
3860 }
3861 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3862 }
3863
3864 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3865 {
3866 struct l2cap_pinfo *pi = l2cap_pi(sk);
3867 u8 tx_seq = __get_txseq(rx_control);
3868 u8 req_seq = __get_reqseq(rx_control);
3869 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3870 int tx_seq_offset, expected_tx_seq_offset;
3871 int num_to_ack = (pi->tx_win/6) + 1;
3872 int err = 0;
3873
3874 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3875 rx_control);
3876
3877 if (L2CAP_CTRL_FINAL & rx_control &&
3878 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3879 del_timer(&pi->monitor_timer);
3880 if (pi->unacked_frames > 0)
3881 __mod_retrans_timer();
3882 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3883 }
3884
3885 pi->expected_ack_seq = req_seq;
3886 l2cap_drop_acked_frames(sk);
3887
3888 if (tx_seq == pi->expected_tx_seq)
3889 goto expected;
3890
3891 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3892 if (tx_seq_offset < 0)
3893 tx_seq_offset += 64;
3894
3895 /* invalid tx_seq */
3896 if (tx_seq_offset >= pi->tx_win) {
3897 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3898 goto drop;
3899 }
3900
3901 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3902 goto drop;
3903
3904 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3905 struct srej_list *first;
3906
3907 first = list_first_entry(SREJ_LIST(sk),
3908 struct srej_list, list);
3909 if (tx_seq == first->tx_seq) {
3910 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3911 l2cap_check_srej_gap(sk, tx_seq);
3912
3913 list_del(&first->list);
3914 kfree(first);
3915
3916 if (list_empty(SREJ_LIST(sk))) {
3917 pi->buffer_seq = pi->buffer_seq_srej;
3918 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3919 l2cap_send_ack(pi);
3920 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3921 }
3922 } else {
3923 struct srej_list *l;
3924
3925 /* duplicated tx_seq */
3926 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3927 goto drop;
3928
3929 list_for_each_entry(l, SREJ_LIST(sk), list) {
3930 if (l->tx_seq == tx_seq) {
3931 l2cap_resend_srejframe(sk, tx_seq);
3932 return 0;
3933 }
3934 }
3935 l2cap_send_srejframe(sk, tx_seq);
3936 }
3937 } else {
3938 expected_tx_seq_offset =
3939 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3940 if (expected_tx_seq_offset < 0)
3941 expected_tx_seq_offset += 64;
3942
3943 /* duplicated tx_seq */
3944 if (tx_seq_offset < expected_tx_seq_offset)
3945 goto drop;
3946
3947 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3948
3949 BT_DBG("sk %p, Enter SREJ", sk);
3950
3951 INIT_LIST_HEAD(SREJ_LIST(sk));
3952 pi->buffer_seq_srej = pi->buffer_seq;
3953
3954 __skb_queue_head_init(SREJ_QUEUE(sk));
3955 __skb_queue_head_init(BUSY_QUEUE(sk));
3956 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3957
3958 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3959
3960 l2cap_send_srejframe(sk, tx_seq);
3961
3962 del_timer(&pi->ack_timer);
3963 }
3964 return 0;
3965
3966 expected:
3967 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3968
3969 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3970 bt_cb(skb)->tx_seq = tx_seq;
3971 bt_cb(skb)->sar = sar;
3972 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3973 return 0;
3974 }
3975
3976 err = l2cap_push_rx_skb(sk, skb, rx_control);
3977 if (err < 0)
3978 return 0;
3979
3980 if (rx_control & L2CAP_CTRL_FINAL) {
3981 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3982 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3983 else
3984 l2cap_retransmit_frames(sk);
3985 }
3986
3987 __mod_ack_timer();
3988
3989 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3990 if (pi->num_acked == num_to_ack - 1)
3991 l2cap_send_ack(pi);
3992
3993 return 0;
3994
3995 drop:
3996 kfree_skb(skb);
3997 return 0;
3998 }
3999
/* Handle a received RR (Receiver Ready) S-frame: acknowledge
 * outstanding I-frames and, depending on the P/F bits, answer a poll
 * or resume transmission. */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	/* ReqSeq acknowledges everything sent before it */
	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: our answer must carry the F-bit */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit: this RR answers a poll we sent earlier */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* Skip the retransmission if it was already done when the
		 * REJ arrived (REJ_ACT) */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			l2cap_send_ack(pi);
		} else {
			/* Peer is ready again: push queued I-frames */
			spin_lock_bh(&pi->send_lock);
			l2cap_ertm_send(sk);
			spin_unlock_bh(&pi->send_lock);
		}
	}
}
4046
/* Handle a received REJ S-frame: the peer rejects everything from
 * ReqSeq onwards, so retransmit from there (unless already done in
 * answer to an earlier poll). */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* Frames below ReqSeq are implicitly acknowledged */
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* If we already retransmitted when the original REJ was
		 * handled (REJ_ACT), don't do it twice */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		/* Remember this REJ so the matching F-bit answer is not
		 * treated as a second retransmission request */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a received SREJ S-frame: selectively retransmit the single
 * frame the peer asks for in ReqSeq. */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P-bit also acknowledges up to ReqSeq */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		spin_lock_bh(&pi->send_lock);
		l2cap_ertm_send(sk);
		spin_unlock_bh(&pi->send_lock);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			/* Remember which frame we already resent so the
			 * F-bit answer does not trigger a duplicate */
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Skip the retransmission if this very frame was already
		 * resent while answering the poll */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
4109
/* Handle a received RNR (Receiver Not Ready) S-frame: the peer is
 * busy, so stop retransmitting until it signals readiness again. */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	/* A poll must be answered with the F-bit set */
	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* No recovery in progress: just stop retransmitting */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	/* SREJ recovery in progress: keep asking for missing frames */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
4136
4137 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4138 {
4139 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4140
4141 if (L2CAP_CTRL_FINAL & rx_control &&
4142 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4143 del_timer(&l2cap_pi(sk)->monitor_timer);
4144 if (l2cap_pi(sk)->unacked_frames > 0)
4145 __mod_retrans_timer();
4146 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4147 }
4148
4149 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4150 case L2CAP_SUPER_RCV_READY:
4151 l2cap_data_channel_rrframe(sk, rx_control);
4152 break;
4153
4154 case L2CAP_SUPER_REJECT:
4155 l2cap_data_channel_rejframe(sk, rx_control);
4156 break;
4157
4158 case L2CAP_SUPER_SELECT_REJECT:
4159 l2cap_data_channel_srejframe(sk, rx_control);
4160 break;
4161
4162 case L2CAP_SUPER_RCV_NOT_READY:
4163 l2cap_data_channel_rnrframe(sk, rx_control);
4164 break;
4165 }
4166
4167 kfree_skb(skb);
4168 return 0;
4169 }
4170
/* Dispatch an inbound frame on a connection-oriented channel (@cid)
 * according to the channel mode (Basic, ERTM or Streaming).  Consumes
 * @skb and releases the socket lock taken by
 * l2cap_get_chan_by_scid(). */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq, req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask retransmission.
		 */
		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* SAR-start I-frames carry a 2-byte SDU length header */
		if (__is_sar_start(control) && __is_iframe(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Payload must fit within the negotiated MPS */
		if (len > pi->mps) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		req_seq = __get_reqseq(control);
		req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
		if (req_seq_offset < 0)
			req_seq_offset += 64;

		next_tx_seq_offset =
			(pi->next_tx_seq - pi->expected_ack_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* check for invalid req-seq */
		if (req_seq_offset > next_tx_seq_offset) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		if (__is_iframe(control)) {
			/* Negative len: frame shorter than its headers */
			if (len < 0) {
				l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
				goto drop;
			}

			l2cap_data_channel_iframe(sk, control, skb);
		} else {
			/* S-frames must carry no payload at all */
			if (len != 0) {
				l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
				goto drop;
			}

			l2cap_data_channel_sframe(sk, control, skb);
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Streaming mode never retransmits: on a sequence gap
		 * just resynchronize to the received TxSeq */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	/* sk is non-NULL here only if the channel lookup succeeded and
	 * left the socket locked */
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
4306
4307 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4308 {
4309 struct sock *sk;
4310
4311 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4312 if (!sk)
4313 goto drop;
4314
4315 BT_DBG("sk %p, len %d", sk, skb->len);
4316
4317 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4318 goto drop;
4319
4320 if (l2cap_pi(sk)->imtu < skb->len)
4321 goto drop;
4322
4323 if (!sock_queue_rcv_skb(sk, skb))
4324 goto done;
4325
4326 drop:
4327 kfree_skb(skb);
4328
4329 done:
4330 if (sk)
4331 bh_unlock_sock(sk);
4332 return 0;
4333 }
4334
4335 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4336 {
4337 struct l2cap_hdr *lh = (void *) skb->data;
4338 u16 cid, len;
4339 __le16 psm;
4340
4341 skb_pull(skb, L2CAP_HDR_SIZE);
4342 cid = __le16_to_cpu(lh->cid);
4343 len = __le16_to_cpu(lh->len);
4344
4345 if (len != skb->len) {
4346 kfree_skb(skb);
4347 return;
4348 }
4349
4350 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4351
4352 switch (cid) {
4353 case L2CAP_CID_SIGNALING:
4354 l2cap_sig_channel(conn, skb);
4355 break;
4356
4357 case L2CAP_CID_CONN_LESS:
4358 psm = get_unaligned_le16(skb->data);
4359 skb_pull(skb, 2);
4360 l2cap_conless_channel(conn, psm, skb);
4361 break;
4362
4363 default:
4364 l2cap_data_channel(conn, cid, skb);
4365 break;
4366 }
4367 }
4368
4369 /* ---- L2CAP interface with lower layer (HCI) ---- */
4370
4371 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4372 {
4373 int exact = 0, lm1 = 0, lm2 = 0;
4374 register struct sock *sk;
4375 struct hlist_node *node;
4376
4377 if (type != ACL_LINK)
4378 return 0;
4379
4380 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4381
4382 /* Find listening sockets and check their link_mode */
4383 read_lock(&l2cap_sk_list.lock);
4384 sk_for_each(sk, node, &l2cap_sk_list.head) {
4385 if (sk->sk_state != BT_LISTEN)
4386 continue;
4387
4388 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4389 lm1 |= HCI_LM_ACCEPT;
4390 if (l2cap_pi(sk)->role_switch)
4391 lm1 |= HCI_LM_MASTER;
4392 exact++;
4393 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4394 lm2 |= HCI_LM_ACCEPT;
4395 if (l2cap_pi(sk)->role_switch)
4396 lm2 |= HCI_LM_MASTER;
4397 }
4398 }
4399 read_unlock(&l2cap_sk_list.lock);
4400
4401 return exact ? lm1 : lm2;
4402 }
4403
4404 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4405 {
4406 struct l2cap_conn *conn;
4407
4408 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4409
4410 if (hcon->type != ACL_LINK)
4411 return 0;
4412
4413 if (!status) {
4414 conn = l2cap_conn_add(hcon, status);
4415 if (conn)
4416 l2cap_conn_ready(conn);
4417 } else
4418 l2cap_conn_del(hcon, bt_err(status));
4419
4420 return 0;
4421 }
4422
4423 static int l2cap_disconn_ind(struct hci_conn *hcon)
4424 {
4425 struct l2cap_conn *conn = hcon->l2cap_data;
4426
4427 BT_DBG("hcon %p", hcon);
4428
4429 if (hcon->type != ACL_LINK || !conn)
4430 return 0x13;
4431
4432 return conn->disc_reason;
4433 }
4434
4435 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4436 {
4437 BT_DBG("hcon %p reason %d", hcon, reason);
4438
4439 if (hcon->type != ACL_LINK)
4440 return 0;
4441
4442 l2cap_conn_del(hcon, bt_err(reason));
4443
4444 return 0;
4445 }
4446
4447 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4448 {
4449 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4450 return;
4451
4452 if (encrypt == 0x00) {
4453 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4454 l2cap_sock_clear_timer(sk);
4455 l2cap_sock_set_timer(sk, HZ * 5);
4456 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4457 __l2cap_sock_close(sk, ECONNREFUSED);
4458 } else {
4459 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4460 l2cap_sock_clear_timer(sk);
4461 }
4462 }
4463
/* HCI callback: an authentication/encryption procedure finished on
 * @hcon.  Walk every channel on the connection and advance those that
 * were waiting for security to complete. */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Connection Request already sent; nothing to do here */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channels only react to encryption changes */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: send the deferred
				 * Connection Request now */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: schedule teardown */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming channel waiting on security: answer the
			 * peer's Connection Request accordingly */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
4537
/* HCI callback: ACL data received.  Reassembles fragmented L2CAP
 * frames (an ACL_START fragment followed by continuations) into
 * conn->rx_skb and hands complete frames to l2cap_recv_frame().
 * Always consumes @skb. */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start while a frame is pending means the previous
		 * frame was truncated: discard the partial data */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Need at least the 2-byte length field of the header */
		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		/* Bytes still missing to complete the frame */
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4625
/* seq_file show handler: one line per L2CAP socket with addresses,
 * state, PSM, channel ids, MTUs and security level. */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
4648
/* debugfs open: hook up the single-record seq_file show handler */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4653
/* debugfs file operations: read-only dump of all L2CAP sockets */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the "l2cap" debugfs file (NULL when not created) */
static struct dentry *l2cap_debugfs;
4662
/* Socket-layer operations exposed to userspace via PF_BLUETOOTH */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
4682
/* Registered with the Bluetooth socket layer to create L2CAP sockets */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
4688
/* HCI protocol hooks: how the HCI core hands link events and ACL
 * data up to L2CAP */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4699
4700 static int __init l2cap_init(void)
4701 {
4702 int err;
4703
4704 err = proto_register(&l2cap_proto, 0);
4705 if (err < 0)
4706 return err;
4707
4708 _busy_wq = create_singlethread_workqueue("l2cap");
4709 if (!_busy_wq)
4710 goto error;
4711
4712 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4713 if (err < 0) {
4714 BT_ERR("L2CAP socket registration failed");
4715 goto error;
4716 }
4717
4718 err = hci_register_proto(&l2cap_hci_proto);
4719 if (err < 0) {
4720 BT_ERR("L2CAP protocol registration failed");
4721 bt_sock_unregister(BTPROTO_L2CAP);
4722 goto error;
4723 }
4724
4725 if (bt_debugfs) {
4726 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4727 bt_debugfs, NULL, &l2cap_debugfs_fops);
4728 if (!l2cap_debugfs)
4729 BT_ERR("Failed to create L2CAP debug file");
4730 }
4731
4732 BT_INFO("L2CAP ver %s", VERSION);
4733 BT_INFO("L2CAP socket layer initialized");
4734
4735 return 0;
4736
4737 error:
4738 proto_unregister(&l2cap_proto);
4739 return err;
4740 }
4741
/* Module unload: tear everything down in reverse order of init. */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Make sure no busy-work is still queued before destroying it */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
4757
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);

module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime switch for Enhanced Retransmission Mode support */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");
This page took 0.123713 seconds and 6 git commands to generate.