Bluetooth: Remove the send_lock spinlock from ERTM
[deliverable/linux.git] / net / bluetooth / l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth L2CAP core and sockets. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
47 #include <net/sock.h>
48
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
51
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
55
#define VERSION "2.14"

/* Module parameter: ERTM/Streaming modes are opt-in and off by default. */
static int enable_ertm = 0;

/* Locally supported L2CAP feature mask and fixed-channel bitmap. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };

static const struct proto_ops l2cap_sock_ops;

/* Workqueue used to drain the local-busy backlog (see l2cap_busy_work). */
static struct workqueue_struct *_busy_wq;

/* Global list of all L2CAP sockets, guarded by its rwlock. */
static struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

static void l2cap_busy_work(struct work_struct *work);

static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
80 /* ---- L2CAP timers ---- */
81 static void l2cap_sock_timeout(unsigned long arg)
82 {
83 struct sock *sk = (struct sock *) arg;
84 int reason;
85
86 BT_DBG("sock %p state %d", sk, sk->sk_state);
87
88 bh_lock_sock(sk);
89
90 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
91 reason = ECONNREFUSED;
92 else if (sk->sk_state == BT_CONNECT &&
93 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
94 reason = ECONNREFUSED;
95 else
96 reason = ETIMEDOUT;
97
98 __l2cap_sock_close(sk, reason);
99
100 bh_unlock_sock(sk);
101
102 l2cap_sock_kill(sk);
103 sock_put(sk);
104 }
105
106 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
107 {
108 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
109 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
110 }
111
112 static void l2cap_sock_clear_timer(struct sock *sk)
113 {
114 BT_DBG("sock %p state %d", sk, sk->sk_state);
115 sk_stop_timer(sk, &sk->sk_timer);
116 }
117
118 /* ---- L2CAP channels ---- */
119 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
120 {
121 struct sock *s;
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->dcid == cid)
124 break;
125 }
126 return s;
127 }
128
129 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
130 {
131 struct sock *s;
132 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
133 if (l2cap_pi(s)->scid == cid)
134 break;
135 }
136 return s;
137 }
138
/* Find channel with given SCID.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	/* The list read lock only protects the lookup; the socket is
	 * bh-locked before the list lock drops so it cannot go away
	 * between lookup and return. */
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
151
152 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
153 {
154 struct sock *s;
155 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
156 if (l2cap_pi(s)->ident == ident)
157 break;
158 }
159 return s;
160 }
161
/* Find the channel waiting on signalling identifier @ident.
 * Returns the socket bh-locked (same handoff scheme as
 * l2cap_get_chan_by_scid), or NULL. */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
172
173 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
174 {
175 u16 cid = L2CAP_CID_DYN_START;
176
177 for (; cid < L2CAP_CID_DYN_END; cid++) {
178 if (!__l2cap_get_chan_by_scid(l, cid))
179 return cid;
180 }
181
182 return 0;
183 }
184
/* Push the socket onto the head of the connection's channel list.
 * Caller must hold the list write lock.  Takes a socket reference
 * which l2cap_chan_unlink() releases. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
196
/* Remove the socket from the connection's channel list and drop the
 * reference taken by __l2cap_chan_link().  Unlike the link helper,
 * this takes the list write lock itself. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	/* Release without triggering destruction here; the caller still
	 * owns its own reference to sk. */
	__sock_put(sk);
}
213
/* Attach a socket to an ACL connection: assign CIDs according to the
 * socket type, link it into the channel list and, for incoming
 * channels, queue it on the listening parent's accept queue.
 * Caller must hold the channel list write lock. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* Default disconnect reason: 0x13 = remote user terminated. */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
245
/* Delete channel.
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	/* Notify whoever is waiting: the listening parent for a not yet
	 * accepted channel, otherwise the socket's own waiters. */
	if (parent) {
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	/* ERTM keeps extra per-channel state: stop its timers and free
	 * the retransmission/busy queues and the SREJ bookkeeping list. */
	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));

		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
294
/* Service level security */

/* Map the channel's PSM and requested security level to an HCI
 * authentication type and ask the HCI layer to enforce it.
 * Returns nonzero when the link already satisfies the requirement. */
static inline int l2cap_check_security(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	__u8 auth_type;

	/* PSM 0x0001 is SDP: it never bonds, and LOW is downgraded to
	 * the dedicated SDP security level. */
	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
								auth_type);
}
326
/* Allocate the next signalling command identifier for this connection.
 * Serialized with conn->lock; wraps within the kernel-reserved range. */
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
348
349 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
350 {
351 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
352
353 BT_DBG("code 0x%2.2x", code);
354
355 if (!skb)
356 return;
357
358 hci_send_acl(conn->hcon, skb, 0);
359 }
360
/* Build and transmit an ERTM S-frame carrying @control.
 * Consumes any pending Final/Poll bit request from conn_state and
 * appends an FCS when CRC16 is negotiated.  No-op unless connected. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* header + control field */

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the trailing FCS */

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Piggyback a pending F-bit (poll response) ... */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* ... or a pending P-bit (poll request). */
	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	/* FCS covers everything before it: basic header + control. */
	if (pi->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	hci_send_acl(pi->conn->hcon, skb, 0);
}
406
/* Send a Receiver Ready, or Receiver Not Ready while the local side is
 * busy, acknowledging everything up to buffer_seq. */
static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
{
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		/* Remember we told the peer we are busy, so we know to
		 * send an RR once the busy condition clears. */
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	} else
		control |= L2CAP_SUPER_RCV_READY;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(pi, control);
}
419
420 static inline int __l2cap_no_conn_pending(struct sock *sk)
421 {
422 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
423 }
424
/* Advance an outgoing channel: once the remote feature mask is known
 * (and security passes), send the Connect Request; otherwise first
 * issue an Information Request for the feature mask and start the
 * info timer. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight: l2cap_conn_start()
		 * will retry us when the response (or timeout) lands. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
458
459 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
460 {
461 u32 local_feat_mask = l2cap_feat_mask;
462 if (enable_ertm)
463 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
464
465 switch (mode) {
466 case L2CAP_MODE_ERTM:
467 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
468 case L2CAP_MODE_STREAMING:
469 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
470 default:
471 return 0x00;
472 }
473 }
474
/* Send a Disconnect Request for this channel, drop any queued TX data
 * and pending ERTM timers, and move the socket to BT_DISCONN with the
 * given error.  Caller must hold the socket lock. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
498
499 /* ---- L2CAP connections ---- */
500 static void l2cap_conn_start(struct l2cap_conn *conn)
501 {
502 struct l2cap_chan_list *l = &conn->chan_list;
503 struct sock_del_list del, *tmp1, *tmp2;
504 struct sock *sk;
505
506 BT_DBG("conn %p", conn);
507
508 INIT_LIST_HEAD(&del.list);
509
510 read_lock(&l->lock);
511
512 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
513 bh_lock_sock(sk);
514
515 if (sk->sk_type != SOCK_SEQPACKET &&
516 sk->sk_type != SOCK_STREAM) {
517 bh_unlock_sock(sk);
518 continue;
519 }
520
521 if (sk->sk_state == BT_CONNECT) {
522 if (l2cap_check_security(sk) &&
523 __l2cap_no_conn_pending(sk)) {
524 struct l2cap_conn_req req;
525
526 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
527 conn->feat_mask)
528 && l2cap_pi(sk)->conf_state &
529 L2CAP_CONF_STATE2_DEVICE) {
530 tmp1 = kzalloc(sizeof(struct srej_list),
531 GFP_ATOMIC);
532 tmp1->sk = sk;
533 list_add_tail(&tmp1->list, &del.list);
534 bh_unlock_sock(sk);
535 continue;
536 }
537
538 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
539 req.psm = l2cap_pi(sk)->psm;
540
541 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
542 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
543
544 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
545 L2CAP_CONN_REQ, sizeof(req), &req);
546 }
547 } else if (sk->sk_state == BT_CONNECT2) {
548 struct l2cap_conn_rsp rsp;
549 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
550 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
551
552 if (l2cap_check_security(sk)) {
553 if (bt_sk(sk)->defer_setup) {
554 struct sock *parent = bt_sk(sk)->parent;
555 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
556 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
557 parent->sk_data_ready(parent, 0);
558
559 } else {
560 sk->sk_state = BT_CONFIG;
561 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
562 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
563 }
564 } else {
565 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
566 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
567 }
568
569 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
570 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
571 }
572
573 bh_unlock_sock(sk);
574 }
575
576 read_unlock(&l->lock);
577
578 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
579 bh_lock_sock(tmp1->sk);
580 __l2cap_sock_close(tmp1->sk, ECONNRESET);
581 bh_unlock_sock(tmp1->sk);
582 list_del(&tmp1->list);
583 kfree(tmp1);
584 }
585 }
586
/* ACL link came up: mark connectionless/raw channels connected right
 * away and kick connection-oriented channels into the L2CAP setup
 * sequence. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* No channel setup needed for raw/dgram sockets. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
612
/* Notify sockets that we cannot guarantee reliability anymore */
/* Flag an error on every channel that asked for reliable delivery
 * (force_reliable) after the baseband reported data loss. */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->force_reliable)
			sk->sk_err = err;
	}

	read_unlock(&l->lock);
}
630
631 static void l2cap_info_timeout(unsigned long arg)
632 {
633 struct l2cap_conn *conn = (void *) arg;
634
635 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
636 conn->info_ident = 0;
637
638 l2cap_conn_start(conn);
639 }
640
/* Create (or return the existing) L2CAP connection object for an HCI
 * connection.  Returns NULL on allocation failure or when @status
 * indicates the HCI connection failed. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* Default disconnect reason: 0x13 = remote user terminated. */
	conn->disc_reason = 0x13;

	return conn;
}
673
/* Tear down an L2CAP connection: close every remaining channel with
 * @err, stop the info timer and free the connection object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled incoming frame. */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
700
/* Locked wrapper around __l2cap_chan_add(): attach a socket to a
 * connection under the channel list write lock. */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);
}
708
709 /* ---- Socket interface ---- */
710 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
711 {
712 struct sock *sk;
713 struct hlist_node *node;
714 sk_for_each(sk, node, &l2cap_sk_list.head)
715 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
716 goto found;
717 sk = NULL;
718 found:
719 return sk;
720 }
721
/* Find socket with psm and source bdaddr.
 * Returns closest match.
 */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* node is non-NULL only when the loop broke early on an exact
	 * match; otherwise fall back to the wildcard (BDADDR_ANY) hit. */
	return node ? sk : sk1;
}
746
/* Find socket with given address (psm, src).
 * Returns locked socket */
static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *s;
	/* Same lock handoff as the channel lookups: bh-lock the socket
	 * before releasing the list lock. */
	read_lock(&l2cap_sk_list.lock);
	s = __l2cap_get_sock_by_psm(state, psm, src);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);
	return s;
}
759
/* Socket destructor: drop anything still queued in either direction. */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
767
768 static void l2cap_sock_cleanup_listen(struct sock *parent)
769 {
770 struct sock *sk;
771
772 BT_DBG("parent %p", parent);
773
774 /* Close not yet accepted channels */
775 while ((sk = bt_accept_dequeue(parent, NULL)))
776 l2cap_sock_close(sk);
777
778 parent->sk_state = BT_CLOSED;
779 sock_set_flag(parent, SOCK_ZAPPED);
780 }
781
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 */
static void l2cap_sock_kill(struct sock *sk)
{
	/* A socket still attached to a file or not yet zapped must not
	 * be freed here; close/release will get to it later. */
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
797
/* Close the socket according to its current state.  Caller holds the
 * socket lock.  Connected channels get a graceful Disconnect Request;
 * half-open incoming channels (CONNECT2) get a rejecting Connect
 * Response; everything else is deleted or zapped directly. */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Bound the wait for the Disconnect Response. */
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk, reason);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
851
/* Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
{
	/* Stop the timer before taking the lock so it cannot fire and
	 * race with the close below. */
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
861
/* Initialize a fresh L2CAP socket.  A child accepted from @parent
 * inherits the parent's configuration; a user-created socket gets the
 * defaults (ERTM for SOCK_STREAM when enable_ertm is set). */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->conf_state = l2cap_pi(parent)->conf_state;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs  = l2cap_pi(parent)->fcs;
		pi->max_tx = l2cap_pi(parent)->max_tx;
		pi->tx_win = l2cap_pi(parent)->tx_win;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		if (enable_ertm && sk->sk_type == SOCK_STREAM) {
			pi->mode = L2CAP_MODE_ERTM;
			/* Mode is mandated by the socket type, not
			 * negotiable away during configuration. */
			pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		} else {
			pi->mode = L2CAP_MODE_BASIC;
		}
		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
		pi->fcs  = L2CAP_FCS_CRC16;
		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	skb_queue_head_init(BUSY_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
907
/* Protocol descriptor: sizes sock allocations to hold l2cap_pinfo. */
static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};
913
/* Allocate and minimally initialize an L2CAP socket, link it into the
 * global socket list and arm its timeout callback.  Returns NULL on
 * allocation failure. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
938
/* socket(2) backend: validate the socket type, enforce CAP_NET_RAW for
 * user raw sockets, and create + init the sock. */
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
			     int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	/* Raw sockets see signalling traffic: privileged unless the
	 * request came from inside the kernel. */
	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}
964
/* bind(2) backend: record the source address and PSM on an unbound
 * socket.  Reserved PSMs (< 0x1001) need CAP_NET_BIND_SERVICE; binding
 * by CID is not supported here. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy only what the caller supplied; the rest stays zeroed. */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
				!capable(CAP_NET_BIND_SERVICE)) {
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm   = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		/* SDP (0x0001) and RFCOMM (0x0003) never need security. */
		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
1018
/* Establish (or reuse) the ACL link to the destination and attach this
 * socket to it as a channel.  Picks the HCI authentication type from
 * the socket type / PSM / security level, then either completes
 * immediately (link already up) or leaves the socket in BT_CONNECT
 * waiting for the link.  Caller holds the socket lock. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	if (sk->sk_type == SOCK_RAW) {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		/* SDP: no bonding, and LOW degrades to the SDP level. */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	/* Link already up: raw/dgram sockets are done, channel sockets
	 * proceed straight to L2CAP connection setup. */
	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1109
/* connect(2) backend: validate the destination and the configured
 * channel mode, kick off l2cap_do_connect() when needed, then wait
 * for BT_CONNECTED (honoring O_NONBLOCK). */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || alen < sizeof(addr->sa_family) ||
			addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	/* Channel sockets must name a PSM to connect to. */
	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
			&& !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
1186
/* listen(2) backend: only bound SEQPACKET/STREAM sockets may listen,
 * and only in a supported mode.  A socket bound without a PSM gets an
 * odd dynamic PSM (0x1001..0x10ff) auto-assigned here. */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
			|| sk->sk_state != BT_BOUND) {
		err = -EBADFD;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* Valid dynamic PSMs are odd (LSB of each octet per the
		 * spec); step by 2 through the dynamic range. */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm   = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
1245
/* Accept one queued incoming connection on a listening L2CAP socket.
 *
 * Blocks (interruptibly, up to the socket's receive timeout) until
 * bt_accept_dequeue() yields a child socket.  Uses an exclusive wait
 * queue entry so each wakeup rouses a single accept()er.
 */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	/* Nested lock class: child socks get locked while the parent's
	 * lock is held elsewhere in the accept path. */
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the lock while asleep so new connections can be
		 * queued on this listener. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* State may have changed while the lock was dropped. */
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1301
1302 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1303 {
1304 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1305 struct sock *sk = sock->sk;
1306
1307 BT_DBG("sock %p, sk %p", sock, sk);
1308
1309 addr->sa_family = AF_BLUETOOTH;
1310 *len = sizeof(struct sockaddr_l2);
1311
1312 if (peer) {
1313 la->l2_psm = l2cap_pi(sk)->psm;
1314 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1315 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1316 } else {
1317 la->l2_psm = l2cap_pi(sk)->sport;
1318 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1319 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1320 }
1321
1322 return 0;
1323 }
1324
/* Wait until every outstanding ERTM I-frame has been acknowledged by
 * the peer, or the connection disappears.
 *
 * Called with the socket locked; the lock is dropped around each
 * sleep.  Polls in HZ/5 slices and is interruptible.  Returns 0 on
 * success, a sock_intr_errno() code on signal, or the pending socket
 * error.
 */
static int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the polling interval once it has expired. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep without the lock so acks can be processed. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1355
/* ERTM monitor timer: the peer did not answer our poll.
 *
 * Re-poll (RR/RNR with the P bit) up to remote_max_tx times, then
 * give up and tear the channel down.  Runs in timer (softirq)
 * context, hence the bh socket lock.
 */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		/* Retry budget exhausted: abort the connection. */
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1375
/* ERTM retransmission timer: no acknowledgement arrived in time.
 *
 * Enter the monitor phase: poll the peer with the P bit set, start
 * the monitor timer and wait for the F-bit reply (WAIT_F state).
 * Runs in timer (softirq) context.
 */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1391
/* Free transmitted I-frames the peer has acknowledged: everything on
 * the TX queue up to (but not including) expected_ack_seq.  Stops the
 * retransmission timer once no frames remain outstanding. */
static void l2cap_drop_acked_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk))) &&
			l2cap_pi(sk)->unacked_frames) {
		/* First frame that is still unacknowledged: stop. */
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
			break;

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);

		l2cap_pi(sk)->unacked_frames--;
	}

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);
}
1410
1411 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1412 {
1413 struct l2cap_pinfo *pi = l2cap_pi(sk);
1414
1415 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1416
1417 hci_send_acl(pi->conn->hcon, skb, 0);
1418 }
1419
1420 static int l2cap_streaming_send(struct sock *sk)
1421 {
1422 struct sk_buff *skb, *tx_skb;
1423 struct l2cap_pinfo *pi = l2cap_pi(sk);
1424 u16 control, fcs;
1425
1426 while ((skb = sk->sk_send_head)) {
1427 tx_skb = skb_clone(skb, GFP_ATOMIC);
1428
1429 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1430 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1431 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1432
1433 if (pi->fcs == L2CAP_FCS_CRC16) {
1434 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1435 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1436 }
1437
1438 l2cap_do_send(sk, tx_skb);
1439
1440 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1441
1442 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1443 sk->sk_send_head = NULL;
1444 else
1445 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1446
1447 skb = skb_dequeue(TX_QUEUE(sk));
1448 kfree_skb(skb);
1449 }
1450 return 0;
1451 }
1452
1453 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1454 {
1455 struct l2cap_pinfo *pi = l2cap_pi(sk);
1456 struct sk_buff *skb, *tx_skb;
1457 u16 control, fcs;
1458
1459 skb = skb_peek(TX_QUEUE(sk));
1460 if (!skb)
1461 return;
1462
1463 do {
1464 if (bt_cb(skb)->tx_seq == tx_seq)
1465 break;
1466
1467 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1468 return;
1469
1470 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1471
1472 if (pi->remote_max_tx &&
1473 bt_cb(skb)->retries == pi->remote_max_tx) {
1474 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1475 return;
1476 }
1477
1478 tx_skb = skb_clone(skb, GFP_ATOMIC);
1479 bt_cb(skb)->retries++;
1480 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1481
1482 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1483 control |= L2CAP_CTRL_FINAL;
1484 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1485 }
1486
1487 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1488 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1489
1490 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1491
1492 if (pi->fcs == L2CAP_FCS_CRC16) {
1493 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1494 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1495 }
1496
1497 l2cap_do_send(sk, tx_skb);
1498 }
1499
1500 static int l2cap_ertm_send(struct sock *sk)
1501 {
1502 struct sk_buff *skb, *tx_skb;
1503 struct l2cap_pinfo *pi = l2cap_pi(sk);
1504 u16 control, fcs;
1505 int nsent = 0;
1506
1507 if (sk->sk_state != BT_CONNECTED)
1508 return -ENOTCONN;
1509
1510 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1511
1512 if (pi->remote_max_tx &&
1513 bt_cb(skb)->retries == pi->remote_max_tx) {
1514 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1515 break;
1516 }
1517
1518 tx_skb = skb_clone(skb, GFP_ATOMIC);
1519
1520 bt_cb(skb)->retries++;
1521
1522 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1523 control &= L2CAP_CTRL_SAR;
1524
1525 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1526 control |= L2CAP_CTRL_FINAL;
1527 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1528 }
1529 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1530 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1531 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1532
1533
1534 if (pi->fcs == L2CAP_FCS_CRC16) {
1535 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1536 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1537 }
1538
1539 l2cap_do_send(sk, tx_skb);
1540
1541 __mod_retrans_timer();
1542
1543 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1544 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1545
1546 pi->unacked_frames++;
1547 pi->frames_sent++;
1548
1549 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1550 sk->sk_send_head = NULL;
1551 else
1552 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1553
1554 nsent++;
1555 }
1556
1557 return nsent;
1558 }
1559
1560 static int l2cap_retransmit_frames(struct sock *sk)
1561 {
1562 struct l2cap_pinfo *pi = l2cap_pi(sk);
1563 int ret;
1564
1565 if (!skb_queue_empty(TX_QUEUE(sk)))
1566 sk->sk_send_head = TX_QUEUE(sk)->next;
1567
1568 pi->next_tx_seq = pi->expected_ack_seq;
1569 ret = l2cap_ertm_send(sk);
1570 return ret;
1571 }
1572
/* Acknowledge frames received from the peer.
 *
 * Locally busy: send RNR and note it.  Otherwise try to piggyback the
 * ack (ReqSeq) on pending outgoing I-frames, and only fall back to an
 * explicit RR S-frame if nothing was sent.
 */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	/* The pinfo is embedded at the start of the sock, so the cast
	 * recovers the owning socket. */
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	/* I-frames carry the ack in ReqSeq; if any went out, done. */
	if (l2cap_ertm_send(sk) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1593
1594 static void l2cap_send_srejtail(struct sock *sk)
1595 {
1596 struct srej_list *tail;
1597 u16 control;
1598
1599 control = L2CAP_SUPER_SELECT_REJECT;
1600 control |= L2CAP_CTRL_FINAL;
1601
1602 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1603 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1604
1605 l2cap_send_sframe(l2cap_pi(sk), control);
1606 }
1607
/* Copy a user iovec into an skb: the first 'count' bytes go into the
 * header skb, the remainder is spread over MTU-sized continuation
 * fragments chained on frag_list.  Returns the number of bytes copied
 * or a negative errno.
 *
 * NOTE(review): allocation failure is reported as -EFAULT rather than
 * -ENOMEM; callers propagate this straight to userspace — confirm
 * before changing the code.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return -EFAULT;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1639
/* Build a connectionless (SOCK_DGRAM) PDU: L2CAP header plus a 2-byte
 * PSM prefix, followed by the user payload (overflow continues in
 * frag_list fragments).  Returns the skb or an ERR_PTR on failure. */
static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for the PSM */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* The first fragment carries the header; cap at the HCI MTU. */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1668
/* Build a basic-mode PDU: plain L2CAP header followed by the user
 * payload (overflow continues in frag_list fragments).  Returns the
 * skb or an ERR_PTR on failure. */
static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* The first fragment carries the header; cap at the HCI MTU. */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1696
/* Build one ERTM/streaming I-frame PDU.
 *
 * Layout: L2CAP header, 16-bit control field, optional 2-byte SDU
 * length (only on the first frame of a segmented SDU, when sdulen is
 * non-zero), payload, and — when CRC16 FCS is configured — a 2-byte
 * zero placeholder that the transmit paths overwrite with the real
 * FCS.  Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 control field */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Account for the SDU length field on SAR start frames. */
	if (sdulen)
		hlen += 2;

	/* Account for the trailing FCS. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* FCS placeholder; filled in by the send paths. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1741
/* Segment an SDU larger than the remote MPS into a START / CONTINUE /
 * END run of I-frames.
 *
 * Frames are built on a private queue first so a mid-run allocation
 * failure leaves the socket's TX queue untouched; on success the run
 * is spliced onto TX_QUEUE in one go.  Returns the number of payload
 * bytes queued or a negative errno.
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	/* START frame also carries the total SDU length. */
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			/* Last piece of the SDU. */
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1787
1788 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1789 {
1790 struct sock *sk = sock->sk;
1791 struct l2cap_pinfo *pi = l2cap_pi(sk);
1792 struct sk_buff *skb;
1793 u16 control;
1794 int err;
1795
1796 BT_DBG("sock %p, sk %p", sock, sk);
1797
1798 err = sock_error(sk);
1799 if (err)
1800 return err;
1801
1802 if (msg->msg_flags & MSG_OOB)
1803 return -EOPNOTSUPP;
1804
1805 lock_sock(sk);
1806
1807 if (sk->sk_state != BT_CONNECTED) {
1808 err = -ENOTCONN;
1809 goto done;
1810 }
1811
1812 /* Connectionless channel */
1813 if (sk->sk_type == SOCK_DGRAM) {
1814 skb = l2cap_create_connless_pdu(sk, msg, len);
1815 if (IS_ERR(skb)) {
1816 err = PTR_ERR(skb);
1817 } else {
1818 l2cap_do_send(sk, skb);
1819 err = len;
1820 }
1821 goto done;
1822 }
1823
1824 switch (pi->mode) {
1825 case L2CAP_MODE_BASIC:
1826 /* Check outgoing MTU */
1827 if (len > pi->omtu) {
1828 err = -EINVAL;
1829 goto done;
1830 }
1831
1832 /* Create a basic PDU */
1833 skb = l2cap_create_basic_pdu(sk, msg, len);
1834 if (IS_ERR(skb)) {
1835 err = PTR_ERR(skb);
1836 goto done;
1837 }
1838
1839 l2cap_do_send(sk, skb);
1840 err = len;
1841 break;
1842
1843 case L2CAP_MODE_ERTM:
1844 case L2CAP_MODE_STREAMING:
1845 /* Entire SDU fits into one PDU */
1846 if (len <= pi->remote_mps) {
1847 control = L2CAP_SDU_UNSEGMENTED;
1848 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1849 if (IS_ERR(skb)) {
1850 err = PTR_ERR(skb);
1851 goto done;
1852 }
1853 __skb_queue_tail(TX_QUEUE(sk), skb);
1854
1855 if (sk->sk_send_head == NULL)
1856 sk->sk_send_head = skb;
1857
1858 } else {
1859 /* Segment SDU into multiples PDUs */
1860 err = l2cap_sar_segment_sdu(sk, msg, len);
1861 if (err < 0)
1862 goto done;
1863 }
1864
1865 if (pi->mode == L2CAP_MODE_STREAMING) {
1866 err = l2cap_streaming_send(sk);
1867 } else {
1868 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1869 pi->conn_state && L2CAP_CONN_WAIT_F) {
1870 err = len;
1871 break;
1872 }
1873 err = l2cap_ertm_send(sk);
1874 }
1875
1876 if (err >= 0)
1877 err = len;
1878 break;
1879
1880 default:
1881 BT_DBG("bad state %1.1x", pi->mode);
1882 err = -EINVAL;
1883 }
1884
1885 done:
1886 release_sock(sk);
1887 return err;
1888 }
1889
/* recvmsg() for L2CAP sockets.
 *
 * For a deferred-setup channel still in BT_CONNECT2, the first read
 * acts as the accept trigger: send the pending connection response,
 * move to BT_CONFIG and return 0 without data.  All other reads go
 * through the common Bluetooth receive path.
 */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;

		sk->sk_state = BT_CONFIG;

		/* Answer the remote's Connect Request that was held back
		 * while userspace decided whether to accept. */
		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
1916
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM).
 *
 * For L2CAP_OPTIONS the current values are loaded first so a short
 * copy from userspace only overrides the leading fields.
 *
 * NOTE(review): when an unsupported mode is requested, err is set to
 * -EINVAL but the remaining options (imtu, omtu, fcs, ...) are still
 * applied before returning — looks unintended; confirm before
 * restructuring.
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		/* Seed with current values; a short user buffer keeps
		 * the trailing fields unchanged. */
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = l2cap_pi(sk)->mode;
		opts.fcs      = l2cap_pi(sk)->fcs;
		opts.max_tx   = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->mode = opts.mode;
		switch (l2cap_pi(sk)->mode) {
		case L2CAP_MODE_BASIC:
			l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			if (enable_ertm)
				break;
			/* fall through */
		default:
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->fcs  = opts.fcs;
		l2cap_pi(sk)->max_tx = opts.max_tx;
		l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Map link-mode flags onto the security-level model;
		 * later flags override earlier ones. */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1996
/* setsockopt() entry point: SOL_L2CAP goes to the legacy handler,
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP here. */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		/* Default for a short user buffer. */
		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		/* Only meaningful before/while listening. */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2061
2062 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2063 {
2064 struct sock *sk = sock->sk;
2065 struct l2cap_options opts;
2066 struct l2cap_conninfo cinfo;
2067 int len, err = 0;
2068 u32 opt;
2069
2070 BT_DBG("sk %p", sk);
2071
2072 if (get_user(len, optlen))
2073 return -EFAULT;
2074
2075 lock_sock(sk);
2076
2077 switch (optname) {
2078 case L2CAP_OPTIONS:
2079 opts.imtu = l2cap_pi(sk)->imtu;
2080 opts.omtu = l2cap_pi(sk)->omtu;
2081 opts.flush_to = l2cap_pi(sk)->flush_to;
2082 opts.mode = l2cap_pi(sk)->mode;
2083 opts.fcs = l2cap_pi(sk)->fcs;
2084 opts.max_tx = l2cap_pi(sk)->max_tx;
2085 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2086
2087 len = min_t(unsigned int, len, sizeof(opts));
2088 if (copy_to_user(optval, (char *) &opts, len))
2089 err = -EFAULT;
2090
2091 break;
2092
2093 case L2CAP_LM:
2094 switch (l2cap_pi(sk)->sec_level) {
2095 case BT_SECURITY_LOW:
2096 opt = L2CAP_LM_AUTH;
2097 break;
2098 case BT_SECURITY_MEDIUM:
2099 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2100 break;
2101 case BT_SECURITY_HIGH:
2102 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2103 L2CAP_LM_SECURE;
2104 break;
2105 default:
2106 opt = 0;
2107 break;
2108 }
2109
2110 if (l2cap_pi(sk)->role_switch)
2111 opt |= L2CAP_LM_MASTER;
2112
2113 if (l2cap_pi(sk)->force_reliable)
2114 opt |= L2CAP_LM_RELIABLE;
2115
2116 if (put_user(opt, (u32 __user *) optval))
2117 err = -EFAULT;
2118 break;
2119
2120 case L2CAP_CONNINFO:
2121 if (sk->sk_state != BT_CONNECTED &&
2122 !(sk->sk_state == BT_CONNECT2 &&
2123 bt_sk(sk)->defer_setup)) {
2124 err = -ENOTCONN;
2125 break;
2126 }
2127
2128 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2129 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2130
2131 len = min_t(unsigned int, len, sizeof(cinfo));
2132 if (copy_to_user(optval, (char *) &cinfo, len))
2133 err = -EFAULT;
2134
2135 break;
2136
2137 default:
2138 err = -ENOPROTOOPT;
2139 break;
2140 }
2141
2142 release_sock(sk);
2143 return err;
2144 }
2145
/* getsockopt() entry point: SOL_L2CAP goes to the legacy handler,
 * SOL_BLUETOOTH reports BT_SECURITY and BT_DEFER_SETUP here. */
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = l2cap_pi(sk)->sec_level;

		len = min_t(unsigned int, len, sizeof(sec));
		if (copy_to_user(optval, (char *) &sec, len))
			err = -EFAULT;

		break;

	case BT_DEFER_SETUP:
		/* Only meaningful before/while listening. */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2200
/* Shut down both directions of an L2CAP socket.
 *
 * ERTM channels first wait for all outstanding I-frames to be
 * acknowledged so already-queued data is not silently dropped, then
 * the channel is closed; with SO_LINGER set the call also waits for
 * BT_CLOSED up to the linger time.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		/* Drain unacked ERTM traffic before tearing down. */
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}

	/* A pending socket error takes precedence over success. */
	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}
2231
/* Final release from the socket layer: shut the channel down, detach
 * the sock from its file and let l2cap_sock_kill() free it once it is
 * safe to do so. */
static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}
2248
2249 static void l2cap_chan_ready(struct sock *sk)
2250 {
2251 struct sock *parent = bt_sk(sk)->parent;
2252
2253 BT_DBG("sk %p, parent %p", sk, parent);
2254
2255 l2cap_pi(sk)->conf_state = 0;
2256 l2cap_sock_clear_timer(sk);
2257
2258 if (!parent) {
2259 /* Outgoing channel.
2260 * Wake up socket sleeping on connect.
2261 */
2262 sk->sk_state = BT_CONNECTED;
2263 sk->sk_state_change(sk);
2264 } else {
2265 /* Incoming channel.
2266 * Wake up socket sleeping on accept.
2267 */
2268 parent->sk_data_ready(parent, 0);
2269 }
2270 }
2271
2272 /* Copy frame to all raw sockets on that connection */
2273 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2274 {
2275 struct l2cap_chan_list *l = &conn->chan_list;
2276 struct sk_buff *nskb;
2277 struct sock *sk;
2278
2279 BT_DBG("conn %p", conn);
2280
2281 read_lock(&l->lock);
2282 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2283 if (sk->sk_type != SOCK_RAW)
2284 continue;
2285
2286 /* Don't send frame to the socket it came from */
2287 if (skb->sk == sk)
2288 continue;
2289 nskb = skb_clone(skb, GFP_ATOMIC);
2290 if (!nskb)
2291 continue;
2292
2293 if (sock_queue_rcv_skb(sk, nskb))
2294 kfree_skb(nskb);
2295 }
2296 read_unlock(&l->lock);
2297 }
2298
2299 /* ---- L2CAP signalling commands ---- */
2300 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2301 u8 code, u8 ident, u16 dlen, void *data)
2302 {
2303 struct sk_buff *skb, **frag;
2304 struct l2cap_cmd_hdr *cmd;
2305 struct l2cap_hdr *lh;
2306 int len, count;
2307
2308 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2309 conn, code, ident, dlen);
2310
2311 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2312 count = min_t(unsigned int, conn->mtu, len);
2313
2314 skb = bt_skb_alloc(count, GFP_ATOMIC);
2315 if (!skb)
2316 return NULL;
2317
2318 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2319 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2320 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2321
2322 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2323 cmd->code = code;
2324 cmd->ident = ident;
2325 cmd->len = cpu_to_le16(dlen);
2326
2327 if (dlen) {
2328 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2329 memcpy(skb_put(skb, count), data, count);
2330 data += count;
2331 }
2332
2333 len -= skb->len;
2334
2335 /* Continuation fragments (no L2CAP header) */
2336 frag = &skb_shinfo(skb)->frag_list;
2337 while (len) {
2338 count = min_t(unsigned int, conn->mtu, len);
2339
2340 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2341 if (!*frag)
2342 goto fail;
2343
2344 memcpy(skb_put(*frag, count), data, count);
2345
2346 len -= count;
2347 data += count;
2348
2349 frag = &(*frag)->next;
2350 }
2351
2352 return skb;
2353
2354 fail:
2355 kfree_skb(skb);
2356 return NULL;
2357 }
2358
2359 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2360 {
2361 struct l2cap_conf_opt *opt = *ptr;
2362 int len;
2363
2364 len = L2CAP_CONF_OPT_SIZE + opt->len;
2365 *ptr += len;
2366
2367 *type = opt->type;
2368 *olen = opt->len;
2369
2370 switch (opt->len) {
2371 case 1:
2372 *val = *((u8 *) opt->val);
2373 break;
2374
2375 case 2:
2376 *val = __le16_to_cpu(*((__le16 *) opt->val));
2377 break;
2378
2379 case 4:
2380 *val = __le32_to_cpu(*((__le32 *) opt->val));
2381 break;
2382
2383 default:
2384 *val = (unsigned long) opt->val;
2385 break;
2386 }
2387
2388 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2389 return len;
2390 }
2391
2392 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2393 {
2394 struct l2cap_conf_opt *opt = *ptr;
2395
2396 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2397
2398 opt->type = type;
2399 opt->len = len;
2400
2401 switch (len) {
2402 case 1:
2403 *((u8 *) opt->val) = val;
2404 break;
2405
2406 case 2:
2407 *((__le16 *) opt->val) = cpu_to_le16(val);
2408 break;
2409
2410 case 4:
2411 *((__le32 *) opt->val) = cpu_to_le32(val);
2412 break;
2413
2414 default:
2415 memcpy(opt->val, (void *) val, len);
2416 break;
2417 }
2418
2419 *ptr += L2CAP_CONF_OPT_SIZE + len;
2420 }
2421
/* Delayed-ack timer: no outgoing I-frame piggybacked the ack in time,
 * so send an explicit acknowledgement now.  Timer (softirq) context,
 * hence the bh socket lock. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2430
/* Initialize the ERTM state machine for a freshly configured channel:
 * reset the sequence counters, set up the retransmission, monitor and
 * delayed-ack timers, the SREJ/busy queues and the local-busy work. */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
}
2451
2452 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2453 {
2454 switch (mode) {
2455 case L2CAP_MODE_STREAMING:
2456 case L2CAP_MODE_ERTM:
2457 if (l2cap_mode_supported(mode, remote_feat_mask))
2458 return mode;
2459 /* fall through */
2460 default:
2461 return L2CAP_MODE_BASIC;
2462 }
2463 }
2464
/* Build the payload of our outgoing Configure Request.
 *
 * On the very first request (no requests or responses exchanged yet)
 * the channel mode may be downgraded to one the remote supports,
 * unless userspace pinned it (L2CAP_CONF_STATE2_DEVICE).  Emits the
 * MTU option (basic mode, non-default only), the RFC option and —
 * when both sides support FCS and it is being disabled — the FCS
 * option.  Returns the number of bytes written at 'data'.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection happens only once, before any exchange. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Userspace pinned the mode: don't downgrade. */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = pi->tx_win;
		rfc.max_transmit    = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* PDU must fit in the HCI MTU with header overhead. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Offer to disable the FCS when we don't want it or the
		 * remote already asked for none. */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* PDU must fit in the HCI MTU with header overhead. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2557
2558 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2559 {
2560 struct l2cap_pinfo *pi = l2cap_pi(sk);
2561 struct l2cap_conf_rsp *rsp = data;
2562 void *ptr = rsp->data;
2563 void *req = pi->conf_req;
2564 int len = pi->conf_len;
2565 int type, hint, olen;
2566 unsigned long val;
2567 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2568 u16 mtu = L2CAP_DEFAULT_MTU;
2569 u16 result = L2CAP_CONF_SUCCESS;
2570
2571 BT_DBG("sk %p", sk);
2572
2573 while (len >= L2CAP_CONF_OPT_SIZE) {
2574 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2575
2576 hint = type & L2CAP_CONF_HINT;
2577 type &= L2CAP_CONF_MASK;
2578
2579 switch (type) {
2580 case L2CAP_CONF_MTU:
2581 mtu = val;
2582 break;
2583
2584 case L2CAP_CONF_FLUSH_TO:
2585 pi->flush_to = val;
2586 break;
2587
2588 case L2CAP_CONF_QOS:
2589 break;
2590
2591 case L2CAP_CONF_RFC:
2592 if (olen == sizeof(rfc))
2593 memcpy(&rfc, (void *) val, olen);
2594 break;
2595
2596 case L2CAP_CONF_FCS:
2597 if (val == L2CAP_FCS_NONE)
2598 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2599
2600 break;
2601
2602 default:
2603 if (hint)
2604 break;
2605
2606 result = L2CAP_CONF_UNKNOWN;
2607 *((u8 *) ptr++) = type;
2608 break;
2609 }
2610 }
2611
2612 if (pi->num_conf_rsp || pi->num_conf_req)
2613 goto done;
2614
2615 switch (pi->mode) {
2616 case L2CAP_MODE_STREAMING:
2617 case L2CAP_MODE_ERTM:
2618 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2619 pi->mode = l2cap_select_mode(rfc.mode,
2620 pi->conn->feat_mask);
2621 break;
2622 }
2623
2624 if (pi->mode != rfc.mode)
2625 return -ECONNREFUSED;
2626
2627 break;
2628 }
2629
2630 done:
2631 if (pi->mode != rfc.mode) {
2632 result = L2CAP_CONF_UNACCEPT;
2633 rfc.mode = pi->mode;
2634
2635 if (pi->num_conf_rsp == 1)
2636 return -ECONNREFUSED;
2637
2638 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2639 sizeof(rfc), (unsigned long) &rfc);
2640 }
2641
2642
2643 if (result == L2CAP_CONF_SUCCESS) {
2644 /* Configure output options and let the other side know
2645 * which ones we don't like. */
2646
2647 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2648 result = L2CAP_CONF_UNACCEPT;
2649 else {
2650 pi->omtu = mtu;
2651 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2652 }
2653 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2654
2655 switch (rfc.mode) {
2656 case L2CAP_MODE_BASIC:
2657 pi->fcs = L2CAP_FCS_NONE;
2658 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2659 break;
2660
2661 case L2CAP_MODE_ERTM:
2662 pi->remote_tx_win = rfc.txwin_size;
2663 pi->remote_max_tx = rfc.max_transmit;
2664 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2665 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2666
2667 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2668
2669 rfc.retrans_timeout =
2670 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2671 rfc.monitor_timeout =
2672 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2673
2674 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2675
2676 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2677 sizeof(rfc), (unsigned long) &rfc);
2678
2679 break;
2680
2681 case L2CAP_MODE_STREAMING:
2682 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2683 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2684
2685 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2686
2687 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2688
2689 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2690 sizeof(rfc), (unsigned long) &rfc);
2691
2692 break;
2693
2694 default:
2695 result = L2CAP_CONF_UNACCEPT;
2696
2697 memset(&rfc, 0, sizeof(rfc));
2698 rfc.mode = pi->mode;
2699 }
2700
2701 if (result == L2CAP_CONF_SUCCESS)
2702 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2703 }
2704 rsp->scid = cpu_to_le16(pi->dcid);
2705 rsp->result = cpu_to_le16(result);
2706 rsp->flags = cpu_to_le16(0x0000);
2707
2708 return ptr - data;
2709 }
2710
2711 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2712 {
2713 struct l2cap_pinfo *pi = l2cap_pi(sk);
2714 struct l2cap_conf_req *req = data;
2715 void *ptr = req->data;
2716 int type, olen;
2717 unsigned long val;
2718 struct l2cap_conf_rfc rfc;
2719
2720 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2721
2722 while (len >= L2CAP_CONF_OPT_SIZE) {
2723 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2724
2725 switch (type) {
2726 case L2CAP_CONF_MTU:
2727 if (val < L2CAP_DEFAULT_MIN_MTU) {
2728 *result = L2CAP_CONF_UNACCEPT;
2729 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2730 } else
2731 pi->omtu = val;
2732 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2733 break;
2734
2735 case L2CAP_CONF_FLUSH_TO:
2736 pi->flush_to = val;
2737 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2738 2, pi->flush_to);
2739 break;
2740
2741 case L2CAP_CONF_RFC:
2742 if (olen == sizeof(rfc))
2743 memcpy(&rfc, (void *)val, olen);
2744
2745 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2746 rfc.mode != pi->mode)
2747 return -ECONNREFUSED;
2748
2749 pi->fcs = 0;
2750
2751 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2752 sizeof(rfc), (unsigned long) &rfc);
2753 break;
2754 }
2755 }
2756
2757 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2758 return -ECONNREFUSED;
2759
2760 pi->mode = rfc.mode;
2761
2762 if (*result == L2CAP_CONF_SUCCESS) {
2763 switch (rfc.mode) {
2764 case L2CAP_MODE_ERTM:
2765 pi->remote_tx_win = rfc.txwin_size;
2766 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2767 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2768 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2769 break;
2770 case L2CAP_MODE_STREAMING:
2771 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2772 }
2773 }
2774
2775 req->dcid = cpu_to_le16(pi->dcid);
2776 req->flags = cpu_to_le16(0x0000);
2777
2778 return ptr - data;
2779 }
2780
2781 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2782 {
2783 struct l2cap_conf_rsp *rsp = data;
2784 void *ptr = rsp->data;
2785
2786 BT_DBG("sk %p", sk);
2787
2788 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2789 rsp->result = cpu_to_le16(result);
2790 rsp->flags = cpu_to_le16(flags);
2791
2792 return ptr - data;
2793 }
2794
2795 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2796 {
2797 struct l2cap_pinfo *pi = l2cap_pi(sk);
2798 int type, olen;
2799 unsigned long val;
2800 struct l2cap_conf_rfc rfc;
2801
2802 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2803
2804 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2805 return;
2806
2807 while (len >= L2CAP_CONF_OPT_SIZE) {
2808 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2809
2810 switch (type) {
2811 case L2CAP_CONF_RFC:
2812 if (olen == sizeof(rfc))
2813 memcpy(&rfc, (void *)val, olen);
2814 goto done;
2815 }
2816 }
2817
2818 done:
2819 switch (rfc.mode) {
2820 case L2CAP_MODE_ERTM:
2821 pi->remote_tx_win = rfc.txwin_size;
2822 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2823 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2824 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2825 break;
2826 case L2CAP_MODE_STREAMING:
2827 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2828 }
2829 }
2830
2831 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2832 {
2833 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2834
2835 if (rej->reason != 0x0000)
2836 return 0;
2837
2838 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2839 cmd->ident == conn->info_ident) {
2840 del_timer(&conn->info_timer);
2841
2842 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2843 conn->info_ident = 0;
2844
2845 l2cap_conn_start(conn);
2846 }
2847
2848 return 0;
2849 }
2850
/* Handle an incoming Connection Request.
 *
 * Looks up a listening socket on the requested PSM, allocates a child
 * socket, adds it to the connection's channel list and replies with a
 * Connection Response (success, pending or a failure code).  If the
 * peer's feature mask is not yet known, an Information Request is sent
 * as well.
 *
 * NOTE(review): the parent socket appears to come back locked from
 * l2cap_get_sock_by_psm() and is released at the "response" label —
 * confirm against that helper's definition.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* Inherit settings from the listening socket and record the
	 * peer's source CID as our destination CID */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	/* Decide the response: only answer with success once the feature
	 * mask exchange is done and security allows it; otherwise report
	 * the connection as pending */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Kick off the feature mask exchange if it has not started yet */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	return 0;
}
2967
/* Handle an incoming Connection Response.
 *
 * On success move the channel to BT_CONFIG and fire off our first
 * Configure Request; on a pending result just mark the channel; any
 * other result tears the channel down.
 *
 * NOTE(review): l2cap_get_chan_by_scid()/by_ident() appear to return
 * the socket locked; the lock is dropped via bh_unlock_sock() at the
 * end — confirm against those helpers.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* A pending response may arrive before we allotted a source CID;
	 * fall back to matching on the command identifier then */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return 0;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* Connection refused (or failed) by the peer */
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
3017
/* Handle an incoming Configure Request.
 *
 * Request fragments (continuation flag set) are accumulated into
 * pi->conf_req; once complete the options are parsed, a response is
 * sent, and — when both config directions are done — the channel is
 * moved to BT_CONNECTED (initializing ERTM state when applicable).
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	/* Configuration is only valid in BT_CONFIG; reject with
	 * "invalid CID" (reason 0x0002) otherwise */
	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Negotiation failed outright: disconnect the channel */
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Enable the CRC16 FCS unless both sides asked to drop it */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
		    l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* Outgoing config was never started (we were the acceptor):
	 * send our own Configure Request now */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
3109
3110 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3111 {
3112 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3113 u16 scid, flags, result;
3114 struct sock *sk;
3115 int len = cmd->len - sizeof(*rsp);
3116
3117 scid = __le16_to_cpu(rsp->scid);
3118 flags = __le16_to_cpu(rsp->flags);
3119 result = __le16_to_cpu(rsp->result);
3120
3121 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3122 scid, flags, result);
3123
3124 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3125 if (!sk)
3126 return 0;
3127
3128 switch (result) {
3129 case L2CAP_CONF_SUCCESS:
3130 l2cap_conf_rfc_get(sk, rsp->data, len);
3131 break;
3132
3133 case L2CAP_CONF_UNACCEPT:
3134 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3135 char req[64];
3136
3137 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3138 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3139 goto done;
3140 }
3141
3142 /* throw out any old stored conf requests */
3143 result = L2CAP_CONF_SUCCESS;
3144 len = l2cap_parse_conf_rsp(sk, rsp->data,
3145 len, req, &result);
3146 if (len < 0) {
3147 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3148 goto done;
3149 }
3150
3151 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3152 L2CAP_CONF_REQ, len, req);
3153 l2cap_pi(sk)->num_conf_req++;
3154 if (result != L2CAP_CONF_SUCCESS)
3155 goto done;
3156 break;
3157 }
3158
3159 default:
3160 sk->sk_err = ECONNRESET;
3161 l2cap_sock_set_timer(sk, HZ * 5);
3162 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3163 goto done;
3164 }
3165
3166 if (flags & 0x01)
3167 goto done;
3168
3169 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3170
3171 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3172 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3173 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3174 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3175
3176 sk->sk_state = BT_CONNECTED;
3177 l2cap_pi(sk)->next_tx_seq = 0;
3178 l2cap_pi(sk)->expected_tx_seq = 0;
3179 __skb_queue_head_init(TX_QUEUE(sk));
3180 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3181 l2cap_ertm_init(sk);
3182
3183 l2cap_chan_ready(sk);
3184 }
3185
3186 done:
3187 bh_unlock_sock(sk);
3188 return 0;
3189 }
3190
/* Handle an incoming Disconnection Request: acknowledge it, shut the
 * socket down, remove the channel and kill the (now zapped) socket.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return the socket
 * locked; bh_unlock_sock() drops that lock before l2cap_sock_kill() —
 * confirm against the helper.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid, so look up by dcid */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* No further reads or writes are possible on this channel */
	sk->sk_shutdown = SHUTDOWN_MASK;

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3219
/* Handle an incoming Disconnection Response: the disconnect we asked
 * for is complete, so remove the channel (no error) and kill the
 * socket if it has been zapped. */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3241
/* Handle an incoming Information Request and answer it.
 *
 * Supported queries: the extended feature mask (advertising
 * ERTM/Streaming/FCS when enable_ertm is set) and the fixed channel
 * map; anything else gets a "not supported" response.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4 byte rsp header + 4 byte feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (enable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4 byte rsp header + 8 byte fixed channel bitmap */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3281
/* Handle an incoming Information Response.
 *
 * After receiving the feature mask we may chase it with a fixed-channel
 * query (when the peer advertises fixed channels); once the exchange is
 * finished, connection setup is resumed via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* A response arrived, so stop the info request timeout */
	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed channel query */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3320
/* Process an skb received on the L2CAP signalling channel (CID 0x0001).
 *
 * A single C-frame may contain several commands; each is dispatched to
 * its handler in turn.  Any handler error is answered with a Command
 * Reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of the signalling traffic first */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Identifier 0 is illegal; a length beyond the frame means
		 * a truncated/corrupted command — stop parsing */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the request payload straight back */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next command in this C-frame */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3410
3411 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3412 {
3413 u16 our_fcs, rcv_fcs;
3414 int hdr_size = L2CAP_HDR_SIZE + 2;
3415
3416 if (pi->fcs == L2CAP_FCS_CRC16) {
3417 skb_trim(skb, skb->len - 2);
3418 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3419 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3420
3421 if (our_fcs != rcv_fcs)
3422 return -EINVAL;
3423 }
3424 return 0;
3425 }
3426
/* Answer a poll (P-bit) request: send pending I-frames if possible,
 * otherwise an RR (or RNR when we are locally busy) so the remote's
 * poll is always acknowledged. */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Tell the peer we cannot receive right now */
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	/* Nothing was sent and we are not busy: acknowledge the poll
	 * with a plain RR */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
3453
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq relative to buffer_seq (modulo-64 sequence
 * space).  Returns -EINVAL if a frame with the same tx_seq is already
 * queued (duplicate), 0 otherwise. */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Distance of the new frame from buffer_seq in sequence space */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		/* Already have this sequence number queued: duplicate */
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* Found the first queued frame that sorts after us:
		 * insert in front of it */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Sorts after everything currently queued */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
3496
/* Reassemble an SDU from ERTM I-frames according to the frame's SAR
 * bits and deliver complete SDUs to the socket receive queue.
 *
 * Returns 0 on success (the skb is consumed), -ENOMEM when allocation
 * or queueing fails (caller enters local-busy and retries later; the
 * SAR_RETRY flag makes the retry skip re-copying the final fragment).
 * Protocol violations (unexpected SAR state, oversized SDU) disconnect
 * the channel.
 */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented SDU must not arrive mid-reassembly */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two payload bytes carry the total SDU length */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		/* Continuation only makes sense mid-reassembly */
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* On a retry after -ENOMEM the final fragment has already
		 * been appended; skip the length checks and copy */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

disconnect:
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
3604
/* Work item that drains the local-busy queue.
 *
 * Repeatedly tries to push the frames parked on BUSY_QUEUE into the
 * reassembly path, sleeping between attempts (the socket lock is
 * dropped around the sleep).  Gives up and disconnects after
 * L2CAP_LOCAL_BUSY_TRIES attempts.  On success (or any exit) the
 * local-busy condition is cleared; if an RNR had been sent, an RR with
 * the Poll bit is sent so the remote resumes transmission.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;
	u16 control;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			goto done;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			goto done;
		}

		/* Sleep without the socket lock so the receive path can
		 * make progress in the meantime */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			goto done;

		/* Drain as many queued frames as the receive buffer now
		 * accepts; requeue the one that fails and retry later */
		while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
			control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
			err = l2cap_ertm_reassembly_sdu(sk, skb, control);
			if (err < 0) {
				skb_queue_head(BUSY_QUEUE(sk), skb);
				break;
			}

			pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		}

		if (!skb)
			break;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the remote we were busy: poll it with RR so it knows
	 * it may transmit again, and supervise with the monitor timer */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3682
/* Feed a received I-frame into SDU reassembly, entering the local-busy
 * state when the receive buffer cannot take it.
 *
 * While local-busy, frames are parked on BUSY_QUEUE and -EBUSY is
 * returned.  On the first failure an RNR S-frame is sent, the ack timer
 * is stopped, and the busy work item is scheduled to drain the queue.
 */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return -EBUSY;
	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		/* Frame accepted: advance the acknowledgement state */
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("sk %p, Enter local busy", sk);

	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	/* Tell the remote to stop sending until further notice */
	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	del_timer(&pi->ack_timer);

	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
3719
3720 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3721 {
3722 struct l2cap_pinfo *pi = l2cap_pi(sk);
3723 struct sk_buff *_skb;
3724 int err = -EINVAL;
3725
3726 /*
3727 * TODO: We have to notify the userland if some data is lost with the
3728 * Streaming Mode.
3729 */
3730
3731 switch (control & L2CAP_CTRL_SAR) {
3732 case L2CAP_SDU_UNSEGMENTED:
3733 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3734 kfree_skb(pi->sdu);
3735 break;
3736 }
3737
3738 err = sock_queue_rcv_skb(sk, skb);
3739 if (!err)
3740 return 0;
3741
3742 break;
3743
3744 case L2CAP_SDU_START:
3745 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3746 kfree_skb(pi->sdu);
3747 break;
3748 }
3749
3750 pi->sdu_len = get_unaligned_le16(skb->data);
3751 skb_pull(skb, 2);
3752
3753 if (pi->sdu_len > pi->imtu) {
3754 err = -EMSGSIZE;
3755 break;
3756 }
3757
3758 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3759 if (!pi->sdu) {
3760 err = -ENOMEM;
3761 break;
3762 }
3763
3764 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3765
3766 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3767 pi->partial_sdu_len = skb->len;
3768 err = 0;
3769 break;
3770
3771 case L2CAP_SDU_CONTINUE:
3772 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3773 break;
3774
3775 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3776
3777 pi->partial_sdu_len += skb->len;
3778 if (pi->partial_sdu_len > pi->sdu_len)
3779 kfree_skb(pi->sdu);
3780 else
3781 err = 0;
3782
3783 break;
3784
3785 case L2CAP_SDU_END:
3786 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3787 break;
3788
3789 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3790
3791 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3792 pi->partial_sdu_len += skb->len;
3793
3794 if (pi->partial_sdu_len > pi->imtu)
3795 goto drop;
3796
3797 if (pi->partial_sdu_len == pi->sdu_len) {
3798 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3799 err = sock_queue_rcv_skb(sk, _skb);
3800 if (err < 0)
3801 kfree_skb(_skb);
3802 }
3803 err = 0;
3804
3805 drop:
3806 kfree_skb(pi->sdu);
3807 break;
3808 }
3809
3810 kfree_skb(skb);
3811 return err;
3812 }
3813
3814 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3815 {
3816 struct sk_buff *skb;
3817 u16 control;
3818
3819 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3820 if (bt_cb(skb)->tx_seq != tx_seq)
3821 break;
3822
3823 skb = skb_dequeue(SREJ_QUEUE(sk));
3824 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3825 l2cap_ertm_reassembly_sdu(sk, skb, control);
3826 l2cap_pi(sk)->buffer_seq_srej =
3827 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3828 tx_seq = (tx_seq + 1) % 64;
3829 }
3830 }
3831
3832 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3833 {
3834 struct l2cap_pinfo *pi = l2cap_pi(sk);
3835 struct srej_list *l, *tmp;
3836 u16 control;
3837
3838 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3839 if (l->tx_seq == tx_seq) {
3840 list_del(&l->list);
3841 kfree(l);
3842 return;
3843 }
3844 control = L2CAP_SUPER_SELECT_REJECT;
3845 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3846 l2cap_send_sframe(pi, control);
3847 list_del(&l->list);
3848 list_add_tail(&l->list, SREJ_LIST(sk));
3849 }
3850 }
3851
3852 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3853 {
3854 struct l2cap_pinfo *pi = l2cap_pi(sk);
3855 struct srej_list *new;
3856 u16 control;
3857
3858 while (tx_seq != pi->expected_tx_seq) {
3859 control = L2CAP_SUPER_SELECT_REJECT;
3860 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3861 l2cap_send_sframe(pi, control);
3862
3863 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3864 new->tx_seq = pi->expected_tx_seq;
3865 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3866 list_add_tail(&new->list, SREJ_LIST(sk));
3867 }
3868 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3869 }
3870
3871 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3872 {
3873 struct l2cap_pinfo *pi = l2cap_pi(sk);
3874 u8 tx_seq = __get_txseq(rx_control);
3875 u8 req_seq = __get_reqseq(rx_control);
3876 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3877 int tx_seq_offset, expected_tx_seq_offset;
3878 int num_to_ack = (pi->tx_win/6) + 1;
3879 int err = 0;
3880
3881 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3882 rx_control);
3883
3884 if (L2CAP_CTRL_FINAL & rx_control &&
3885 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3886 del_timer(&pi->monitor_timer);
3887 if (pi->unacked_frames > 0)
3888 __mod_retrans_timer();
3889 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3890 }
3891
3892 pi->expected_ack_seq = req_seq;
3893 l2cap_drop_acked_frames(sk);
3894
3895 if (tx_seq == pi->expected_tx_seq)
3896 goto expected;
3897
3898 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3899 if (tx_seq_offset < 0)
3900 tx_seq_offset += 64;
3901
3902 /* invalid tx_seq */
3903 if (tx_seq_offset >= pi->tx_win) {
3904 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3905 goto drop;
3906 }
3907
3908 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3909 goto drop;
3910
3911 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3912 struct srej_list *first;
3913
3914 first = list_first_entry(SREJ_LIST(sk),
3915 struct srej_list, list);
3916 if (tx_seq == first->tx_seq) {
3917 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3918 l2cap_check_srej_gap(sk, tx_seq);
3919
3920 list_del(&first->list);
3921 kfree(first);
3922
3923 if (list_empty(SREJ_LIST(sk))) {
3924 pi->buffer_seq = pi->buffer_seq_srej;
3925 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3926 l2cap_send_ack(pi);
3927 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3928 }
3929 } else {
3930 struct srej_list *l;
3931
3932 /* duplicated tx_seq */
3933 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3934 goto drop;
3935
3936 list_for_each_entry(l, SREJ_LIST(sk), list) {
3937 if (l->tx_seq == tx_seq) {
3938 l2cap_resend_srejframe(sk, tx_seq);
3939 return 0;
3940 }
3941 }
3942 l2cap_send_srejframe(sk, tx_seq);
3943 }
3944 } else {
3945 expected_tx_seq_offset =
3946 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3947 if (expected_tx_seq_offset < 0)
3948 expected_tx_seq_offset += 64;
3949
3950 /* duplicated tx_seq */
3951 if (tx_seq_offset < expected_tx_seq_offset)
3952 goto drop;
3953
3954 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3955
3956 BT_DBG("sk %p, Enter SREJ", sk);
3957
3958 INIT_LIST_HEAD(SREJ_LIST(sk));
3959 pi->buffer_seq_srej = pi->buffer_seq;
3960
3961 __skb_queue_head_init(SREJ_QUEUE(sk));
3962 __skb_queue_head_init(BUSY_QUEUE(sk));
3963 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3964
3965 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3966
3967 l2cap_send_srejframe(sk, tx_seq);
3968
3969 del_timer(&pi->ack_timer);
3970 }
3971 return 0;
3972
3973 expected:
3974 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3975
3976 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3977 bt_cb(skb)->tx_seq = tx_seq;
3978 bt_cb(skb)->sar = sar;
3979 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3980 return 0;
3981 }
3982
3983 err = l2cap_push_rx_skb(sk, skb, rx_control);
3984 if (err < 0)
3985 return 0;
3986
3987 if (rx_control & L2CAP_CTRL_FINAL) {
3988 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3989 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3990 else
3991 l2cap_retransmit_frames(sk);
3992 }
3993
3994 __mod_ack_timer();
3995
3996 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3997 if (pi->num_acked == num_to_ack - 1)
3998 l2cap_send_ack(pi);
3999
4000 return 0;
4001
4002 drop:
4003 kfree_skb(skb);
4004 return 0;
4005 }
4006
/* Handle a Receiver Ready (RR) S-frame.
 *
 * Drops newly acknowledged frames, then reacts to the P/F bits:
 * answer a poll, consume a final bit, or resume transmission.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Remote polls us: the answer must carry the F-bit. */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Final answer to our poll: retransmit unless a REJ
		 * exchange already took care of it. */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		/* Plain RR: remote is no longer busy, keep sending. */
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			l2cap_send_ack(pi);
		} else {
			l2cap_ertm_send(sk);
		}
	}
}
4051
/* Handle a Reject (REJ) S-frame: the remote requests retransmission of
 * every frame from req_seq onward (go-back-n recovery). */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* Frames before tx_seq are implicitly acknowledged. */
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* A pending REJ exchange consumes the F-bit instead of
		 * triggering a second retransmission. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		/* Remember we retransmitted while still waiting for the
		 * F-bit, so it is not done again on its arrival. */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Select Reject (SREJ) S-frame: the remote requests the
 * retransmission of the single frame tx_seq. */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P-bit also acknowledges up to tx_seq. */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			/* Remember which frame was resent so its F-bit
			 * answer does not trigger another resend. */
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
4112
/* Handle a Receiver Not Ready (RNR) S-frame: the remote cannot accept
 * more I-frames, so suspend retransmission until it recovers. */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* No recovery of our own pending: just stop the
		 * retransmission timer and answer a poll if present. */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	/* We are in SREJ recovery ourselves. */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
4139
4140 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4141 {
4142 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4143
4144 if (L2CAP_CTRL_FINAL & rx_control &&
4145 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4146 del_timer(&l2cap_pi(sk)->monitor_timer);
4147 if (l2cap_pi(sk)->unacked_frames > 0)
4148 __mod_retrans_timer();
4149 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4150 }
4151
4152 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4153 case L2CAP_SUPER_RCV_READY:
4154 l2cap_data_channel_rrframe(sk, rx_control);
4155 break;
4156
4157 case L2CAP_SUPER_REJECT:
4158 l2cap_data_channel_rejframe(sk, rx_control);
4159 break;
4160
4161 case L2CAP_SUPER_SELECT_REJECT:
4162 l2cap_data_channel_srejframe(sk, rx_control);
4163 break;
4164
4165 case L2CAP_SUPER_RCV_NOT_READY:
4166 l2cap_data_channel_rnrframe(sk, rx_control);
4167 break;
4168 }
4169
4170 kfree_skb(skb);
4171 return 0;
4172 }
4173
/* Entry point for data received on a connection-oriented channel.
 *
 * Looks up the channel by destination CID and feeds the frame into the
 * handler for the channel's mode (basic / ERTM / streaming).  Consumes
 * skb on every path; always returns 0.
 *
 * NOTE(review): the unlock at 'done' implies l2cap_get_chan_by_scid()
 * returns the socket bh-locked -- confirm against its definition.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq, req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Strip the 16-bit control field. */
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask retransmission.
		 */
		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* Payload length excludes the SDU-length field of a
		 * start fragment and the trailing FCS, if present. */
		if (__is_sar_start(control) && __is_iframe(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		if (len > pi->mps) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		/* req_seq may only acknowledge frames we actually sent
		 * (modulo-64 window arithmetic). */
		req_seq = __get_reqseq(control);
		req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
		if (req_seq_offset < 0)
			req_seq_offset += 64;

		next_tx_seq_offset =
			(pi->next_tx_seq - pi->expected_ack_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* check for invalid req-seq */
		if (req_seq_offset > next_tx_seq_offset) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		if (__is_iframe(control)) {
			if (len < 0) {
				l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
				goto drop;
			}

			l2cap_data_channel_iframe(sk, control, skb);
		} else {
			/* S-frames carry no payload. */
			if (len != 0) {
				l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
				goto drop;
			}

			l2cap_data_channel_sframe(sk, control, skb);
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming mode never uses S-frames. */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Lost frames are simply skipped: streaming mode has no
		 * retransmission. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
4309
4310 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4311 {
4312 struct sock *sk;
4313
4314 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4315 if (!sk)
4316 goto drop;
4317
4318 BT_DBG("sk %p, len %d", sk, skb->len);
4319
4320 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4321 goto drop;
4322
4323 if (l2cap_pi(sk)->imtu < skb->len)
4324 goto drop;
4325
4326 if (!sock_queue_rcv_skb(sk, skb))
4327 goto done;
4328
4329 drop:
4330 kfree_skb(skb);
4331
4332 done:
4333 if (sk)
4334 bh_unlock_sock(sk);
4335 return 0;
4336 }
4337
4338 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4339 {
4340 struct l2cap_hdr *lh = (void *) skb->data;
4341 u16 cid, len;
4342 __le16 psm;
4343
4344 skb_pull(skb, L2CAP_HDR_SIZE);
4345 cid = __le16_to_cpu(lh->cid);
4346 len = __le16_to_cpu(lh->len);
4347
4348 if (len != skb->len) {
4349 kfree_skb(skb);
4350 return;
4351 }
4352
4353 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4354
4355 switch (cid) {
4356 case L2CAP_CID_SIGNALING:
4357 l2cap_sig_channel(conn, skb);
4358 break;
4359
4360 case L2CAP_CID_CONN_LESS:
4361 psm = get_unaligned_le16(skb->data);
4362 skb_pull(skb, 2);
4363 l2cap_conless_channel(conn, psm, skb);
4364 break;
4365
4366 default:
4367 l2cap_data_channel(conn, cid, skb);
4368 break;
4369 }
4370 }
4371
4372 /* ---- L2CAP interface with lower layer (HCI) ---- */
4373
4374 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4375 {
4376 int exact = 0, lm1 = 0, lm2 = 0;
4377 register struct sock *sk;
4378 struct hlist_node *node;
4379
4380 if (type != ACL_LINK)
4381 return 0;
4382
4383 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4384
4385 /* Find listening sockets and check their link_mode */
4386 read_lock(&l2cap_sk_list.lock);
4387 sk_for_each(sk, node, &l2cap_sk_list.head) {
4388 if (sk->sk_state != BT_LISTEN)
4389 continue;
4390
4391 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4392 lm1 |= HCI_LM_ACCEPT;
4393 if (l2cap_pi(sk)->role_switch)
4394 lm1 |= HCI_LM_MASTER;
4395 exact++;
4396 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4397 lm2 |= HCI_LM_ACCEPT;
4398 if (l2cap_pi(sk)->role_switch)
4399 lm2 |= HCI_LM_MASTER;
4400 }
4401 }
4402 read_unlock(&l2cap_sk_list.lock);
4403
4404 return exact ? lm1 : lm2;
4405 }
4406
4407 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4408 {
4409 struct l2cap_conn *conn;
4410
4411 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4412
4413 if (hcon->type != ACL_LINK)
4414 return 0;
4415
4416 if (!status) {
4417 conn = l2cap_conn_add(hcon, status);
4418 if (conn)
4419 l2cap_conn_ready(conn);
4420 } else
4421 l2cap_conn_del(hcon, bt_err(status));
4422
4423 return 0;
4424 }
4425
4426 static int l2cap_disconn_ind(struct hci_conn *hcon)
4427 {
4428 struct l2cap_conn *conn = hcon->l2cap_data;
4429
4430 BT_DBG("hcon %p", hcon);
4431
4432 if (hcon->type != ACL_LINK || !conn)
4433 return 0x13;
4434
4435 return conn->disc_reason;
4436 }
4437
4438 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4439 {
4440 BT_DBG("hcon %p reason %d", hcon, reason);
4441
4442 if (hcon->type != ACL_LINK)
4443 return 0;
4444
4445 l2cap_conn_del(hcon, bt_err(reason));
4446
4447 return 0;
4448 }
4449
4450 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4451 {
4452 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4453 return;
4454
4455 if (encrypt == 0x00) {
4456 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4457 l2cap_sock_clear_timer(sk);
4458 l2cap_sock_set_timer(sk, HZ * 5);
4459 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4460 __l2cap_sock_close(sk, ECONNREFUSED);
4461 } else {
4462 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4463 l2cap_sock_clear_timer(sk);
4464 }
4465 }
4466
/* HCI callback: authentication/encryption procedure finished on hcon.
 *
 * Walks every channel on the connection: applies the new encryption
 * state to established sockets and advances security-pending ones by
 * sending the deferred Connect Request or Connect Response.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* A Connect Request is already in flight for this sock;
		 * nothing to do until its response arrives. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security complete: send the deferred
				 * Connect Request. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: close shortly. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* An incoming connection was waiting on the
			 * security procedure: answer it now. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
4540
/* HCI callback: ACL data received.
 *
 * An L2CAP frame may span several ACL packets: an ACL_START packet
 * carries the L2CAP header announcing the total length; continuation
 * packets are appended to conn->rx_skb until the frame is complete and
 * handed to l2cap_recv_frame().  Consumes skb; always returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A start packet while reassembly is in progress means
		 * the previous frame was truncated: discard it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Need at least the length field of the L2CAP header. */
		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start frame. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4628
4629 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4630 {
4631 struct sock *sk;
4632 struct hlist_node *node;
4633
4634 read_lock_bh(&l2cap_sk_list.lock);
4635
4636 sk_for_each(sk, node, &l2cap_sk_list.head) {
4637 struct l2cap_pinfo *pi = l2cap_pi(sk);
4638
4639 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4640 batostr(&bt_sk(sk)->src),
4641 batostr(&bt_sk(sk)->dst),
4642 sk->sk_state, __le16_to_cpu(pi->psm),
4643 pi->scid, pi->dcid,
4644 pi->imtu, pi->omtu, pi->sec_level);
4645 }
4646
4647 read_unlock_bh(&l2cap_sk_list.lock);
4648
4649 return 0;
4650 }
4651
/* debugfs open: wire the seq_file show routine to the file. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4656
/* seq_file plumbing for the l2cap debugfs entry. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file, kept for removal at module exit. */
static struct dentry *l2cap_debugfs;
4665
/* Userspace-facing socket operations for BTPROTO_L2CAP sockets. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
4685
/* Socket-creation hook registered with the Bluetooth socket layer. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
4691
/* Callbacks registered with the HCI core: connection lifecycle,
 * security results and inbound ACL data. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4702
4703 static int __init l2cap_init(void)
4704 {
4705 int err;
4706
4707 err = proto_register(&l2cap_proto, 0);
4708 if (err < 0)
4709 return err;
4710
4711 _busy_wq = create_singlethread_workqueue("l2cap");
4712 if (!_busy_wq)
4713 goto error;
4714
4715 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4716 if (err < 0) {
4717 BT_ERR("L2CAP socket registration failed");
4718 goto error;
4719 }
4720
4721 err = hci_register_proto(&l2cap_hci_proto);
4722 if (err < 0) {
4723 BT_ERR("L2CAP protocol registration failed");
4724 bt_sock_unregister(BTPROTO_L2CAP);
4725 goto error;
4726 }
4727
4728 if (bt_debugfs) {
4729 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4730 bt_debugfs, NULL, &l2cap_debugfs_fops);
4731 if (!l2cap_debugfs)
4732 BT_ERR("Failed to create L2CAP debug file");
4733 }
4734
4735 BT_INFO("L2CAP ver %s", VERSION);
4736 BT_INFO("L2CAP socket layer initialized");
4737
4738 return 0;
4739
4740 error:
4741 proto_unregister(&l2cap_proto);
4742 return err;
4743 }
4744
/* Module exit: undo everything done in l2cap_init(). */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Drain pending busy-work before destroying the workqueue. */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
4760
/* Dummy exported symbol so that modules which only use L2CAP sockets
 * (and no other symbol from this module) still trigger automatic
 * loading of the L2CAP module. */
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4768
4769 module_init(l2cap_init);
4770 module_exit(l2cap_exit);
4771
4772 module_param(enable_ertm, bool, 0644);
4773 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4774
4775 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4776 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4777 MODULE_VERSION(VERSION);
4778 MODULE_LICENSE("GPL");
4779 MODULE_ALIAS("bt-proto-0");
This page took 0.144846 seconds and 6 git commands to generate.