NFC: Clearly separate NCI states from flags
net/nfc/nci/core.c
/*
 * The NFC Controller Interface is the communication protocol between an
 * NFC Controller (NFCC) and a Device Host (DH).
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * Written by Ilan Elias <ilane@ti.com>
 *
 * Acknowledgements:
 * This file is based on hci_core.c, which was written
 * by Maxim Krasnyansky.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__

#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>

#include "../nfc.h"
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
#include <linux/nfc.h>

static void nci_cmd_work(struct work_struct *work);
static void nci_rx_work(struct work_struct *work);
static void nci_tx_work(struct work_struct *work);

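/* ndev->flags carries driver-level bits (NCI_UP, NCI_INIT,
 * NCI_DATA_EXCHANGE, NCI_DATA_EXCHANGE_TO), while ndev->state is an
 * atomic that tracks the NCI state machine (NCI_IDLE, NCI_DISCOVERY,
 * NCI_POLL_ACTIVE, ...), kept separate from the flags.
 */
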
/* ---- NCI requests ---- */

void nci_req_complete(struct nci_dev *ndev, int result)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = result;
		ndev->req_status = NCI_REQ_DONE;
		complete(&ndev->req_completion);
	}
}

static void nci_req_cancel(struct nci_dev *ndev, int err)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = err;
		ndev->req_status = NCI_REQ_CANCELED;
		complete(&ndev->req_completion);
	}
}

/* Execute request and wait for completion. */
static int __nci_request(struct nci_dev *ndev,
			 void (*req)(struct nci_dev *ndev, unsigned long opt),
			 unsigned long opt,
			 __u32 timeout)
{
	int rc = 0;
	long completion_rc;

	ndev->req_status = NCI_REQ_PEND;

	init_completion(&ndev->req_completion);
	req(ndev, opt);
	completion_rc = wait_for_completion_interruptible_timeout(
						&ndev->req_completion,
						timeout);

	pr_debug("wait_for_completion return %ld\n", completion_rc);

	if (completion_rc > 0) {
		switch (ndev->req_status) {
		case NCI_REQ_DONE:
			rc = nci_to_errno(ndev->req_result);
			break;

		case NCI_REQ_CANCELED:
			rc = -ndev->req_result;
			break;

		default:
			rc = -ETIMEDOUT;
			break;
		}
	} else {
		pr_err("wait_for_completion_interruptible_timeout failed %ld\n",
		       completion_rc);

		rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
	}

	ndev->req_status = ndev->req_result = 0;

	return rc;
}

static inline int nci_request(struct nci_dev *ndev,
			      void (*req)(struct nci_dev *ndev, unsigned long opt),
			      unsigned long opt, __u32 timeout)
{
	int rc;

	if (!test_bit(NCI_UP, &ndev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	mutex_lock(&ndev->req_lock);
	rc = __nci_request(ndev, req, opt, timeout);
	mutex_unlock(&ndev->req_lock);

	return rc;
}

static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_core_reset_cmd cmd;

	cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG;
	nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd);
}

static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
{
	nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
}

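/* Build the RF_DISCOVER_MAP command: map the ISO-DEP and NFC-DEP
 * protocols to their matching RF interfaces for both poll and listen
 * mode; everything else stays on the default frame interface.
 */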
static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_disc_map_cmd cmd;
	struct disc_map_config *cfg = cmd.mapping_configs;
	__u8 *num = &cmd.num_mapping_configs;
	int i;

	/* set rf mapping configurations */
	*num = 0;

	/* by default mapping is set to NCI_RF_INTERFACE_FRAME */
	for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
		if (ndev->supported_rf_interfaces[i] ==
		    NCI_RF_INTERFACE_ISO_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
				NCI_DISC_MAP_MODE_LISTEN;
			cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP;
			(*num)++;
		} else if (ndev->supported_rf_interfaces[i] ==
			   NCI_RF_INTERFACE_NFC_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
				NCI_DISC_MAP_MODE_LISTEN;
			cfg[*num].rf_interface = NCI_RF_INTERFACE_NFC_DEP;
			(*num)++;
		}

		if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
			break;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
		     (1 + ((*num) * sizeof(struct disc_map_config))),
		     &cmd);
}

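/* Build the RF_DISCOVER command from the requested protocol mask,
 * enabling the NFC-A/B/F passive poll modes as needed.
 */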
static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_disc_cmd cmd;
	__u32 protocols = opt;

	cmd.num_disc_configs = 0;

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_JEWEL_MASK
	     || protocols & NFC_PROTO_MIFARE_MASK
	     || protocols & NFC_PROTO_ISO14443_MASK
	     || protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_A_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_ISO14443_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_B_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_FELICA_MASK
	     || protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_F_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
		     (1 + (cmd.num_disc_configs * sizeof(struct disc_config))),
		     &cmd);
}

static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_deactivate_cmd cmd;

	cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;

	nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
		     sizeof(struct nci_rf_deactivate_cmd),
		     &cmd);
}

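/* Bring the device up: open the transport, then run CORE_RESET,
 * CORE_INIT and the RF discover-map sequence under req_lock.  On
 * success NCI_UP is set and the state machine enters NCI_IDLE; on
 * failure all queues are purged and the transport is closed again.
 */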
static int nci_open_device(struct nci_dev *ndev)
{
	int rc = 0;

	mutex_lock(&ndev->req_lock);

	if (test_bit(NCI_UP, &ndev->flags)) {
		rc = -EALREADY;
		goto done;
	}

	if (ndev->ops->open(ndev)) {
		rc = -EIO;
		goto done;
	}

	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);

	rc = __nci_request(ndev, nci_reset_req, 0,
			   msecs_to_jiffies(NCI_RESET_TIMEOUT));

	if (!rc) {
		rc = __nci_request(ndev, nci_init_req, 0,
				   msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	if (!rc) {
		rc = __nci_request(ndev, nci_init_complete_req, 0,
				   msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	clear_bit(NCI_INIT, &ndev->flags);

	if (!rc) {
		set_bit(NCI_UP, &ndev->flags);
		atomic_set(&ndev->state, NCI_IDLE);
	} else {
		/* Init failed, cleanup */
		skb_queue_purge(&ndev->cmd_q);
		skb_queue_purge(&ndev->rx_q);
		skb_queue_purge(&ndev->tx_q);

		ndev->ops->close(ndev);
		ndev->flags = 0;
	}

done:
	mutex_unlock(&ndev->req_lock);
	return rc;
}

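/* Take the device down: cancel any pending request, drop the data
 * queues, flush the work queues, send a final CORE_RESET and close
 * the transport; NCI_UP is cleared first so new requests are refused.
 */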
static int nci_close_device(struct nci_dev *ndev)
{
	nci_req_cancel(ndev, ENODEV);
	mutex_lock(&ndev->req_lock);

	if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
		del_timer_sync(&ndev->cmd_timer);
		del_timer_sync(&ndev->data_timer);
		mutex_unlock(&ndev->req_lock);
		return 0;
	}

	/* Drop RX and TX queues */
	skb_queue_purge(&ndev->rx_q);
	skb_queue_purge(&ndev->tx_q);

	/* Flush RX and TX wq */
	flush_workqueue(ndev->rx_wq);
	flush_workqueue(ndev->tx_wq);

	/* Reset device */
	skb_queue_purge(&ndev->cmd_q);
	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);
	__nci_request(ndev, nci_reset_req, 0,
		      msecs_to_jiffies(NCI_RESET_TIMEOUT));
	clear_bit(NCI_INIT, &ndev->flags);

	/* Flush cmd wq */
	flush_workqueue(ndev->cmd_wq);

	/* After this point our queues are empty
	 * and no works are scheduled. */
	ndev->ops->close(ndev);

	/* Clear flags */
	ndev->flags = 0;

	mutex_unlock(&ndev->req_lock);

	return 0;
}

/* NCI command timer function */
static void nci_cmd_timer(unsigned long arg)
{
	struct nci_dev *ndev = (void *) arg;

	atomic_set(&ndev->cmd_cnt, 1);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);
}

/* NCI data exchange timer function */
static void nci_data_timer(unsigned long arg)
{
	struct nci_dev *ndev = (void *) arg;

	set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
	queue_work(ndev->rx_wq, &ndev->rx_work);
}

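/* ---- nfc_ops callbacks (interface to the NFC core) ---- */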
static int nci_dev_up(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	return nci_open_device(ndev);
}

static int nci_dev_down(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	return nci_close_device(ndev);
}

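/* Start polling for the requested protocols.  A still-active target
 * (NCI_POLL_ACTIVE) is implicitly deactivated first; polling is refused
 * while discovery is already running or a target is in use.
 */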
static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	if (atomic_read(&ndev->state) == NCI_DISCOVERY) {
		pr_err("unable to start poll, since poll is already active\n");
		return -EBUSY;
	}

	if (ndev->target_active_prot) {
		pr_err("there is an active target\n");
		return -EBUSY;
	}

	if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) {
		pr_debug("target is active, implicitly deactivate...\n");

		rc = nci_request(ndev, nci_rf_deactivate_req, 0,
				 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
		if (rc)
			return -EBUSY;
	}

	rc = nci_request(ndev, nci_rf_discover_req, protocols,
			 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));

	if (!rc)
		ndev->poll_prots = protocols;

	return rc;
}

static void nci_stop_poll(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	if (atomic_read(&ndev->state) != NCI_DISCOVERY) {
		pr_err("unable to stop poll, since poll is not active\n");
		return;
	}

	nci_request(ndev, nci_rf_deactivate_req, 0,
		    msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}

static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
			       __u32 protocol)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol);

	if (atomic_read(&ndev->state) != NCI_POLL_ACTIVE) {
		pr_err("there is no available target to activate\n");
		return -EINVAL;
	}

	if (ndev->target_active_prot) {
		pr_err("there is already an active target\n");
		return -EBUSY;
	}

	if (!(ndev->target_available_prots & (1 << protocol))) {
		pr_err("target does not support the requested protocol 0x%x\n",
		       protocol);
		return -EINVAL;
	}

	ndev->target_active_prot = protocol;
	ndev->target_available_prots = 0;

	return 0;
}

static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	pr_debug("target_idx %d\n", target_idx);

	if (!ndev->target_active_prot) {
		pr_err("unable to deactivate target, no active target\n");
		return;
	}

	ndev->target_active_prot = 0;

	if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) {
		nci_request(ndev, nci_rf_deactivate_req, 0,
			    msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
	}
}

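/* Exchange data with the active target.  Only one exchange may be in
 * flight at a time (guarded by the NCI_DATA_EXCHANGE flag); cb is
 * invoked from the RX path when the response or a timeout arrives.
 */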
static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
			     struct sk_buff *skb,
			     data_exchange_cb_t cb,
			     void *cb_context)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	pr_debug("target_idx %d, len %d\n", target_idx, skb->len);

	if (!ndev->target_active_prot) {
		pr_err("unable to exchange data, no active target\n");
		return -EINVAL;
	}

	if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
		return -EBUSY;

	/* store cb and context to be used on receiving data */
	ndev->data_exchange_cb = cb;
	ndev->data_exchange_cb_context = cb_context;

	rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
	if (rc)
		clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);

	return rc;
}

static struct nfc_ops nci_nfc_ops = {
	.dev_up = nci_dev_up,
	.dev_down = nci_dev_down,
	.start_poll = nci_start_poll,
	.stop_poll = nci_stop_poll,
	.activate_target = nci_activate_target,
	.deactivate_target = nci_deactivate_target,
	.data_exchange = nci_data_exchange,
};

/* ---- Interface to NCI drivers ---- */

/**
 * nci_allocate_device - allocate a new nci device
 *
 * @ops: device operations
 * @supported_protocols: NFC protocols supported by the device
 * @tx_headroom: reserved driver TX headroom
 * @tx_tailroom: reserved driver TX tailroom
 */
struct nci_dev *nci_allocate_device(struct nci_ops *ops,
				    __u32 supported_protocols,
				    int tx_headroom,
				    int tx_tailroom)
{
	struct nci_dev *ndev;

	pr_debug("supported_protocols 0x%x\n", supported_protocols);

	if (!ops->open || !ops->close || !ops->send)
		return NULL;

	if (!supported_protocols)
		return NULL;

	ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
	if (!ndev)
		return NULL;

	ndev->ops = ops;
	ndev->tx_headroom = tx_headroom;
	ndev->tx_tailroom = tx_tailroom;

	ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
					    supported_protocols,
					    tx_headroom + NCI_DATA_HDR_SIZE,
					    tx_tailroom);
	if (!ndev->nfc_dev)
		goto free_exit;

	nfc_set_drvdata(ndev->nfc_dev, ndev);

	return ndev;

free_exit:
	kfree(ndev);
	return NULL;
}
EXPORT_SYMBOL(nci_allocate_device);

/**
 * nci_free_device - deallocate nci device
 *
 * @ndev: The nci device to deallocate
 */
void nci_free_device(struct nci_dev *ndev)
{
	nfc_free_device(ndev->nfc_dev);
	kfree(ndev);
}
EXPORT_SYMBOL(nci_free_device);

/**
 * nci_register_device - register a nci device in the nfc subsystem
 *
 * @ndev: The nci device to register
 */
int nci_register_device(struct nci_dev *ndev)
{
	int rc;
	struct device *dev = &ndev->nfc_dev->dev;
	char name[32];

	rc = nfc_register_device(ndev->nfc_dev);
	if (rc)
		goto exit;

	ndev->flags = 0;

	INIT_WORK(&ndev->cmd_work, nci_cmd_work);
	snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
	ndev->cmd_wq = create_singlethread_workqueue(name);
	if (!ndev->cmd_wq) {
		rc = -ENOMEM;
		goto unreg_exit;
	}

	INIT_WORK(&ndev->rx_work, nci_rx_work);
	snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
	ndev->rx_wq = create_singlethread_workqueue(name);
	if (!ndev->rx_wq) {
		rc = -ENOMEM;
		goto destroy_cmd_wq_exit;
	}

	INIT_WORK(&ndev->tx_work, nci_tx_work);
	snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
	ndev->tx_wq = create_singlethread_workqueue(name);
	if (!ndev->tx_wq) {
		rc = -ENOMEM;
		goto destroy_rx_wq_exit;
	}

	skb_queue_head_init(&ndev->cmd_q);
	skb_queue_head_init(&ndev->rx_q);
	skb_queue_head_init(&ndev->tx_q);

	setup_timer(&ndev->cmd_timer, nci_cmd_timer,
		    (unsigned long) ndev);
	setup_timer(&ndev->data_timer, nci_data_timer,
		    (unsigned long) ndev);

	mutex_init(&ndev->req_lock);

	goto exit;

destroy_rx_wq_exit:
	destroy_workqueue(ndev->rx_wq);

destroy_cmd_wq_exit:
	destroy_workqueue(ndev->cmd_wq);

unreg_exit:
	nfc_unregister_device(ndev->nfc_dev);

exit:
	return rc;
}
EXPORT_SYMBOL(nci_register_device);

/**
 * nci_unregister_device - unregister a nci device in the nfc subsystem
 *
 * @ndev: The nci device to unregister
 */
void nci_unregister_device(struct nci_dev *ndev)
{
	nci_close_device(ndev);

	destroy_workqueue(ndev->cmd_wq);
	destroy_workqueue(ndev->rx_wq);
	destroy_workqueue(ndev->tx_wq);

	nfc_unregister_device(ndev->nfc_dev);
}
EXPORT_SYMBOL(nci_unregister_device);

/**
 * nci_recv_frame - receive frame from NCI drivers
 *
 * @skb: The sk_buff to receive
 */
int nci_recv_frame(struct sk_buff *skb)
{
	struct nci_dev *ndev = (struct nci_dev *) skb->dev;

	pr_debug("len %d\n", skb->len);

	if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
	    && !test_bit(NCI_INIT, &ndev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Queue frame for rx worker thread */
	skb_queue_tail(&ndev->rx_q, skb);
	queue_work(ndev->rx_wq, &ndev->rx_work);

	return 0;
}
EXPORT_SYMBOL(nci_recv_frame);

static int nci_send_frame(struct sk_buff *skb)
{
	struct nci_dev *ndev = (struct nci_dev *) skb->dev;

	pr_debug("len %d\n", skb->len);

	if (!ndev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return ndev->ops->send(skb);
}

/* Send NCI command */
int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
{
	struct nci_ctrl_hdr *hdr;
	struct sk_buff *skb;

	pr_debug("opcode 0x%x, plen %d\n", opcode, plen);

	skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
	if (!skb) {
		pr_err("no memory for command\n");
		return -ENOMEM;
	}

	hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
	hdr->gid = nci_opcode_gid(opcode);
	hdr->oid = nci_opcode_oid(opcode);
	hdr->plen = plen;

	nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
	nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);

	if (plen)
		memcpy(skb_put(skb, plen), payload, plen);

	skb->dev = (void *) ndev;

	skb_queue_tail(&ndev->cmd_q, skb);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);

	return 0;
}

/* ---- NCI TX Data worker thread ---- */

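/* Drain the TX data queue while flow-control credits are available.
 * Each frame sent consumes one credit (unless flow control is not
 * used) and re-arms the data timer; credits are replenished when the
 * NFCC grants new ones.
 */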
static void nci_tx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
	struct sk_buff *skb;

	pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt));

	/* Send queued tx data */
	while (atomic_read(&ndev->credits_cnt)) {
		skb = skb_dequeue(&ndev->tx_q);
		if (!skb)
			return;

		/* Check if data flow control is used */
		if (atomic_read(&ndev->credits_cnt) !=
		    NCI_DATA_FLOW_CONTROL_NOT_USED)
			atomic_dec(&ndev->credits_cnt);

		pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
			 nci_pbf(skb->data),
			 nci_conn_id(skb->data),
			 nci_plen(skb->data));

		nci_send_frame(skb);

		mod_timer(&ndev->data_timer,
			  jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT));
	}
}

/* ----- NCI RX worker thread (data & control) ----- */

static void nci_rx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&ndev->rx_q))) {
		/* Process frame */
		switch (nci_mt(skb->data)) {
		case NCI_MT_RSP_PKT:
			nci_rsp_packet(ndev, skb);
			break;

		case NCI_MT_NTF_PKT:
			nci_ntf_packet(ndev, skb);
			break;

		case NCI_MT_DATA_PKT:
			nci_rx_data_packet(ndev, skb);
			break;

		default:
			pr_err("unknown MT 0x%x\n", nci_mt(skb->data));
			kfree_skb(skb);
			break;
		}
	}

	/* check if a data exchange timeout has occurred */
	if (test_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags)) {
		/* complete the data exchange transaction, if exists */
		if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
			nci_data_exchange_complete(ndev, NULL, -ETIMEDOUT);

		clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
	}
}

/* ----- NCI TX CMD worker thread ----- */

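/* Send at most one queued command at a time: cmd_cnt acts as a
 * one-command window, and nci_cmd_timer restores it if no response
 * arrives within NCI_CMD_TIMEOUT.
 */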
static void nci_cmd_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
	struct sk_buff *skb;

	pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt));

	/* Send queued command */
	if (atomic_read(&ndev->cmd_cnt)) {
		skb = skb_dequeue(&ndev->cmd_q);
		if (!skb)
			return;

		atomic_dec(&ndev->cmd_cnt);

		pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
			 nci_pbf(skb->data),
			 nci_opcode_gid(nci_opcode(skb->data)),
			 nci_opcode_oid(nci_opcode(skb->data)),
			 nci_plen(skb->data));

		nci_send_frame(skb);

		mod_timer(&ndev->cmd_timer,
			  jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
	}
}