NFC: Check if NCI data flow control is used
net/nfc/nci/core.c
/*
 * The NFC Controller Interface is the communication protocol between an
 * NFC Controller (NFCC) and a Device Host (DH).
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * Written by Ilan Elias <ilane@ti.com>
 *
 * Acknowledgements:
 * This file is based on hci_core.c, which was written
 * by Maxim Krasnyansky.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>

#include "../nfc.h"
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
#include <linux/nfc.h>

static void nci_cmd_work(struct work_struct *work);
static void nci_rx_work(struct work_struct *work);
static void nci_tx_work(struct work_struct *work);

/* ---- NCI requests ---- */

void nci_req_complete(struct nci_dev *ndev, int result)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = result;
		ndev->req_status = NCI_REQ_DONE;
		complete(&ndev->req_completion);
	}
}

static void nci_req_cancel(struct nci_dev *ndev, int err)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = err;
		ndev->req_status = NCI_REQ_CANCELED;
		complete(&ndev->req_completion);
	}
}

/* Execute request and wait for completion. */
static int __nci_request(struct nci_dev *ndev,
	void (*req)(struct nci_dev *ndev, unsigned long opt),
	unsigned long opt,
	__u32 timeout)
{
	int rc = 0;
	unsigned long completion_rc;

	ndev->req_status = NCI_REQ_PEND;

	init_completion(&ndev->req_completion);
	req(ndev, opt);
	completion_rc = wait_for_completion_interruptible_timeout(
							&ndev->req_completion,
							timeout);

	nfc_dbg("wait_for_completion return %ld", completion_rc);

	if (completion_rc > 0) {
		switch (ndev->req_status) {
		case NCI_REQ_DONE:
			rc = nci_to_errno(ndev->req_result);
			break;

		case NCI_REQ_CANCELED:
			rc = -ndev->req_result;
			break;

		default:
			rc = -ETIMEDOUT;
			break;
		}
	} else {
		nfc_err("wait_for_completion_interruptible_timeout failed %ld",
			completion_rc);

		rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
	}

	ndev->req_status = ndev->req_result = 0;

	return rc;
}

static inline int nci_request(struct nci_dev *ndev,
	void (*req)(struct nci_dev *ndev, unsigned long opt),
	unsigned long opt, __u32 timeout)
{
	int rc;

	if (!test_bit(NCI_UP, &ndev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	mutex_lock(&ndev->req_lock);
	rc = __nci_request(ndev, req, opt, timeout);
	mutex_unlock(&ndev->req_lock);

	return rc;
}

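/* The nci_*_req() helpers below are the request callbacks handed to
 * __nci_request(): each one builds and queues a single NCI command, and the
 * corresponding response later reports its status through nci_req_complete().
 */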
static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_core_reset_cmd cmd;

	cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG;
	nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd);
}

static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
{
	nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
}

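/* Build the RF_DISCOVER_MAP command from the interfaces reported by the
 * NFCC: the ISO-DEP and NFC-DEP protocols are mapped to their dedicated RF
 * interfaces, everything else stays on the default Frame interface.
 */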
static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_disc_map_cmd cmd;
	struct disc_map_config *cfg = cmd.mapping_configs;
	__u8 *num = &cmd.num_mapping_configs;
	int i;

	/* set rf mapping configurations */
	*num = 0;

	/* by default mapping is set to NCI_RF_INTERFACE_FRAME */
	for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
		if (ndev->supported_rf_interfaces[i] ==
			NCI_RF_INTERFACE_ISO_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
			cfg[*num].rf_interface_type = NCI_RF_INTERFACE_ISO_DEP;
			(*num)++;
		} else if (ndev->supported_rf_interfaces[i] ==
			NCI_RF_INTERFACE_NFC_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
			cfg[*num].rf_interface_type = NCI_RF_INTERFACE_NFC_DEP;
			(*num)++;
		}

		if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
			break;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
		(1 + ((*num)*sizeof(struct disc_map_config))),
		&cmd);
}

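/* Build the RF_DISCOVER command: one passive poll-mode configuration per
 * NFC technology (A, B, F), selected from the protocol mask passed in opt.
 */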
static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_disc_cmd cmd;
	__u32 protocols = opt;

	cmd.num_disc_configs = 0;

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
		(protocols & NFC_PROTO_JEWEL_MASK
		|| protocols & NFC_PROTO_MIFARE_MASK
		|| protocols & NFC_PROTO_ISO14443_MASK
		|| protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].type =
			NCI_DISCOVERY_TYPE_POLL_A_PASSIVE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
		(protocols & NFC_PROTO_ISO14443_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].type =
			NCI_DISCOVERY_TYPE_POLL_B_PASSIVE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
		(protocols & NFC_PROTO_FELICA_MASK
		|| protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].type =
			NCI_DISCOVERY_TYPE_POLL_F_PASSIVE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
		(1 + (cmd.num_disc_configs*sizeof(struct disc_config))),
		&cmd);
}

static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_deactivate_cmd cmd;

	cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;

	nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
		sizeof(struct nci_rf_deactivate_cmd),
		&cmd);
}

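/* Bring-up sequence, run under req_lock: open the driver, then issue
 * CORE_RESET, CORE_INIT and RF_DISCOVER_MAP in turn.  On any failure the
 * queues are purged and the driver is closed again.
 */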
static int nci_open_device(struct nci_dev *ndev)
{
	int rc = 0;

	mutex_lock(&ndev->req_lock);

	if (test_bit(NCI_UP, &ndev->flags)) {
		rc = -EALREADY;
		goto done;
	}

	if (ndev->ops->open(ndev)) {
		rc = -EIO;
		goto done;
	}

	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);

	rc = __nci_request(ndev, nci_reset_req, 0,
				msecs_to_jiffies(NCI_RESET_TIMEOUT));

	if (!rc) {
		rc = __nci_request(ndev, nci_init_req, 0,
				msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	if (!rc) {
		rc = __nci_request(ndev, nci_init_complete_req, 0,
				msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	clear_bit(NCI_INIT, &ndev->flags);

	if (!rc) {
		set_bit(NCI_UP, &ndev->flags);
	} else {
		/* Init failed, cleanup */
		skb_queue_purge(&ndev->cmd_q);
		skb_queue_purge(&ndev->rx_q);
		skb_queue_purge(&ndev->tx_q);

		ndev->ops->close(ndev);
		ndev->flags = 0;
	}

done:
	mutex_unlock(&ndev->req_lock);
	return rc;
}

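/* Teardown: cancel any pending request, drop the data queues, flush the
 * RX/TX workers, reset the NFCC, flush the cmd worker, and only then let
 * the driver close.
 */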
static int nci_close_device(struct nci_dev *ndev)
{
	nci_req_cancel(ndev, ENODEV);
	mutex_lock(&ndev->req_lock);

	if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
		del_timer_sync(&ndev->cmd_timer);
		mutex_unlock(&ndev->req_lock);
		return 0;
	}

	/* Drop RX and TX queues */
	skb_queue_purge(&ndev->rx_q);
	skb_queue_purge(&ndev->tx_q);

	/* Flush RX and TX wq */
	flush_workqueue(ndev->rx_wq);
	flush_workqueue(ndev->tx_wq);

	/* Reset device */
	skb_queue_purge(&ndev->cmd_q);
	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);
	__nci_request(ndev, nci_reset_req, 0,
				msecs_to_jiffies(NCI_RESET_TIMEOUT));
	clear_bit(NCI_INIT, &ndev->flags);

	/* Flush cmd wq */
	flush_workqueue(ndev->cmd_wq);

	/* After this point our queues are empty
	 * and no works are scheduled. */
	ndev->ops->close(ndev);

	/* Clear flags */
	ndev->flags = 0;

	mutex_unlock(&ndev->req_lock);

	return 0;
}

/* NCI command timer function */
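/* No response arrived within NCI_CMD_TIMEOUT: restore the single command
 * credit and kick the cmd worker so the next queued command can be sent.
 */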
static void nci_cmd_timer(unsigned long arg)
{
	struct nci_dev *ndev = (void *) arg;

	nfc_dbg("entry");

	atomic_set(&ndev->cmd_cnt, 1);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);
}

static int nci_dev_up(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry");

	return nci_open_device(ndev);
}

static int nci_dev_down(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry");

	return nci_close_device(ndev);
}

static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	nfc_dbg("entry");

	if (test_bit(NCI_DISCOVERY, &ndev->flags)) {
		nfc_err("unable to start poll, since poll is already active");
		return -EBUSY;
	}

	if (ndev->target_active_prot) {
		nfc_err("there is an active target");
		return -EBUSY;
	}

	if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
		nfc_dbg("target is active, implicitly deactivate...");

		rc = nci_request(ndev, nci_rf_deactivate_req, 0,
			msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
		if (rc)
			return -EBUSY;
	}

	rc = nci_request(ndev, nci_rf_discover_req, protocols,
		msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));

	if (!rc)
		ndev->poll_prots = protocols;

	return rc;
}

static void nci_stop_poll(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry");

	if (!test_bit(NCI_DISCOVERY, &ndev->flags)) {
		nfc_err("unable to stop poll, since poll is not active");
		return;
	}

	nci_request(ndev, nci_rf_deactivate_req, 0,
		msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}

static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
				__u32 protocol)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry, target_idx %d, protocol 0x%x", target_idx, protocol);

	if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
		nfc_err("there is no available target to activate");
		return -EINVAL;
	}

	if (ndev->target_active_prot) {
		nfc_err("there is already an active target");
		return -EBUSY;
	}

	if (!(ndev->target_available_prots & (1 << protocol))) {
		nfc_err("target does not support the requested protocol 0x%x",
			protocol);
		return -EINVAL;
	}

	ndev->target_active_prot = protocol;
	ndev->target_available_prots = 0;

	return 0;
}

static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry, target_idx %d", target_idx);

	if (!ndev->target_active_prot) {
		nfc_err("unable to deactivate target, no active target");
		return;
	}

	ndev->target_active_prot = 0;

	if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
		nci_request(ndev, nci_rf_deactivate_req, 0,
			msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
	}
}

static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
				struct sk_buff *skb,
				data_exchange_cb_t cb,
				void *cb_context)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	nfc_dbg("entry, target_idx %d, len %d", target_idx, skb->len);

	if (!ndev->target_active_prot) {
		nfc_err("unable to exchange data, no active target");
		return -EINVAL;
	}

	if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
		return -EBUSY;

	/* store cb and context to be used on receiving data */
	ndev->data_exchange_cb = cb;
	ndev->data_exchange_cb_context = cb_context;

	rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
	if (rc)
		clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);

	return rc;
}

static struct nfc_ops nci_nfc_ops = {
	.dev_up = nci_dev_up,
	.dev_down = nci_dev_down,
	.start_poll = nci_start_poll,
	.stop_poll = nci_stop_poll,
	.activate_target = nci_activate_target,
	.deactivate_target = nci_deactivate_target,
	.data_exchange = nci_data_exchange,
};

/* ---- Interface to NCI drivers ---- */

/**
 * nci_allocate_device - allocate a new nci device
 *
 * @ops: device operations
 * @supported_protocols: NFC protocols supported by the device
 * @tx_headroom: reserved driver headroom for outgoing skbs
 * @tx_tailroom: reserved driver tailroom for outgoing skbs
 */
struct nci_dev *nci_allocate_device(struct nci_ops *ops,
					__u32 supported_protocols,
					int tx_headroom,
					int tx_tailroom)
{
	struct nci_dev *ndev;

	nfc_dbg("entry, supported_protocols 0x%x", supported_protocols);

	if (!ops->open || !ops->close || !ops->send)
		return NULL;

	if (!supported_protocols)
		return NULL;

	ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
	if (!ndev)
		return NULL;

	ndev->ops = ops;
	ndev->tx_headroom = tx_headroom;
	ndev->tx_tailroom = tx_tailroom;

	ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
						supported_protocols,
						tx_headroom + NCI_DATA_HDR_SIZE,
						tx_tailroom);
	if (!ndev->nfc_dev)
		goto free_exit;

	nfc_set_drvdata(ndev->nfc_dev, ndev);

	return ndev;

free_exit:
	kfree(ndev);
	return NULL;
}
EXPORT_SYMBOL(nci_allocate_device);

/**
 * nci_free_device - deallocate nci device
 *
 * @ndev: The nci device to deallocate
 */
void nci_free_device(struct nci_dev *ndev)
{
	nfc_dbg("entry");

	nfc_free_device(ndev->nfc_dev);
	kfree(ndev);
}
EXPORT_SYMBOL(nci_free_device);

/**
 * nci_register_device - register a nci device in the nfc subsystem
 *
 * @ndev: The nci device to register
 */
int nci_register_device(struct nci_dev *ndev)
{
	int rc;
	struct device *dev = &ndev->nfc_dev->dev;
	char name[32];

	nfc_dbg("entry");

	rc = nfc_register_device(ndev->nfc_dev);
	if (rc)
		goto exit;

	ndev->flags = 0;

	INIT_WORK(&ndev->cmd_work, nci_cmd_work);
	snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
	ndev->cmd_wq = create_singlethread_workqueue(name);
	if (!ndev->cmd_wq) {
		rc = -ENOMEM;
		goto unreg_exit;
	}

	INIT_WORK(&ndev->rx_work, nci_rx_work);
	snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
	ndev->rx_wq = create_singlethread_workqueue(name);
	if (!ndev->rx_wq) {
		rc = -ENOMEM;
		goto destroy_cmd_wq_exit;
	}

	INIT_WORK(&ndev->tx_work, nci_tx_work);
	snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
	ndev->tx_wq = create_singlethread_workqueue(name);
	if (!ndev->tx_wq) {
		rc = -ENOMEM;
		goto destroy_rx_wq_exit;
	}

	skb_queue_head_init(&ndev->cmd_q);
	skb_queue_head_init(&ndev->rx_q);
	skb_queue_head_init(&ndev->tx_q);

	setup_timer(&ndev->cmd_timer, nci_cmd_timer,
			(unsigned long) ndev);

	mutex_init(&ndev->req_lock);

	goto exit;

destroy_rx_wq_exit:
	destroy_workqueue(ndev->rx_wq);

destroy_cmd_wq_exit:
	destroy_workqueue(ndev->cmd_wq);

unreg_exit:
	nfc_unregister_device(ndev->nfc_dev);

exit:
	return rc;
}
EXPORT_SYMBOL(nci_register_device);

/**
 * nci_unregister_device - unregister a nci device in the nfc subsystem
 *
 * @ndev: The nci device to unregister
 */
void nci_unregister_device(struct nci_dev *ndev)
{
	nfc_dbg("entry");

	nci_close_device(ndev);

	destroy_workqueue(ndev->cmd_wq);
	destroy_workqueue(ndev->rx_wq);
	destroy_workqueue(ndev->tx_wq);

	nfc_unregister_device(ndev->nfc_dev);
}
EXPORT_SYMBOL(nci_unregister_device);

/**
 * nci_recv_frame - receive frame from NCI drivers
 *
 * @skb: The sk_buff to receive
 */
int nci_recv_frame(struct sk_buff *skb)
{
	struct nci_dev *ndev = (struct nci_dev *) skb->dev;

	nfc_dbg("entry, len %d", skb->len);

	if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
		&& !test_bit(NCI_INIT, &ndev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Queue frame for rx worker thread */
	skb_queue_tail(&ndev->rx_q, skb);
	queue_work(ndev->rx_wq, &ndev->rx_work);

	return 0;
}
EXPORT_SYMBOL(nci_recv_frame);

static int nci_send_frame(struct sk_buff *skb)
{
	struct nci_dev *ndev = (struct nci_dev *) skb->dev;

	nfc_dbg("entry, len %d", skb->len);

	if (!ndev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return ndev->ops->send(skb);
}

/* Send NCI command */
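/* Build the control packet in a freshly allocated skb (MT=CMD, PBF=LAST,
 * GID/OID taken from the opcode, followed by plen payload bytes) and queue
 * it for the cmd worker.
 */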
int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
{
	struct nci_ctrl_hdr *hdr;
	struct sk_buff *skb;

	nfc_dbg("entry, opcode 0x%x, plen %d", opcode, plen);

	skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
	if (!skb) {
		nfc_err("no memory for command");
		return -ENOMEM;
	}

	hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
	hdr->gid = nci_opcode_gid(opcode);
	hdr->oid = nci_opcode_oid(opcode);
	hdr->plen = plen;

	nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
	nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);

	if (plen)
		memcpy(skb_put(skb, plen), payload, plen);

	skb->dev = (void *) ndev;

	skb_queue_tail(&ndev->cmd_q, skb);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);

	return 0;
}

/* ---- NCI TX Data worker thread ---- */

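/* Drain tx_q for as long as credits_cnt allows; every data packet handed to
 * the driver normally consumes one flow-control credit from the NFCC.
 */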
static void nci_tx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
	struct sk_buff *skb;

	nfc_dbg("entry, credits_cnt %d", atomic_read(&ndev->credits_cnt));

	/* Send queued tx data */
	while (atomic_read(&ndev->credits_cnt)) {
		skb = skb_dequeue(&ndev->tx_q);
		if (!skb)
			return;

		/* Check if data flow control is used */
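		/* If the NFCC reported NCI_DATA_FLOW_CONTROL_NOT_USED, the
		 * credit count is never decremented, so transmission is not
		 * throttled by credits.
		 */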
		if (atomic_read(&ndev->credits_cnt) !=
				NCI_DATA_FLOW_CONTROL_NOT_USED)
			atomic_dec(&ndev->credits_cnt);

		nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d",
				nci_pbf(skb->data),
				nci_conn_id(skb->data),
				nci_plen(skb->data));

		nci_send_frame(skb);
	}
}

/* ----- NCI RX worker thread (data & control) ----- */

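/* Dequeue received frames and dispatch them on the NCI message type:
 * responses and notifications go to the control handlers, data packets to
 * the data path.
 */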
static void nci_rx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&ndev->rx_q))) {
		/* Process frame */
		switch (nci_mt(skb->data)) {
		case NCI_MT_RSP_PKT:
			nci_rsp_packet(ndev, skb);
			break;

		case NCI_MT_NTF_PKT:
			nci_ntf_packet(ndev, skb);
			break;

		case NCI_MT_DATA_PKT:
			nci_rx_data_packet(ndev, skb);
			break;

		default:
			nfc_err("unknown MT 0x%x", nci_mt(skb->data));
			kfree_skb(skb);
			break;
		}
	}
}

/* ----- NCI TX CMD worker thread ----- */

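/* Only one command may be outstanding: cmd_cnt is a single credit that is
 * consumed here and given back once the response is processed, or by
 * nci_cmd_timer() if no response arrives in time.
 */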
static void nci_cmd_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
	struct sk_buff *skb;

	nfc_dbg("entry, cmd_cnt %d", atomic_read(&ndev->cmd_cnt));

	/* Send queued command */
	if (atomic_read(&ndev->cmd_cnt)) {
		skb = skb_dequeue(&ndev->cmd_q);
		if (!skb)
			return;

		atomic_dec(&ndev->cmd_cnt);

		nfc_dbg("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
				nci_pbf(skb->data),
				nci_opcode_gid(nci_opcode(skb->data)),
				nci_opcode_oid(nci_opcode(skb->data)),
				nci_plen(skb->data));

		nci_send_frame(skb);

		mod_timer(&ndev->cmd_timer,
			jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
	}
}