net: remove interrupt.h inclusion from netdevice.h
[deliverable/linux.git] / drivers / net / caif / caif_serial.c
CommitLineData
9b27105b
SB
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland / sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
a6b7a407 7#include <linux/hardirq.h>
9b27105b
SB
8#include <linux/init.h>
9#include <linux/version.h>
10#include <linux/module.h>
11#include <linux/device.h>
12#include <linux/types.h>
13#include <linux/skbuff.h>
14#include <linux/netdevice.h>
15#include <linux/rtnetlink.h>
16#include <linux/tty.h>
17#include <linux/file.h>
18#include <linux/if_arp.h>
19#include <net/caif/caif_device.h>
20#include <net/caif/cfcnfg.h>
21#include <linux/err.h>
22#include <linux/debugfs.h>
23
24MODULE_LICENSE("GPL");
25MODULE_AUTHOR("Sjur Brendeland<sjur.brandeland@stericsson.com>");
26MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
27MODULE_LICENSE("GPL");
28MODULE_ALIAS_LDISC(N_CAIF);
29
/*
 * Queue watermarks: flow is turned OFF towards the stack when the tx queue
 * exceeds SEND_QUEUE_HIGH and back ON once it drains to SEND_QUEUE_LOW.
 * CAIF_SENDING / CAIF_FLOW_OFF_SENT are *bit numbers* used with
 * test_and_set_bit()/clear_bit() on ser_device.state.
 */
#define SEND_QUEUE_LOW 10
#define SEND_QUEUE_HIGH 100
#define CAIF_SENDING 1 /* Bit 1 = 0x02*/
#define CAIF_FLOW_OFF_SENT 4 /* Bit 4 = 0x10 */
#define MAX_WRITE_CHUNK 4096	/* default/upper bound for ser_write_chunk */
#define ON 1
#define OFF 0
#define CAIF_MAX_MTU 4096	/* MTU assigned to the CAIF net device */
38
39/*This list is protected by the rtnl lock. */
40static LIST_HEAD(ser_list);
41
42static int ser_loop;
43module_param(ser_loop, bool, S_IRUGO);
44MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");
45
46static int ser_use_stx = 1;
47module_param(ser_use_stx, bool, S_IRUGO);
48MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");
49
50static int ser_use_fcs = 1;
51
52module_param(ser_use_fcs, bool, S_IRUGO);
53MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");
54
/* Largest number of bytes pushed to the tty driver in one write. */
static int ser_write_chunk = MAX_WRITE_CHUNK;
module_param(ser_write_chunk, int, S_IRUGO);

MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");

/* Root of this driver's debugfs tree ("caif_serial/"). */
static struct dentry *debugfsdir;
61
62static int caif_net_open(struct net_device *dev);
63static int caif_net_close(struct net_device *dev);
64
/* Per-tty CAIF serial device; lives in the netdev's private area. */
struct ser_device {
	struct caif_dev_common common;	/* CAIF config + flowctrl callback */
	struct list_head node;		/* entry in ser_list (rtnl-protected) */
	struct net_device *dev;		/* the registered CAIF net device */
	struct sk_buff_head head;	/* tx queue towards the tty */
	struct tty_struct *tty;		/* tty we are bound to (kref held) */
	bool tx_started;		/* set on first tx; gates rx garbage drop */
	unsigned long state;		/* CAIF_SENDING / CAIF_FLOW_OFF_SENT bits */
	char *tty_name;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_tty_dir;	/* per-tty debugfs directory */
	struct debugfs_blob_wrapper tx_blob;	/* exposes last tx msg */
	struct debugfs_blob_wrapper rx_blob;	/* exposes last rx msg */
	u8 rx_data[128];	/* backing store for rx_blob */
	u8 tx_data[128];	/* backing store for tx_blob */
	u8 tty_status;		/* packed tty flag snapshot, see update_tty_status() */

#endif
};
84
85static void caifdev_setup(struct net_device *dev);
86static void ldisc_tx_wakeup(struct tty_struct *tty);
87#ifdef CONFIG_DEBUG_FS
88static inline void update_tty_status(struct ser_device *ser)
89{
90 ser->tty_status =
91 ser->tty->stopped << 5 |
92 ser->tty->hw_stopped << 4 |
93 ser->tty->flow_stopped << 3 |
94 ser->tty->packet << 2 |
95 ser->tty->low_latency << 1 |
96 ser->tty->warned;
97}
98static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
99{
100 ser->debugfs_tty_dir =
101 debugfs_create_dir(tty->name, debugfsdir);
102 if (!IS_ERR(ser->debugfs_tty_dir)) {
103 debugfs_create_blob("last_tx_msg", S_IRUSR,
104 ser->debugfs_tty_dir,
105 &ser->tx_blob);
106
107 debugfs_create_blob("last_rx_msg", S_IRUSR,
108 ser->debugfs_tty_dir,
109 &ser->rx_blob);
110
111 debugfs_create_x32("ser_state", S_IRUSR,
112 ser->debugfs_tty_dir,
113 (u32 *)&ser->state);
114
115 debugfs_create_x8("tty_status", S_IRUSR,
116 ser->debugfs_tty_dir,
117 &ser->tty_status);
118
119 }
120 ser->tx_blob.data = ser->tx_data;
121 ser->tx_blob.size = 0;
122 ser->rx_blob.data = ser->rx_data;
123 ser->rx_blob.size = 0;
124}
125
/* Remove this device's debugfs directory and everything beneath it. */
static inline void debugfs_deinit(struct ser_device *ser)
{
	debugfs_remove_recursive(ser->debugfs_tty_dir);
}
130
131static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
132{
133 if (size > sizeof(ser->rx_data))
134 size = sizeof(ser->rx_data);
135 memcpy(ser->rx_data, data, size);
136 ser->rx_blob.data = ser->rx_data;
137 ser->rx_blob.size = size;
138}
139
140static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
141{
142 if (size > sizeof(ser->tx_data))
143 size = sizeof(ser->tx_data);
144 memcpy(ser->tx_data, data, size);
145 ser->tx_blob.data = ser->tx_data;
146 ser->tx_blob.size = size;
147}
148#else
/* CONFIG_DEBUG_FS disabled: all debug hooks compile to no-ops. */
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
}

static inline void debugfs_deinit(struct ser_device *ser)
{
}

static inline void update_tty_status(struct ser_device *ser)
{
}

static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
}

static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
{
}
168
169#endif
170
55db4c64
LT
171static void ldisc_receive(struct tty_struct *tty, const u8 *data,
172 char *flags, int count)
9b27105b
SB
173{
174 struct sk_buff *skb = NULL;
175 struct ser_device *ser;
176 int ret;
177 u8 *p;
c1f8fc57 178
9b27105b
SB
179 ser = tty->disc_data;
180
181 /*
182 * NOTE: flags may contain information about break or overrun.
183 * This is not yet handled.
184 */
185
186
187 /*
188 * Workaround for garbage at start of transmission,
189 * only enable if STX handling is not enabled.
190 */
191 if (!ser->common.use_stx && !ser->tx_started) {
192 dev_info(&ser->dev->dev,
193 "Bytes received before initial transmission -"
194 "bytes discarded.\n");
195 return;
196 }
197
198 BUG_ON(ser->dev == NULL);
199
200 /* Get a suitable caif packet and copy in data. */
201 skb = netdev_alloc_skb(ser->dev, count+1);
d3f744e0
SB
202 if (skb == NULL)
203 return;
9b27105b
SB
204 p = skb_put(skb, count);
205 memcpy(p, data, count);
206
207 skb->protocol = htons(ETH_P_CAIF);
208 skb_reset_mac_header(skb);
209 skb->dev = ser->dev;
210 debugfs_rx(ser, data, count);
211 /* Push received packet up the stack. */
212 ret = netif_rx_ni(skb);
213 if (!ret) {
214 ser->dev->stats.rx_packets++;
215 ser->dev->stats.rx_bytes += count;
216 } else
217 ++ser->dev->stats.rx_dropped;
218 update_tty_status(ser);
219}
220
/*
 * Drain the tx queue into the tty (or loop it straight back through
 * ldisc_receive() in ser_loop mode). Serialized by the CAIF_SENDING bit
 * in ser->state, so concurrent callers simply return. Returns 0, or the
 * negative error from the tty driver's write op.
 */
static int handle_tx(struct ser_device *ser)
{
	struct tty_struct *tty;
	struct sk_buff *skb;
	int tty_wr, len, room;

	tty = ser->tty;
	ser->tx_started = true;

	/* Enter critical section */
	if (test_and_set_bit(CAIF_SENDING, &ser->state))
		return 0;

	/* skb_peek is safe because handle_tx is called after skb_queue_tail */
	while ((skb = skb_peek(&ser->head)) != NULL) {

		/* Make sure you don't write too much */
		len = skb->len;
		room = tty_write_room(tty);
		if (!room)
			break;
		if (room > ser_write_chunk)
			room = ser_write_chunk;
		if (len > room)
			len = room;

		/* Write to tty or loopback */
		if (!ser_loop) {
			tty_wr = tty->ops->write(tty, skb->data, len);
			update_tty_status(ser);
		} else {
			tty_wr = len;
			ldisc_receive(tty, skb->data, NULL, len);
		}
		ser->dev->stats.tx_packets++;
		ser->dev->stats.tx_bytes += tty_wr;

		/* Error on TTY ?! */
		if (tty_wr < 0)
			goto error;
		/* Reduce buffer written, and discard if empty */
		skb_pull(skb, tty_wr);
		if (skb->len == 0) {
			struct sk_buff *tmp = skb_dequeue(&ser->head);
			BUG_ON(tmp != skb);
			if (in_interrupt())
				dev_kfree_skb_irq(skb);
			else
				kfree_skb(skb);
		}
	}
	/* Queue has drained below low water mark: turn flow back ON. */
	if (ser->head.qlen <= SEND_QUEUE_LOW &&
		test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
		ser->common.flowctrl != NULL)
				ser->common.flowctrl(ser->dev, ON);
	clear_bit(CAIF_SENDING, &ser->state);
	return 0;
error:
	clear_bit(CAIF_SENDING, &ser->state);
	return tty_wr;
}
283
284static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
285{
286 struct ser_device *ser;
c1f8fc57 287
9b27105b
SB
288 BUG_ON(dev == NULL);
289 ser = netdev_priv(dev);
290
291 /* Send flow off once, on high water mark */
292 if (ser->head.qlen > SEND_QUEUE_HIGH &&
293 !test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
294 ser->common.flowctrl != NULL)
295
296 ser->common.flowctrl(ser->dev, OFF);
297
298 skb_queue_tail(&ser->head, skb);
299 return handle_tx(ser);
300}
301
302
303static void ldisc_tx_wakeup(struct tty_struct *tty)
304{
305 struct ser_device *ser;
c1f8fc57 306
9b27105b
SB
307 ser = tty->disc_data;
308 BUG_ON(ser == NULL);
309 BUG_ON(ser->tty != tty);
310 handle_tx(ser);
311}
312
313
314static int ldisc_open(struct tty_struct *tty)
315{
316 struct ser_device *ser;
317 struct net_device *dev;
318 char name[64];
319 int result;
320
c93f0940
AC
321 /* No write no play */
322 if (tty->ops->write == NULL)
323 return -EOPNOTSUPP;
d3f744e0
SB
324 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG))
325 return -EPERM;
c93f0940 326
9b27105b
SB
327 sprintf(name, "cf%s", tty->name);
328 dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
329 ser = netdev_priv(dev);
e31d5a05 330 ser->tty = tty_kref_get(tty);
9b27105b
SB
331 ser->dev = dev;
332 debugfs_init(ser, tty);
333 tty->receive_room = N_TTY_BUF_SIZE;
334 tty->disc_data = ser;
335 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
336 rtnl_lock();
337 result = register_netdevice(dev);
338 if (result) {
339 rtnl_unlock();
340 free_netdev(dev);
341 return -ENODEV;
342 }
343
344 list_add(&ser->node, &ser_list);
345 rtnl_unlock();
346 netif_stop_queue(dev);
347 update_tty_status(ser);
348 return 0;
349}
350
/* Line discipline close: tear down the net device created in ldisc_open(). */
static void ldisc_close(struct tty_struct *tty)
{
	struct ser_device *ser = tty->disc_data;
	/* Remove may be called inside or outside of rtnl_lock */
	int islocked = rtnl_is_locked();

	/*
	 * NOTE(review): rtnl_is_locked() only reports that *someone* holds
	 * the lock, not necessarily this task - confirm all call paths
	 * before relying on it.
	 */
	if (!islocked)
		rtnl_lock();
	/* device is freed automagically by net-sysfs */
	dev_close(ser->dev);
	unregister_netdevice(ser->dev);
	list_del(&ser->node);
	debugfs_deinit(ser);
	tty_kref_put(ser->tty);
	if (!islocked)
		rtnl_unlock();
}
368
369/* The line discipline structure. */
370static struct tty_ldisc_ops caif_ldisc = {
371 .owner = THIS_MODULE,
372 .magic = TTY_LDISC_MAGIC,
373 .name = "n_caif",
374 .open = ldisc_open,
375 .close = ldisc_close,
376 .receive_buf = ldisc_receive,
377 .write_wakeup = ldisc_tx_wakeup
378};
379
380static int register_ldisc(void)
381{
382 int result;
c1f8fc57 383
9b27105b
SB
384 result = tty_register_ldisc(N_CAIF, &caif_ldisc);
385 if (result < 0) {
386 pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
387 result);
388 return result;
389 }
390 return result;
391}
/* net_device callbacks for the CAIF serial interface. */
static const struct net_device_ops netdev_ops = {
	.ndo_open = caif_net_open,
	.ndo_stop = caif_net_close,
	.ndo_start_xmit = caif_xmit
};
397
398static void caifdev_setup(struct net_device *dev)
399{
400 struct ser_device *serdev = netdev_priv(dev);
c1f8fc57 401
9b27105b
SB
402 dev->features = 0;
403 dev->netdev_ops = &netdev_ops;
404 dev->type = ARPHRD_CAIF;
405 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
406 dev->mtu = CAIF_MAX_MTU;
9b27105b
SB
407 dev->tx_queue_len = 0;
408 dev->destructor = free_netdev;
409 skb_queue_head_init(&serdev->head);
410 serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
411 serdev->common.use_frag = true;
412 serdev->common.use_stx = ser_use_stx;
413 serdev->common.use_fcs = ser_use_fcs;
414 serdev->dev = dev;
415}
416
417
/* ndo_open: interface brought up - allow transmissions. */
static int caif_net_open(struct net_device *dev)
{
	netif_wake_queue(dev);
	return 0;
}
423
/* ndo_stop: interface taken down - stop the tx queue. */
static int caif_net_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}
429
430static int __init caif_ser_init(void)
431{
432 int ret;
c1f8fc57 433
9b27105b
SB
434 ret = register_ldisc();
435 debugfsdir = debugfs_create_dir("caif_serial", NULL);
436 return ret;
437}
438
439static void __exit caif_ser_exit(void)
440{
441 struct ser_device *ser = NULL;
442 struct list_head *node;
443 struct list_head *_tmp;
c1f8fc57 444
9b27105b
SB
445 list_for_each_safe(node, _tmp, &ser_list) {
446 ser = list_entry(node, struct ser_device, node);
447 dev_close(ser->dev);
448 unregister_netdevice(ser->dev);
449 list_del(node);
450 }
451 tty_unregister_ldisc(N_CAIF);
452 debugfs_remove_recursive(debugfsdir);
453}
454
455module_init(caif_ser_init);
456module_exit(caif_ser_exit);
This page took 0.286512 seconds and 5 git commands to generate.