drivers/s390/net/netiucv.c
1 /*
2 * IUCV network driver
3 *
4 * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
6 *
7 * Sysfs integration and all bugs therein by Cornelia Huck
8 * (cornelia.huck@de.ibm.com)
9 *
10 * Documentation used:
11 * the source of the original IUCV driver by:
12 * Stefan Hegewald <hegewald@de.ibm.com>
13 * Hartmut Penner <hpenner@de.ibm.com>
14 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
15 * Martin Schwidefsky (schwidefsky@de.ibm.com)
16 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2, or (at your option)
21 * any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 *
32 */
33
34 #undef DEBUG
35
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/kernel.h>
39 #include <linux/slab.h>
40 #include <linux/errno.h>
41 #include <linux/types.h>
42 #include <linux/interrupt.h>
43 #include <linux/timer.h>
44 #include <linux/bitops.h>
45
46 #include <linux/signal.h>
47 #include <linux/string.h>
48 #include <linux/device.h>
49
50 #include <linux/ip.h>
51 #include <linux/if_arp.h>
52 #include <linux/tcp.h>
53 #include <linux/skbuff.h>
54 #include <linux/ctype.h>
55 #include <net/dst.h>
56
57 #include <asm/io.h>
58 #include <asm/uaccess.h>
59
60 #include <net/iucv/iucv.h>
61 #include "fsm.h"
62
63 MODULE_AUTHOR
64 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
65 MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
66
67 /**
68 * Debug Facility stuff
69 */
70 #define IUCV_DBF_SETUP_NAME "iucv_setup"
71 #define IUCV_DBF_SETUP_LEN 32
72 #define IUCV_DBF_SETUP_PAGES 2
73 #define IUCV_DBF_SETUP_NR_AREAS 1
74 #define IUCV_DBF_SETUP_LEVEL 3
75
76 #define IUCV_DBF_DATA_NAME "iucv_data"
77 #define IUCV_DBF_DATA_LEN 128
78 #define IUCV_DBF_DATA_PAGES 2
79 #define IUCV_DBF_DATA_NR_AREAS 1
80 #define IUCV_DBF_DATA_LEVEL 2
81
82 #define IUCV_DBF_TRACE_NAME "iucv_trace"
83 #define IUCV_DBF_TRACE_LEN 16
84 #define IUCV_DBF_TRACE_PAGES 4
85 #define IUCV_DBF_TRACE_NR_AREAS 1
86 #define IUCV_DBF_TRACE_LEVEL 3
87
88 #define IUCV_DBF_TEXT(name,level,text) \
89 do { \
90 debug_text_event(iucv_dbf_##name,level,text); \
91 } while (0)
92
93 #define IUCV_DBF_HEX(name,level,addr,len) \
94 do { \
95 debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
96 } while (0)
97
98 DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
99
100 #define IUCV_DBF_TEXT_(name,level,text...) \
101 do { \
102 char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
103 sprintf(iucv_dbf_txt_buf, text); \
104 debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
105 put_cpu_var(iucv_dbf_txt_buf); \
106 } while (0)
107
108 #define IUCV_DBF_SPRINTF(name,level,text...) \
109 do { \
110 debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
112 } while (0)
113
114 /**
115 * some more debug stuff
116 */
117 #define IUCV_HEXDUMP16(importance,header,ptr) \
118 PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
119 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
120 *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
121 *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
122 *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
123 *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
124 *(((char*)ptr)+12),*(((char*)ptr)+13), \
125 *(((char*)ptr)+14),*(((char*)ptr)+15)); \
126 PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
127 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
128 *(((char*)ptr)+16),*(((char*)ptr)+17), \
129 *(((char*)ptr)+18),*(((char*)ptr)+19), \
130 *(((char*)ptr)+20),*(((char*)ptr)+21), \
131 *(((char*)ptr)+22),*(((char*)ptr)+23), \
132 *(((char*)ptr)+24),*(((char*)ptr)+25), \
133 *(((char*)ptr)+26),*(((char*)ptr)+27), \
134 *(((char*)ptr)+28),*(((char*)ptr)+29), \
135 *(((char*)ptr)+30),*(((char*)ptr)+31));
136
137 static inline void iucv_hex_dump(unsigned char *buf, size_t len)
138 {
139 size_t i;
140
141 for (i = 0; i < len; i++) {
142 if (i && !(i % 16))
143 printk("\n");
144 printk("%02x ", *(buf + i));
145 }
146 printk("\n");
147 }
148
149 #define PRINTK_HEADER " iucv: " /* for debugging */
150
151 static struct device_driver netiucv_driver = {
152 .name = "netiucv",
153 .bus = &iucv_bus,
154 };
155
156 static int netiucv_callback_connreq(struct iucv_path *,
157 u8 ipvmid[8], u8 ipuser[16]);
158 static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
159 static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
160 static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
161 static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
162 static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
163 static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
164
165 static struct iucv_handler netiucv_handler = {
166 .path_pending = netiucv_callback_connreq,
167 .path_complete = netiucv_callback_connack,
168 .path_severed = netiucv_callback_connrej,
169 .path_quiesced = netiucv_callback_connsusp,
170 .path_resumed = netiucv_callback_connres,
171 .message_pending = netiucv_callback_rx,
172 .message_complete = netiucv_callback_txdone
173 };
174
175 /**
176 * Per connection profiling data
177 */
178 struct connection_profile {
179 unsigned long maxmulti;
180 unsigned long maxcqueue;
181 unsigned long doios_single;
182 unsigned long doios_multi;
183 unsigned long txlen;
184 unsigned long tx_time;
185 struct timespec send_stamp;
186 unsigned long tx_pending;
187 unsigned long tx_max_pending;
188 };
189
190 /**
191 * Representation of one iucv connection
192 */
193 struct iucv_connection {
194 struct list_head list;
195 struct iucv_path *path;
196 struct sk_buff *rx_buff;
197 struct sk_buff *tx_buff;
198 struct sk_buff_head collect_queue;
199 struct sk_buff_head commit_queue;
200 spinlock_t collect_lock;
201 int collect_len;
202 int max_buffsize;
203 fsm_timer timer;
204 fsm_instance *fsm;
205 struct net_device *netdev;
206 struct connection_profile prof;
207 char userid[9];
208 };
209
210 /**
211 * Linked list of all connection structs.
212 */
213 static struct list_head iucv_connection_list =
214 LIST_HEAD_INIT(iucv_connection_list);
215 static rwlock_t iucv_connection_rwlock = RW_LOCK_UNLOCKED;
216
217 /**
218 * Representation of event-data for the
219 * connection state machine.
220 */
221 struct iucv_event {
222 struct iucv_connection *conn;
223 void *data;
224 };
225
226 /**
227 * Private part of the network device structure
228 */
229 struct netiucv_priv {
230 struct net_device_stats stats;
231 unsigned long tbusy;
232 fsm_instance *fsm;
233 struct iucv_connection *conn;
234 struct device *dev;
235 };
236
237 /**
238 * Link level header for a packet.
239 */
240 struct ll_header {
241 u16 next;
242 };
243
244 #define NETIUCV_HDRLEN (sizeof(struct ll_header))
245 #define NETIUCV_BUFSIZE_MAX 32768
246 #define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
247 #define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
248 #define NETIUCV_MTU_DEFAULT 9216
249 #define NETIUCV_QUEUELEN_DEFAULT 50
250 #define NETIUCV_TIMEOUT_5SEC 5000
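/*
 * Editorial sketch of the transmit/receive buffer layout, derived from
 * netiucv_unpack_skb() and conn_action_txdone() below (not part of the
 * original source):
 *
 *   +--------+---------------+--------+---------------+--------+
 *   | header | packet 1 data | header | packet 2 data | header |
 *   | next=a |               | next=b |               | next=0 |
 *   +--------+---------------+--------+---------------+--------+
 *   offset 0                 a                        b
 *
 * Each ll_header.next holds the offset of the following header, counted
 * from the start of the buffer; a header with next == 0 terminates the
 * chain.
 */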
251
252 /**
253 * Helper functions for busy handling
254 * of network devices.
255 */
256 static inline void netiucv_clear_busy(struct net_device *dev)
257 {
258 struct netiucv_priv *priv = netdev_priv(dev);
259 clear_bit(0, &priv->tbusy);
260 netif_wake_queue(dev);
261 }
262
263 static inline int netiucv_test_and_set_busy(struct net_device *dev)
264 {
265 struct netiucv_priv *priv = netdev_priv(dev);
266 netif_stop_queue(dev);
267 return test_and_set_bit(0, &priv->tbusy);
268 }
269
270 static u8 iucvMagic[16] = {
271 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
272 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
273 };
274
275 /**
276 * Convert an iucv userId to its printable
277 * form (strip whitespace at end).
278 *
279 * @param name An iucv userId
280 *
281 * @returns The printable string (static data!!)
282 */
283 static inline char *netiucv_printname(char *name)
284 {
285 static char tmp[9];
286 char *p = tmp;
287 memcpy(tmp, name, 8);
288 tmp[8] = '\0';
289 while (*p && (!isspace(*p)))
290 p++;
291 *p = '\0';
292 return tmp;
293 }
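/*
 * Example (hypothetical input): netiucv_printname("USER1   ") returns
 * "USER1"; the result is truncated at the first blank.
 */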
294
295 /**
296 * States of the interface statemachine.
297 */
298 enum dev_states {
299 DEV_STATE_STOPPED,
300 DEV_STATE_STARTWAIT,
301 DEV_STATE_STOPWAIT,
302 DEV_STATE_RUNNING,
303 /**
304 * MUST always be the last element!
305 */
306 NR_DEV_STATES
307 };
308
309 static const char *dev_state_names[] = {
310 "Stopped",
311 "StartWait",
312 "StopWait",
313 "Running",
314 };
315
316 /**
317 * Events of the interface statemachine.
318 */
319 enum dev_events {
320 DEV_EVENT_START,
321 DEV_EVENT_STOP,
322 DEV_EVENT_CONUP,
323 DEV_EVENT_CONDOWN,
324 /**
325 * MUST always be the last element!
326 */
327 NR_DEV_EVENTS
328 };
329
330 static const char *dev_event_names[] = {
331 "Start",
332 "Stop",
333 "Connection up",
334 "Connection down",
335 };
336
337 /**
338 * Events of the connection statemachine
339 */
340 enum conn_events {
341 /**
342 * Events representing callbacks from the
343 * lowlevel iucv layer
344 */
345 CONN_EVENT_CONN_REQ,
346 CONN_EVENT_CONN_ACK,
347 CONN_EVENT_CONN_REJ,
348 CONN_EVENT_CONN_SUS,
349 CONN_EVENT_CONN_RES,
350 CONN_EVENT_RX,
351 CONN_EVENT_TXDONE,
352
353 /**
354 * Events representing error return codes from
355 * calls to the lowlevel iucv layer
356 */
357
358 /**
359 * Event, representing timer expiry.
360 */
361 CONN_EVENT_TIMER,
362
363 /**
364 * Events, representing commands from upper levels.
365 */
366 CONN_EVENT_START,
367 CONN_EVENT_STOP,
368
369 /**
370 * MUST always be the last element!
371 */
372 NR_CONN_EVENTS,
373 };
374
375 static const char *conn_event_names[] = {
376 "Remote connection request",
377 "Remote connection acknowledge",
378 "Remote connection reject",
379 "Connection suspended",
380 "Connection resumed",
381 "Data received",
382 "Data sent",
383
384 "Timer",
385
386 "Start",
387 "Stop",
388 };
389
390 /**
391 * States of the connection statemachine.
392 */
393 enum conn_states {
394 /**
395 * Connection not assigned to any device,
396 * initial state, invalid
397 */
398 CONN_STATE_INVALID,
399
400 /**
401 * Userid assigned but not operating
402 */
403 CONN_STATE_STOPPED,
404
405 /**
406 * Connection registered,
407 * no connection request sent yet,
408 * no connection request received
409 */
410 CONN_STATE_STARTWAIT,
411
412 /**
413 * Connection registered and connection request sent,
414 * no acknowledge and no connection request received yet.
415 */
416 CONN_STATE_SETUPWAIT,
417
418 /**
419 * Connection up and running idle
420 */
421 CONN_STATE_IDLE,
422
423 /**
424 * Data sent, awaiting CONN_EVENT_TXDONE
425 */
426 CONN_STATE_TX,
427
428 /**
429 * Error during registration.
430 */
431 CONN_STATE_REGERR,
432
433 /**
434 * Error during connection setup.
435 */
436 CONN_STATE_CONNERR,
437
438 /**
439 * MUST always be the last element!
440 */
441 NR_CONN_STATES,
442 };
443
444 static const char *conn_state_names[] = {
445 "Invalid",
446 "Stopped",
447 "StartWait",
448 "SetupWait",
449 "Idle",
450 "TX",
452 "Registration error",
453 "Connect error",
454 };
455
456
457 /**
458 * Debug Facility Stuff
459 */
460 static debug_info_t *iucv_dbf_setup = NULL;
461 static debug_info_t *iucv_dbf_data = NULL;
462 static debug_info_t *iucv_dbf_trace = NULL;
463
464 DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
465
466 static void iucv_unregister_dbf_views(void)
467 {
468 if (iucv_dbf_setup)
469 debug_unregister(iucv_dbf_setup);
470 if (iucv_dbf_data)
471 debug_unregister(iucv_dbf_data);
472 if (iucv_dbf_trace)
473 debug_unregister(iucv_dbf_trace);
474 }
475 static int iucv_register_dbf_views(void)
476 {
477 iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
478 IUCV_DBF_SETUP_PAGES,
479 IUCV_DBF_SETUP_NR_AREAS,
480 IUCV_DBF_SETUP_LEN);
481 iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
482 IUCV_DBF_DATA_PAGES,
483 IUCV_DBF_DATA_NR_AREAS,
484 IUCV_DBF_DATA_LEN);
485 iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
486 IUCV_DBF_TRACE_PAGES,
487 IUCV_DBF_TRACE_NR_AREAS,
488 IUCV_DBF_TRACE_LEN);
489
490 if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
491 (iucv_dbf_trace == NULL)) {
492 iucv_unregister_dbf_views();
493 return -ENOMEM;
494 }
495 debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
496 debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
497
498 debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
499 debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
500
501 debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
502 debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
503
504 return 0;
505 }
506
507 /*
508 * Callback-wrappers, called from lowlevel iucv layer.
509 */
510
511 static void netiucv_callback_rx(struct iucv_path *path,
512 struct iucv_message *msg)
513 {
514 struct iucv_connection *conn = path->private;
515 struct iucv_event ev;
516
517 ev.conn = conn;
518 ev.data = msg;
519 fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
520 }
521
522 static void netiucv_callback_txdone(struct iucv_path *path,
523 struct iucv_message *msg)
524 {
525 struct iucv_connection *conn = path->private;
526 struct iucv_event ev;
527
528 ev.conn = conn;
529 ev.data = msg;
530 fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
531 }
532
533 static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
534 {
535 struct iucv_connection *conn = path->private;
536
537 fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
538 }
539
540 static int netiucv_callback_connreq(struct iucv_path *path,
541 u8 ipvmid[8], u8 ipuser[16])
542 {
543 struct iucv_connection *conn = path->private;
544 struct iucv_event ev;
545 int rc;
546
547 if (memcmp(iucvMagic, ipuser, 16))
548 /* ipuser must match iucvMagic. */
549 return -EINVAL;
550 rc = -EINVAL;
551 read_lock_bh(&iucv_connection_rwlock);
552 list_for_each_entry(conn, &iucv_connection_list, list) {
553 if (strncmp(ipvmid, conn->userid, 8))
554 continue;
555 /* Found a matching connection for this path. */
556 conn->path = path;
557 ev.conn = conn;
558 ev.data = path;
559 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
560 rc = 0;
561 }
562 read_unlock_bh(&iucv_connection_rwlock);
563 return rc;
564 }
565
566 static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
567 {
568 struct iucv_connection *conn = path->private;
569
570 fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
571 }
572
573 static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
574 {
575 struct iucv_connection *conn = path->private;
576
577 fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
578 }
579
580 static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
581 {
582 struct iucv_connection *conn = path->private;
583
584 fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
585 }
586
587 /**
588 * Dummy NOP action for all statemachines
589 */
590 static void fsm_action_nop(fsm_instance *fi, int event, void *arg)
591 {
592 }
593
594 /*
595 * Actions of the connection statemachine
596 */
597
598 /**
599 * netiucv_unpack_skb
600 * @conn: The connection where this skb has been received.
601 * @pskb: The received skb.
602 *
603 * Unpack a just received skb and hand it over to upper layers.
604 * Helper function for conn_action_rx.
605 */
606 static void netiucv_unpack_skb(struct iucv_connection *conn,
607 struct sk_buff *pskb)
608 {
609 struct net_device *dev = conn->netdev;
610 struct netiucv_priv *privptr = netdev_priv(dev);
611 u16 offset = 0;
612
613 skb_put(pskb, NETIUCV_HDRLEN);
614 pskb->dev = dev;
615 pskb->ip_summed = CHECKSUM_NONE;
616 pskb->protocol = ntohs(ETH_P_IP);
617
618 while (1) {
619 struct sk_buff *skb;
620 struct ll_header *header = (struct ll_header *) pskb->data;
621
622 if (!header->next)
623 break;
624
625 skb_pull(pskb, NETIUCV_HDRLEN);
626 header->next -= offset;
627 offset += header->next;
628 header->next -= NETIUCV_HDRLEN;
629 if (skb_tailroom(pskb) < header->next) {
630 PRINT_WARN("%s: Illegal next field in iucv header: "
631 "%d > %d\n",
632 dev->name, header->next, skb_tailroom(pskb));
633 IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
634 header->next, skb_tailroom(pskb));
635 return;
636 }
637 skb_put(pskb, header->next);
638 pskb->mac.raw = pskb->data;
639 skb = dev_alloc_skb(pskb->len);
640 if (!skb) {
641 PRINT_WARN("%s: Out of memory in netiucv_unpack_skb\n",
642 dev->name);
643 IUCV_DBF_TEXT(data, 2,
644 "Out of memory in netiucv_unpack_skb\n");
645 privptr->stats.rx_dropped++;
646 return;
647 }
648 memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
649 skb->mac.raw = skb->data;
650 skb->dev = pskb->dev;
651 skb->protocol = pskb->protocol;
652 pskb->ip_summed = CHECKSUM_UNNECESSARY;
653 /*
654 * Since receiving is always initiated from a tasklet (in iucv.c),
655 * we must use netif_rx_ni() instead of netif_rx()
656 */
657 netif_rx_ni(skb);
658 dev->last_rx = jiffies;
659 privptr->stats.rx_packets++;
660 privptr->stats.rx_bytes += skb->len;
661 skb_pull(pskb, header->next);
662 skb_put(pskb, NETIUCV_HDRLEN);
663 }
664 }
665
666 static void conn_action_rx(fsm_instance *fi, int event, void *arg)
667 {
668 struct iucv_event *ev = arg;
669 struct iucv_connection *conn = ev->conn;
670 struct iucv_message *msg = ev->data;
671 struct netiucv_priv *privptr = netdev_priv(conn->netdev);
672 int rc;
673
674 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
675
676 if (!conn->netdev) {
677 iucv_message_reject(conn->path, msg);
678 PRINT_WARN("Received data for unlinked connection\n");
679 IUCV_DBF_TEXT(data, 2,
680 "Received data for unlinked connection\n");
681 return;
682 }
683 if (msg->length > conn->max_buffsize) {
684 iucv_message_reject(conn->path, msg);
685 privptr->stats.rx_dropped++;
686 PRINT_WARN("msglen %d > max_buffsize %d\n",
687 msg->length, conn->max_buffsize);
688 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
689 msg->length, conn->max_buffsize);
690 return;
691 }
692 conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head;
693 conn->rx_buff->len = 0;
694 rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
695 msg->length, NULL);
696 if (rc || msg->length < 5) {
697 privptr->stats.rx_errors++;
698 PRINT_WARN("iucv_receive returned %08x\n", rc);
699 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
700 return;
701 }
702 netiucv_unpack_skb(conn, conn->rx_buff);
703 }
704
705 static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
706 {
707 struct iucv_event *ev = arg;
708 struct iucv_connection *conn = ev->conn;
709 struct iucv_message *msg = ev->data;
710 struct iucv_message txmsg;
711 struct netiucv_priv *privptr = NULL;
712 u32 single_flag = msg->tag;
713 u32 txbytes = 0;
714 u32 txpackets = 0;
715 u32 stat_maxcq = 0;
716 struct sk_buff *skb;
717 unsigned long saveflags;
718 struct ll_header header;
719 int rc;
720
721 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
722
723 if (conn && conn->netdev)
724 privptr = netdev_priv(conn->netdev);
725 conn->prof.tx_pending--;
726 if (single_flag) {
727 if ((skb = skb_dequeue(&conn->commit_queue))) {
728 atomic_dec(&skb->users);
729 if (privptr) {
730 privptr->stats.tx_packets++;
731 privptr->stats.tx_bytes +=
732 (skb->len - NETIUCV_HDRLEN
733 - NETIUCV_HDRLEN);
734 }
735 dev_kfree_skb_any(skb); /* skb->len was used above, safe to free now */
736 }
737 }
738 conn->tx_buff->data = conn->tx_buff->tail = conn->tx_buff->head;
739 conn->tx_buff->len = 0;
740 spin_lock_irqsave(&conn->collect_lock, saveflags);
741 while ((skb = skb_dequeue(&conn->collect_queue))) {
742 header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
743 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
744 NETIUCV_HDRLEN);
745 memcpy(skb_put(conn->tx_buff, skb->len), skb->data, skb->len);
746 txbytes += skb->len;
747 txpackets++;
748 stat_maxcq++;
749 atomic_dec(&skb->users);
750 dev_kfree_skb_any(skb);
751 }
752 if (conn->collect_len > conn->prof.maxmulti)
753 conn->prof.maxmulti = conn->collect_len;
754 conn->collect_len = 0;
755 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
756 if (conn->tx_buff->len == 0) {
757 fsm_newstate(fi, CONN_STATE_IDLE);
758 return;
759 }
760
761 header.next = 0;
762 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
763 conn->prof.send_stamp = xtime;
764 txmsg.class = 0;
765 txmsg.tag = 0;
766 rc = iucv_message_send(conn->path, &txmsg, 0, 0,
767 conn->tx_buff->data, conn->tx_buff->len);
768 conn->prof.doios_multi++;
769 conn->prof.txlen += conn->tx_buff->len;
770 conn->prof.tx_pending++;
771 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
772 conn->prof.tx_max_pending = conn->prof.tx_pending;
773 if (rc) {
774 conn->prof.tx_pending--;
775 fsm_newstate(fi, CONN_STATE_IDLE);
776 if (privptr)
777 privptr->stats.tx_errors += txpackets;
778 PRINT_WARN("iucv_send returned %08x\n", rc);
779 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
780 } else {
781 if (privptr) {
782 privptr->stats.tx_packets += txpackets;
783 privptr->stats.tx_bytes += txbytes;
784 }
785 if (stat_maxcq > conn->prof.maxcqueue)
786 conn->prof.maxcqueue = stat_maxcq;
787 }
788 }
789
790 static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
791 {
792 struct iucv_event *ev = arg;
793 struct iucv_connection *conn = ev->conn;
794 struct iucv_path *path = ev->data;
795 struct net_device *netdev = conn->netdev;
796 struct netiucv_priv *privptr = netdev_priv(netdev);
797 int rc;
798
799 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
800
801 conn->path = path;
802 path->msglim = NETIUCV_QUEUELEN_DEFAULT;
803 path->flags = 0;
804 rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
805 if (rc) {
806 PRINT_WARN("%s: IUCV accept failed with error %d\n",
807 netdev->name, rc);
808 IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
809 return;
810 }
811 fsm_newstate(fi, CONN_STATE_IDLE);
812 netdev->tx_queue_len = conn->path->msglim;
813 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
814 }
815
816 static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
817 {
818 struct iucv_event *ev = arg;
819 struct iucv_path *path = ev->data;
820
821 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
822 iucv_path_sever(path, NULL);
823 }
824
825 static void conn_action_connack(fsm_instance *fi, int event, void *arg)
826 {
827 struct iucv_connection *conn = arg;
828 struct net_device *netdev = conn->netdev;
829 struct netiucv_priv *privptr = netdev_priv(netdev);
830
831 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
832 fsm_deltimer(&conn->timer);
833 fsm_newstate(fi, CONN_STATE_IDLE);
834 netdev->tx_queue_len = conn->path->msglim;
835 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
836 }
837
838 static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
839 {
840 struct iucv_connection *conn = arg;
841
842 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
843 fsm_deltimer(&conn->timer);
844 iucv_path_sever(conn->path, NULL);
845 fsm_newstate(fi, CONN_STATE_STARTWAIT);
846 }
847
848 static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
849 {
850 struct iucv_connection *conn = arg;
851 struct net_device *netdev = conn->netdev;
852 struct netiucv_priv *privptr = netdev_priv(netdev);
853
854 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
855
856 fsm_deltimer(&conn->timer);
857 iucv_path_sever(conn->path, NULL);
858 PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
859 IUCV_DBF_TEXT(data, 2,
860 "conn_action_connsever: Remote dropped connection\n");
861 fsm_newstate(fi, CONN_STATE_STARTWAIT);
862 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
863 }
864
865 static void conn_action_start(fsm_instance *fi, int event, void *arg)
866 {
867 struct iucv_connection *conn = arg;
868 int rc;
869
870 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
871
872 fsm_newstate(fi, CONN_STATE_STARTWAIT);
873 PRINT_DEBUG("%s('%s'): connecting ...\n",
874 conn->netdev->name, conn->userid);
875
876 /*
877 * We must set the state before calling iucv_connect because the
878 * callback handler could be called at any point after the connection
879 * request is sent
880 */
881
882 fsm_newstate(fi, CONN_STATE_SETUPWAIT);
883 conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
884 rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
885 NULL, iucvMagic, conn);
886 switch (rc) {
887 case 0:
888 conn->netdev->tx_queue_len = conn->path->msglim;
889 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
890 CONN_EVENT_TIMER, conn);
891 return;
892 case 11:
893 PRINT_INFO("%s: User %s is currently not available.\n",
894 conn->netdev->name,
895 netiucv_printname(conn->userid));
896 fsm_newstate(fi, CONN_STATE_STARTWAIT);
897 break;
898 case 12:
899 PRINT_INFO("%s: User %s is currently not ready.\n",
900 conn->netdev->name,
901 netiucv_printname(conn->userid));
902 fsm_newstate(fi, CONN_STATE_STARTWAIT);
903 break;
904 case 13:
905 PRINT_WARN("%s: Too many IUCV connections.\n",
906 conn->netdev->name);
907 fsm_newstate(fi, CONN_STATE_CONNERR);
908 break;
909 case 14:
910 PRINT_WARN("%s: User %s has too many IUCV connections.\n",
911 conn->netdev->name,
912 netiucv_printname(conn->userid));
913 fsm_newstate(fi, CONN_STATE_CONNERR);
914 break;
915 case 15:
916 PRINT_WARN("%s: No IUCV authorization in CP directory.\n",
917 conn->netdev->name);
918 fsm_newstate(fi, CONN_STATE_CONNERR);
919 break;
920 default:
921 PRINT_WARN("%s: iucv_connect returned error %d\n",
922 conn->netdev->name, rc);
923 fsm_newstate(fi, CONN_STATE_CONNERR);
924 break;
925 }
926 IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
927 kfree(conn->path);
928 conn->path = NULL;
929 }
930
931 static void netiucv_purge_skb_queue(struct sk_buff_head *q)
932 {
933 struct sk_buff *skb;
934
935 while ((skb = skb_dequeue(q))) {
936 atomic_dec(&skb->users);
937 dev_kfree_skb_any(skb);
938 }
939 }
940
941 static void conn_action_stop(fsm_instance *fi, int event, void *arg)
942 {
943 struct iucv_event *ev = arg;
944 struct iucv_connection *conn = ev->conn;
945 struct net_device *netdev = conn->netdev;
946 struct netiucv_priv *privptr = netdev_priv(netdev);
947
948 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
949
950 fsm_deltimer(&conn->timer);
951 fsm_newstate(fi, CONN_STATE_STOPPED);
952 netiucv_purge_skb_queue(&conn->collect_queue);
953 if (conn->path) {
954 IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
955 iucv_path_sever(conn->path, iucvMagic);
956 kfree(conn->path);
957 conn->path = NULL;
958 }
959 netiucv_purge_skb_queue(&conn->commit_queue);
960 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
961 }
962
963 static void conn_action_inval(fsm_instance *fi, int event, void *arg)
964 {
965 struct iucv_connection *conn = arg;
966 struct net_device *netdev = conn->netdev;
967
968 PRINT_WARN("%s: Cannot connect without username\n", netdev->name);
969 IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n");
970 }
971
972 static const fsm_node conn_fsm[] = {
973 { CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval },
974 { CONN_STATE_STOPPED, CONN_EVENT_START, conn_action_start },
975
976 { CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop },
977 { CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop },
978 { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop },
979 { CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop },
980 { CONN_STATE_TX, CONN_EVENT_STOP, conn_action_stop },
981 { CONN_STATE_REGERR, CONN_EVENT_STOP, conn_action_stop },
982 { CONN_STATE_CONNERR, CONN_EVENT_STOP, conn_action_stop },
983
984 { CONN_STATE_STOPPED, CONN_EVENT_CONN_REQ, conn_action_connreject },
985 { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
986 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
987 { CONN_STATE_IDLE, CONN_EVENT_CONN_REQ, conn_action_connreject },
988 { CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject },
989
990 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack },
991 { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev },
992
993 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever },
994 { CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever },
995 { CONN_STATE_TX, CONN_EVENT_CONN_REJ, conn_action_connsever },
996
997 { CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx },
998 { CONN_STATE_TX, CONN_EVENT_RX, conn_action_rx },
999
1000 { CONN_STATE_TX, CONN_EVENT_TXDONE, conn_action_txdone },
1001 { CONN_STATE_IDLE, CONN_EVENT_TXDONE, conn_action_txdone },
1002 };
1003
1004 static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
1005
1006
1007 /*
1008 * Actions for interface - statemachine.
1009 */
1010
1011 /**
1012 * dev_action_start
1013 * @fi: An instance of an interface statemachine.
1014 * @event: The event, just happened.
1015 * @arg: Generic pointer, casted from struct net_device * upon call.
1016 *
1017 * Startup connection by sending CONN_EVENT_START to it.
1018 */
1019 static void dev_action_start(fsm_instance *fi, int event, void *arg)
1020 {
1021 struct net_device *dev = arg;
1022 struct netiucv_priv *privptr = netdev_priv(dev);
1023
1024 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1025
1026 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1027 fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1028 }
1029
1030 /**
1031 * Shutdown connection by sending CONN_EVENT_STOP to it.
1032 *
1033 * @param fi An instance of an interface statemachine.
1034 * @param event The event, just happened.
1035 * @param arg Generic pointer, casted from struct net_device * upon call.
1036 */
1037 static void
1038 dev_action_stop(fsm_instance *fi, int event, void *arg)
1039 {
1040 struct net_device *dev = arg;
1041 struct netiucv_priv *privptr = netdev_priv(dev);
1042 struct iucv_event ev;
1043
1044 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1045
1046 ev.conn = privptr->conn;
1047
1048 fsm_newstate(fi, DEV_STATE_STOPWAIT);
1049 fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1050 }
1051
1052 /**
1053 * Called from connection statemachine
1054 * when a connection is up and running.
1055 *
1056 * @param fi An instance of an interface statemachine.
1057 * @param event The event, just happened.
1058 * @param arg Generic pointer, casted from struct net_device * upon call.
1059 */
1060 static void
1061 dev_action_connup(fsm_instance *fi, int event, void *arg)
1062 {
1063 struct net_device *dev = arg;
1064 struct netiucv_priv *privptr = netdev_priv(dev);
1065
1066 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1067
1068 switch (fsm_getstate(fi)) {
1069 case DEV_STATE_STARTWAIT:
1070 fsm_newstate(fi, DEV_STATE_RUNNING);
1071 PRINT_INFO("%s: connected with remote side %s\n",
1072 dev->name, privptr->conn->userid);
1073 IUCV_DBF_TEXT(setup, 3,
1074 "connection is up and running\n");
1075 break;
1076 case DEV_STATE_STOPWAIT:
1077 PRINT_INFO(
1078 "%s: got connection UP event during shutdown!\n",
1079 dev->name);
1080 IUCV_DBF_TEXT(data, 2,
1081 "dev_action_connup: in DEV_STATE_STOPWAIT\n");
1082 break;
1083 }
1084 }
1085
1086 /**
1087 * Called from connection statemachine
1088 * when a connection has been shutdown.
1089 *
1090 * @param fi An instance of an interface statemachine.
1091 * @param event The event, just happened.
1092 * @param arg Generic pointer, casted from struct net_device * upon call.
1093 */
1094 static void
1095 dev_action_conndown(fsm_instance *fi, int event, void *arg)
1096 {
1097 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1098
1099 switch (fsm_getstate(fi)) {
1100 case DEV_STATE_RUNNING:
1101 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1102 break;
1103 case DEV_STATE_STOPWAIT:
1104 fsm_newstate(fi, DEV_STATE_STOPPED);
1105 IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1106 break;
1107 }
1108 }
1109
1110 static const fsm_node dev_fsm[] = {
1111 { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
1112
1113 { DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start },
1114 { DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown },
1115
1116 { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop },
1117 { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup },
1118
1119 { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
1120 { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
1121 { DEV_STATE_RUNNING, DEV_EVENT_CONUP, fsm_action_nop },
1122 };
1123
1124 static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1125
1126 /**
1127 * Transmit a packet.
1128 * This is a helper function for netiucv_tx().
1129 *
1130 * @param conn Connection to be used for sending.
1131 * @param skb Pointer to struct sk_buff of packet to send.
1132 * The linklevel header has already been set up
1133 * by netiucv_tx().
1134 *
1135 * @return 0 on success, -ERRNO on failure.
1136 */
1137 static int netiucv_transmit_skb(struct iucv_connection *conn,
1138 struct sk_buff *skb)
1139 {
1140 struct iucv_message msg;
1141 unsigned long saveflags;
1142 struct ll_header header;
1143 int rc;
1144
1145 if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1146 int l = skb->len + NETIUCV_HDRLEN;
1147
1148 spin_lock_irqsave(&conn->collect_lock, saveflags);
1149 if (conn->collect_len + l >
1150 (conn->max_buffsize - NETIUCV_HDRLEN)) {
1151 rc = -EBUSY;
1152 IUCV_DBF_TEXT(data, 2,
1153 "EBUSY from netiucv_transmit_skb\n");
1154 } else {
1155 atomic_inc(&skb->users);
1156 skb_queue_tail(&conn->collect_queue, skb);
1157 conn->collect_len += l;
1158 rc = 0;
1159 }
1160 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1161 } else {
1162 struct sk_buff *nskb = skb;
1163 /**
1164 * Copy the skb to a new allocated skb in lowmem only if the
1165 * data is located above 2G in memory or tailroom is < 2.
1166 */
1167 unsigned long hi =
1168 ((unsigned long)(skb->tail + NETIUCV_HDRLEN)) >> 31;
1169 int copied = 0;
1170 if (hi || (skb_tailroom(skb) < 2)) {
1171 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1172 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1173 if (!nskb) {
1174 PRINT_WARN("%s: Could not allocate tx_skb\n",
1175 conn->netdev->name);
1176 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1177 rc = -ENOMEM;
1178 return rc;
1179 } else {
1180 skb_reserve(nskb, NETIUCV_HDRLEN);
1181 memcpy(skb_put(nskb, skb->len),
1182 skb->data, skb->len);
1183 }
1184 copied = 1;
1185 }
1186 /**
1187 * skb now is below 2G and has enough room. Add headers.
1188 */
1189 header.next = nskb->len + NETIUCV_HDRLEN;
1190 memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1191 header.next = 0;
1192 memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1193
1194 fsm_newstate(conn->fsm, CONN_STATE_TX);
1195 conn->prof.send_stamp = xtime;
1196
1197 msg.tag = 1;
1198 msg.class = 0;
1199 rc = iucv_message_send(conn->path, &msg, 0, 0,
1200 nskb->data, nskb->len);
1201 conn->prof.doios_single++;
1202 conn->prof.txlen += skb->len;
1203 conn->prof.tx_pending++;
1204 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1205 conn->prof.tx_max_pending = conn->prof.tx_pending;
1206 if (rc) {
1207 struct netiucv_priv *privptr;
1208 fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1209 conn->prof.tx_pending--;
1210 privptr = netdev_priv(conn->netdev);
1211 if (privptr)
1212 privptr->stats.tx_errors++;
1213 if (copied)
1214 dev_kfree_skb(nskb);
1215 else {
1216 /**
1217 * Remove our headers. They get added
1218 * again on retransmit.
1219 */
1220 skb_pull(skb, NETIUCV_HDRLEN);
1221 skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1222 }
1223 PRINT_WARN("iucv_send returned %08x\n", rc);
1224 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1225 } else {
1226 if (copied)
1227 dev_kfree_skb(skb);
1228 atomic_inc(&nskb->users);
1229 skb_queue_tail(&conn->commit_queue, nskb);
1230 }
1231 }
1232
1233 return rc;
1234 }
1235
1236 /*
1237 * Interface API for upper network layers
1238 */
1239
1240 /**
1241 * Open an interface.
1242 * Called from generic network layer when ifconfig up is run.
1243 *
1244 * @param dev Pointer to interface struct.
1245 *
1246 * @return 0 on success, -ERRNO on failure. (Never fails.)
1247 */
1248 static int netiucv_open(struct net_device *dev)
1249 {
1250 struct netiucv_priv *priv = netdev_priv(dev);
1251
1252 fsm_event(priv->fsm, DEV_EVENT_START, dev);
1253 return 0;
1254 }
1255
1256 /**
1257 * Close an interface.
1258 * Called from generic network layer when ifconfig down is run.
1259 *
1260 * @param dev Pointer to interface struct.
1261 *
1262 * @return 0 on success, -ERRNO on failure. (Never fails.)
1263 */
1264 static int netiucv_close(struct net_device *dev)
1265 {
1266 struct netiucv_priv *priv = netdev_priv(dev);
1267
1268 fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1269 return 0;
1270 }
1271
1272 /**
1273 * Start transmission of a packet.
1274 * Called from generic network device layer.
1275 *
1276 * @param skb Pointer to buffer containing the packet.
1277 * @param dev Pointer to interface struct.
1278 *
1279 * @return 0 if packet consumed, !0 if packet rejected.
1280 * Note: If we return !0, then the packet is free'd by
1281 * the generic network layer.
1282 */
1283 static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1284 {
1285 struct netiucv_priv *privptr = netdev_priv(dev);
1286 int rc;
1287
1288 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1289 /**
1290 * Some sanity checks ...
1291 */
1292 if (skb == NULL) {
1293 PRINT_WARN("%s: NULL sk_buff passed\n", dev->name);
1294 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1295 privptr->stats.tx_dropped++;
1296 return 0;
1297 }
1298 if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1299 PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n",
1300 dev->name, NETIUCV_HDRLEN);
1301 IUCV_DBF_TEXT(data, 2,
1302 "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1303 dev_kfree_skb(skb);
1304 privptr->stats.tx_dropped++;
1305 return 0;
1306 }
1307
1308 /**
1309 * If connection is not running, try to restart it
1310 * and throw away packet.
1311 */
1312 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1313 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
1314 dev_kfree_skb(skb);
1315 privptr->stats.tx_dropped++;
1316 privptr->stats.tx_errors++;
1317 privptr->stats.tx_carrier_errors++;
1318 return 0;
1319 }
1320
1321 if (netiucv_test_and_set_busy(dev)) {
1322 IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1323 return -EBUSY;
1324 }
1325 dev->trans_start = jiffies;
1326 rc = netiucv_transmit_skb(privptr->conn, skb) != 0;
1327 netiucv_clear_busy(dev);
1328 return rc;
1329 }
1330
1331 /**
1332 * netiucv_stats
1333 * @dev: Pointer to interface struct.
1334 *
1335 * Returns interface statistics of a device.
1336 *
1337 * Returns pointer to stats struct of this interface.
1338 */
1339 static struct net_device_stats *netiucv_stats (struct net_device * dev)
1340 {
1341 struct netiucv_priv *priv = netdev_priv(dev);
1342
1343 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1344 return &priv->stats;
1345 }
1346
1347 /**
1348 * netiucv_change_mtu
1349 * @dev: Pointer to interface struct.
1350 * @new_mtu: The new MTU to use for this interface.
1351 *
1352 * Sets MTU of an interface.
1353 *
1354 * Returns 0 on success, -EINVAL if MTU is out of valid range.
1355 * (valid range is 576 .. NETIUCV_MTU_MAX).
1356 */
1357 static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1358 {
1359 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1360 if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1361 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1362 return -EINVAL;
1363 }
1364 dev->mtu = new_mtu;
1365 return 0;
1366 }
1367
1368 /*
1369 * attributes in sysfs
1370 */
1371
1372 static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1373 char *buf)
1374 {
1375 struct netiucv_priv *priv = dev->driver_data;
1376
1377 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1378 return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
1379 }
1380
1381 static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1382 const char *buf, size_t count)
1383 {
1384 struct netiucv_priv *priv = dev->driver_data;
1385 struct net_device *ndev = priv->conn->netdev;
1386 char *p;
1387 char *tmp;
1388 char username[9];
1389 int i;
1390 struct iucv_connection *cp;
1391
1392 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1393 if (count > 9) {
1394 PRINT_WARN("netiucv: username too long (%d)!\n", (int) count);
1395 IUCV_DBF_TEXT_(setup, 2,
1396 "%d is length of username\n", (int) count);
1397 return -EINVAL;
1398 }
1399
1400 tmp = strsep((char **) &buf, "\n");
1401 for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
1402 if (isalnum(*p) || (*p == '$')) {
1403 username[i]= toupper(*p);
1404 continue;
1405 }
1406 if (*p == '\n') {
1407 /* trailing lf, grr */
1408 break;
1409 }
1410 PRINT_WARN("netiucv: Invalid char %c in username!\n", *p);
1411 IUCV_DBF_TEXT_(setup, 2,
1412 "username: invalid character %c\n", *p);
1413 return -EINVAL;
1414 }
1415 while (i < 8)
1416 username[i++] = ' ';
1417 username[8] = '\0';
1418
1419 if (memcmp(username, priv->conn->userid, 9) &&
1420 (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1421 /* username changed while the interface is active. */
1422 PRINT_WARN("netiucv: device %s active, connected to %s\n",
1423 dev->bus_id, priv->conn->userid);
1424 PRINT_WARN("netiucv: user cannot be updated\n");
1425 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1426 return -EBUSY;
1427 }
1428 read_lock_bh(&iucv_connection_rwlock);
1429 list_for_each_entry(cp, &iucv_connection_list, list) {
1430 if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
1431 read_unlock_bh(&iucv_connection_rwlock);
1432 PRINT_WARN("netiucv: Connection to %s already "
1433 "exists\n", username);
1434 return -EEXIST;
1435 }
1436 }
1437 read_unlock_bh(&iucv_connection_rwlock);
1438 memcpy(priv->conn->userid, username, 9);
1439 return count;
1440 }
1441
1442 static DEVICE_ATTR(user, 0644, user_show, user_write);
1443
1444 static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1445 char *buf)
1446 {
	struct netiucv_priv *priv = dev->driver_data;
1447
1448 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1449 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1450 }
1451
1452 static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1453 const char *buf, size_t count)
1454 {
1455 struct netiucv_priv *priv = dev->driver_data;
1456 struct net_device *ndev = priv->conn->netdev;
1457 char *e;
1458 int bs1;
1459
1460 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1461 if (count >= 39)
1462 return -EINVAL;
1463
1464 bs1 = simple_strtoul(buf, &e, 0);
1465
1466 if (e && (!isspace(*e))) {
1467 PRINT_WARN("netiucv: Invalid character in buffer!\n");
1468 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
1469 return -EINVAL;
1470 }
1471 if (bs1 > NETIUCV_BUFSIZE_MAX) {
1472 PRINT_WARN("netiucv: Given buffer size %d too large.\n",
1473 bs1);
1474 IUCV_DBF_TEXT_(setup, 2,
1475 "buffer_write: buffer size %d too large\n",
1476 bs1);
1477 return -EINVAL;
1478 }
1479 if ((ndev->flags & IFF_RUNNING) &&
1480 (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1481 PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1482 bs1);
1483 IUCV_DBF_TEXT_(setup, 2,
1484 "buffer_write: buffer size %d too small\n",
1485 bs1);
1486 return -EINVAL;
1487 }
1488 if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1489 PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1490 bs1);
1491 IUCV_DBF_TEXT_(setup, 2,
1492 "buffer_write: buffer size %d too small\n",
1493 bs1);
1494 return -EINVAL;
1495 }
1496
1497 priv->conn->max_buffsize = bs1;
1498 if (!(ndev->flags & IFF_RUNNING))
1499 ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1500
1501 return count;
1502
1503 }
1504
1505 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
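/*
 * Usage sketch (editorial, not from the original source): the per-connection
 * attributes appear on the iucv bus; the device name ("netiucv0") and the
 * peer user id ("VMUSER1") below are just examples.
 *
 *   cat /sys/bus/iucv/devices/netiucv0/user
 *   echo VMUSER1 > /sys/bus/iucv/devices/netiucv0/user
 *   echo 32768 > /sys/bus/iucv/devices/netiucv0/buffer
 */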
1506
1507 static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1508 char *buf)
1509 {
1510 struct netiucv_priv *priv = dev->driver_data;
1511
1512 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1513 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1514 }
1515
1516 static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1517
1518 static ssize_t conn_fsm_show (struct device *dev,
1519 struct device_attribute *attr, char *buf)
1520 {
1521 struct netiucv_priv *priv = dev->driver_data;
1522
1523 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1524 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1525 }
1526
1527 static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1528
1529 static ssize_t maxmulti_show (struct device *dev,
1530 struct device_attribute *attr, char *buf)
1531 {
1532 struct netiucv_priv *priv = dev->driver_data;
1533
1534 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1535 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1536 }
1537
1538 static ssize_t maxmulti_write (struct device *dev,
1539 struct device_attribute *attr,
1540 const char *buf, size_t count)
1541 {
1542 struct netiucv_priv *priv = dev->driver_data;
1543
1544 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1545 priv->conn->prof.maxmulti = 0;
1546 return count;
1547 }
1548
1549 static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1550
1551 static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1552 char *buf)
1553 {
1554 struct netiucv_priv *priv = dev->driver_data;
1555
1556 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1557 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1558 }
1559
1560 static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1561 const char *buf, size_t count)
1562 {
1563 struct netiucv_priv *priv = dev->driver_data;
1564
1565 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1566 priv->conn->prof.maxcqueue = 0;
1567 return count;
1568 }
1569
1570 static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1571
1572 static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1573 char *buf)
1574 {
1575 struct netiucv_priv *priv = dev->driver_data;
1576
1577 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1578 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1579 }
1580
1581 static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1582 const char *buf, size_t count)
1583 {
1584 struct netiucv_priv *priv = dev->driver_data;
1585
1586 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1587 priv->conn->prof.doios_single = 0;
1588 return count;
1589 }
1590
1591 static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1592
1593 static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1594 char *buf)
1595 {
1596 struct netiucv_priv *priv = dev->driver_data;
1597
1598 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1599 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1600 }
1601
1602 static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1603 const char *buf, size_t count)
1604 {
1605 struct netiucv_priv *priv = dev->driver_data;
1606
1607 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1608 priv->conn->prof.doios_multi = 0;
1609 return count;
1610 }
1611
1612 static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1613
1614 static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1615 char *buf)
1616 {
1617 struct netiucv_priv *priv = dev->driver_data;
1618
1619 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1620 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1621 }
1622
1623 static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1624 const char *buf, size_t count)
1625 {
1626 struct netiucv_priv *priv = dev->driver_data;
1627
1628 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1629 priv->conn->prof.txlen = 0;
1630 return count;
1631 }
1632
1633 static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1634
1635 static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1636 char *buf)
1637 {
1638 struct netiucv_priv *priv = dev->driver_data;
1639
1640 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1641 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1642 }
1643
1644 static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1645 const char *buf, size_t count)
1646 {
1647 struct netiucv_priv *priv = dev->driver_data;
1648
1649 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1650 priv->conn->prof.tx_time = 0;
1651 return count;
1652 }
1653
1654 static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1655
1656 static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1657 char *buf)
1658 {
1659 struct netiucv_priv *priv = dev->driver_data;
1660
1661 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1662 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1663 }
1664
1665 static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1666 const char *buf, size_t count)
1667 {
1668 struct netiucv_priv *priv = dev->driver_data;
1669
1670 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1671 priv->conn->prof.tx_pending = 0;
1672 return count;
1673 }
1674
1675 static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1676
1677 static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1678 char *buf)
1679 {
1680 struct netiucv_priv *priv = dev->driver_data;
1681
1682 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1683 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1684 }
1685
1686 static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1687 const char *buf, size_t count)
1688 {
1689 struct netiucv_priv *priv = dev->driver_data;
1690
1691 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1692 priv->conn->prof.tx_max_pending = 0;
1693 return count;
1694 }
1695
1696 static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1697
1698 static struct attribute *netiucv_attrs[] = {
1699 &dev_attr_buffer.attr,
1700 &dev_attr_user.attr,
1701 NULL,
1702 };
1703
1704 static struct attribute_group netiucv_attr_group = {
1705 .attrs = netiucv_attrs,
1706 };
1707
1708 static struct attribute *netiucv_stat_attrs[] = {
1709 &dev_attr_device_fsm_state.attr,
1710 &dev_attr_connection_fsm_state.attr,
1711 &dev_attr_max_tx_buffer_used.attr,
1712 &dev_attr_max_chained_skbs.attr,
1713 &dev_attr_tx_single_write_ops.attr,
1714 &dev_attr_tx_multi_write_ops.attr,
1715 &dev_attr_netto_bytes.attr,
1716 &dev_attr_max_tx_io_time.attr,
1717 &dev_attr_tx_pending.attr,
1718 &dev_attr_tx_max_pending.attr,
1719 NULL,
1720 };
1721
1722 static struct attribute_group netiucv_stat_attr_group = {
1723 .name = "stats",
1724 .attrs = netiucv_stat_attrs,
1725 };
1726
1727 static inline int netiucv_add_files(struct device *dev)
1728 {
1729 int ret;
1730
1731 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1732 ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
1733 if (ret)
1734 return ret;
1735 ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
1736 if (ret)
1737 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1738 return ret;
1739 }
1740
1741 static inline void netiucv_remove_files(struct device *dev)
1742 {
1743 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1744 sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
1745 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1746 }
1747
1748 static int netiucv_register_device(struct net_device *ndev)
1749 {
1750 struct netiucv_priv *priv = netdev_priv(ndev);
1751 struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1752 int ret;
1753
1754
1755 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1756
1757 if (dev) {
1758 snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name);
1759 dev->bus = &iucv_bus;
1760 dev->parent = iucv_root;
1761 /*
1762 * The release function could be called after the
1763 * module has been unloaded. Its _only_ task is to
1764 * free the struct. Therefore, we specify kfree()
1765 * directly here. (Probably a little bit obfuscating,
1766 * but legitimate.)
1767 */
1768 dev->release = (void (*)(struct device *))kfree;
1769 dev->driver = &netiucv_driver;
1770 } else
1771 return -ENOMEM;
1772
1773 ret = device_register(dev);
1774
1775 if (ret)
1776 return ret;
1777 ret = netiucv_add_files(dev);
1778 if (ret)
1779 goto out_unreg;
1780 priv->dev = dev;
1781 dev->driver_data = priv;
1782 return 0;
1783
1784 out_unreg:
1785 device_unregister(dev);
1786 return ret;
1787 }
1788
1789 static void netiucv_unregister_device(struct device *dev)
1790 {
1791 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1792 netiucv_remove_files(dev);
1793 device_unregister(dev);
1794 }
1795
1796 /**
1797 * Allocate and initialize a new connection structure.
1798 * Add it to the list of netiucv connections.
1799 */
1800 static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1801 char *username)
1802 {
1803 struct iucv_connection *conn;
1804
1805 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1806 if (!conn)
1807 goto out;
1808 skb_queue_head_init(&conn->collect_queue);
1809 skb_queue_head_init(&conn->commit_queue);
1810 spin_lock_init(&conn->collect_lock);
1811 conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1812 conn->netdev = dev;
1813
1814 conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1815 if (!conn->rx_buff)
1816 goto out_conn;
1817 conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1818 if (!conn->tx_buff)
1819 goto out_rx;
1820 conn->fsm = init_fsm("netiucvconn", conn_state_names,
1821 conn_event_names, NR_CONN_STATES,
1822 NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1823 GFP_KERNEL);
1824 if (!conn->fsm)
1825 goto out_tx;
1826
1827 fsm_settimer(conn->fsm, &conn->timer);
1828 fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1829
1830 if (username) {
1831 memcpy(conn->userid, username, 9);
1832 fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1833 }
1834
1835 write_lock_bh(&iucv_connection_rwlock);
1836 list_add_tail(&conn->list, &iucv_connection_list);
1837 write_unlock_bh(&iucv_connection_rwlock);
1838 return conn;
1839
1840 out_tx:
1841 kfree_skb(conn->tx_buff);
1842 out_rx:
1843 kfree_skb(conn->rx_buff);
1844 out_conn:
1845 kfree(conn);
1846 out:
1847 return NULL;
1848 }
1849
1850 /**
1851 * Release a connection structure and remove it from the
1852 * list of netiucv connections.
1853 */
1854 static void netiucv_remove_connection(struct iucv_connection *conn)
1855 {
1856 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1857 write_lock_bh(&iucv_connection_rwlock);
1858 list_del_init(&conn->list);
1859 write_unlock_bh(&iucv_connection_rwlock);
1860 if (conn->path) {
1861 iucv_path_sever(conn->path, iucvMagic);
1862 kfree(conn->path);
1863 conn->path = NULL;
1864 }
1865 fsm_deltimer(&conn->timer);
1866 kfree_fsm(conn->fsm);
1867 kfree_skb(conn->rx_buff);
1868 kfree_skb(conn->tx_buff);
1869 }
1870
1871 /**
1872 * Release everything of a net device.
1873 */
1874 static void netiucv_free_netdevice(struct net_device *dev)
1875 {
1876 struct netiucv_priv *privptr = netdev_priv(dev);
1877
1878 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1879
1880 if (!dev)
1881 return;
1882
1883 if (privptr) {
1884 if (privptr->conn)
1885 netiucv_remove_connection(privptr->conn);
1886 if (privptr->fsm)
1887 kfree_fsm(privptr->fsm);
1888 privptr->conn = NULL; privptr->fsm = NULL;
1889 /* privptr gets freed by free_netdev() */
1890 }
1891 free_netdev(dev);
1892 }
1893
1894 /**
1895 * Initialize a net device. (Called from kernel in alloc_netdev())
1896 */
1897 static void netiucv_setup_netdevice(struct net_device *dev)
1898 {
1899 dev->mtu = NETIUCV_MTU_DEFAULT;
1900 dev->hard_start_xmit = netiucv_tx;
1901 dev->open = netiucv_open;
1902 dev->stop = netiucv_close;
1903 dev->get_stats = netiucv_stats;
1904 dev->change_mtu = netiucv_change_mtu;
1905 dev->destructor = netiucv_free_netdevice;
1906 dev->hard_header_len = NETIUCV_HDRLEN;
1907 dev->addr_len = 0;
1908 dev->type = ARPHRD_SLIP;
1909 dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
1910 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1911 SET_MODULE_OWNER(dev);
1912 }
1913
1914 /**
1915 * Allocate and initialize everything of a net device.
1916 */
1917 static struct net_device *netiucv_init_netdevice(char *username)
1918 {
1919 struct netiucv_priv *privptr;
1920 struct net_device *dev;
1921
1922 dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
1923 netiucv_setup_netdevice);
1924 if (!dev)
1925 return NULL;
1926 if (dev_alloc_name(dev, dev->name) < 0)
1927 goto out_netdev;
1928
1929 privptr = netdev_priv(dev);
1930 privptr->fsm = init_fsm("netiucvdev", dev_state_names,
1931 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
1932 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
1933 if (!privptr->fsm)
1934 goto out_netdev;
1935
1936 privptr->conn = netiucv_new_connection(dev, username);
1937 if (!privptr->conn) {
1938 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
1939 goto out_fsm;
1940 }
1941 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
1942 return dev;
1943
1944 out_fsm:
1945 kfree_fsm(privptr->fsm);
1946 out_netdev:
1947 free_netdev(dev);
1948 return NULL;
1949 }
1950
1951 static ssize_t conn_write(struct device_driver *drv,
1952 const char *buf, size_t count)
1953 {
1954 const char *p;
1955 char username[9];
1956 int i, rc;
1957 struct net_device *dev;
1958 struct netiucv_priv *priv;
1959 struct iucv_connection *cp;
1960
1961 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1962 if (count>9) {
1963 PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
1964 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1965 return -EINVAL;
1966 }
1967
1968 for (i = 0, p = buf; i < 8 && *p; i++, p++) {
1969 if (isalnum(*p) || *p == '$') {
1970 username[i] = toupper(*p);
1971 continue;
1972 }
1973 if (*p == '\n')
1974 /* trailing lf, grr */
1975 break;
1976 PRINT_WARN("netiucv: Invalid character in username!\n");
1977 IUCV_DBF_TEXT_(setup, 2,
1978 "conn_write: invalid character %c\n", *p);
1979 return -EINVAL;
1980 }
1981 while (i < 8)
1982 username[i++] = ' ';
1983 username[8] = '\0';
1984
1985 read_lock_bh(&iucv_connection_rwlock);
1986 list_for_each_entry(cp, &iucv_connection_list, list) {
1987 if (!strncmp(username, cp->userid, 9)) {
1988 read_unlock_bh(&iucv_connection_rwlock);
1989 PRINT_WARN("netiucv: Connection to %s already "
1990 "exists\n", username);
1991 return -EEXIST;
1992 }
1993 }
1994 read_unlock_bh(&iucv_connection_rwlock);
1995
1996 dev = netiucv_init_netdevice(username);
1997 if (!dev) {
1998 PRINT_WARN("netiucv: Could not allocate network device "
1999 "structure for user '%s'\n",
2000 netiucv_printname(username));
2001 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
2002 return -ENODEV;
2003 }
2004
2005 rc = netiucv_register_device(dev);
2006 if (rc) {
2007 IUCV_DBF_TEXT_(setup, 2,
2008 "ret %d from netiucv_register_device\n", rc);
2009 goto out_free_ndev;
2010 }
2011
2012 /* sysfs magic */
2013 priv = netdev_priv(dev);
2014 SET_NETDEV_DEV(dev, priv->dev);
2015
2016 rc = register_netdev(dev);
2017 if (rc)
2018 goto out_unreg;
2019
2020 PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));
2021
2022 return count;
2023
2024 out_unreg:
2025 netiucv_unregister_device(priv->dev);
2026 out_free_ndev:
2027 PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
2028 IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
2029 netiucv_free_netdevice(dev);
2030 return rc;
2031 }
2032
2033 static DRIVER_ATTR(connection, 0200, NULL, conn_write);
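/*
 * Usage sketch (editorial): writing a z/VM user id to this driver attribute
 * creates a new iucv<n> interface connected to that peer; "VMUSER1" is a
 * hypothetical guest name.
 *
 *   echo VMUSER1 > /sys/bus/iucv/drivers/netiucv/connection
 */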
2034
2035 static ssize_t remove_write (struct device_driver *drv,
2036 const char *buf, size_t count)
2037 {
2038 struct iucv_connection *cp;
2039 struct net_device *ndev;
2040 struct netiucv_priv *priv;
2041 struct device *dev;
2042 char name[IFNAMSIZ];
2043 const char *p;
2044 int i;
2045
2046 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2047
2048 if (count >= IFNAMSIZ)
2049 count = IFNAMSIZ - 1;
2050
2051 for (i = 0, p = buf; i < count && *p; i++, p++) {
2052 if (*p == '\n' || *p == ' ')
2053 /* trailing lf, grr */
2054 break;
2055 name[i] = *p;
2056 }
2057 name[i] = '\0';
2058
2059 read_lock_bh(&iucv_connection_rwlock);
2060 list_for_each_entry(cp, &iucv_connection_list, list) {
2061 ndev = cp->netdev;
2062 priv = netdev_priv(ndev);
2063 dev = priv->dev;
2064 if (strncmp(name, ndev->name, count))
2065 continue;
2066 read_unlock_bh(&iucv_connection_rwlock);
2067 if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2068 PRINT_WARN("netiucv: net device %s active with peer "
2069 "%s\n", ndev->name, priv->conn->userid);
2070 PRINT_WARN("netiucv: %s cannot be removed\n",
2071 ndev->name);
2072 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2073 return -EBUSY;
2074 }
2075 unregister_netdev(ndev);
2076 netiucv_unregister_device(dev);
2077 return count;
2078 }
2079 read_unlock_bh(&iucv_connection_rwlock);
2080 PRINT_WARN("netiucv: net device %s unknown\n", name);
2081 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2082 return -EINVAL;
2083 }
2084
2085 static DRIVER_ATTR(remove, 0200, NULL, remove_write);
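/*
 * Usage sketch (editorial): an inactive interface can be removed again by
 * writing its name to this attribute, e.g.:
 *
 *   echo iucv0 > /sys/bus/iucv/drivers/netiucv/remove
 */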
2086
2087 static struct attribute * netiucv_drv_attrs[] = {
2088 &driver_attr_connection.attr,
2089 &driver_attr_remove.attr,
2090 NULL,
2091 };
2092
2093 static struct attribute_group netiucv_drv_attr_group = {
2094 .attrs = netiucv_drv_attrs,
2095 };
2096
2097 static void netiucv_banner(void)
2098 {
2099 PRINT_INFO("NETIUCV driver initialized\n");
2100 }
2101
2102 static void __exit netiucv_exit(void)
2103 {
2104 struct iucv_connection *cp;
2105 struct net_device *ndev;
2106 struct netiucv_priv *priv;
2107 struct device *dev;
2108
2109 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2110 while (!list_empty(&iucv_connection_list)) {
2111 cp = list_entry(iucv_connection_list.next,
2112 struct iucv_connection, list);
2113 list_del(&cp->list);
2114 ndev = cp->netdev;
2115 priv = netdev_priv(ndev);
2116 dev = priv->dev;
2117
2118 unregister_netdev(ndev);
2119 netiucv_unregister_device(dev);
2120 }
2121
2122 sysfs_remove_group(&netiucv_driver.kobj, &netiucv_drv_attr_group);
2123 driver_unregister(&netiucv_driver);
2124 iucv_unregister(&netiucv_handler, 1);
2125 iucv_unregister_dbf_views();
2126
2127 PRINT_INFO("NETIUCV driver unloaded\n");
2128 return;
2129 }
2130
2131 static int __init netiucv_init(void)
2132 {
2133 int rc;
2134
2135 rc = iucv_register_dbf_views();
2136 if (rc)
2137 goto out;
2138 rc = iucv_register(&netiucv_handler, 1);
2139 if (rc)
2140 goto out_dbf;
2141 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2142 rc = driver_register(&netiucv_driver);
2143 if (rc) {
2144 PRINT_ERR("NETIUCV: failed to register driver.\n");
2145 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2146 goto out_iucv;
2147 }
2148
2149 rc = sysfs_create_group(&netiucv_driver.kobj, &netiucv_drv_attr_group);
2150 if (rc) {
2151 PRINT_ERR("NETIUCV: failed to add driver attributes.\n");
2152 IUCV_DBF_TEXT_(setup, 2,
2153 "ret %d - netiucv_drv_attr_group\n", rc);
2154 goto out_driver;
2155 }
2156 netiucv_banner();
2157 return rc;
2158
2159 out_driver:
2160 driver_unregister(&netiucv_driver);
2161 out_iucv:
2162 iucv_unregister(&netiucv_handler, 1);
2163 out_dbf:
2164 iucv_unregister_dbf_views();
2165 out:
2166 return rc;
2167 }
2168
2169 module_init(netiucv_init);
2170 module_exit(netiucv_exit);
2171 MODULE_LICENSE("GPL");