[SK_BUFF]: Introduce skb_copy_from_linear_data{_offset}
deliverable/linux.git: drivers/s390/net/netiucv.c
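The listing below shows the driver after its open-coded memcpy() reads from skb->data were converted to the helpers named in the commit title. As a minimal sketch of what the two helpers do — an assumption based on how the driver uses them below; the authoritative definitions live in include/linux/skbuff.h:

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
                                             void *to, const unsigned int len)
{
        /* copy len bytes from the start of the skb's linear data area */
        memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
                                                     const int offset, void *to,
                                                     const unsigned int len)
{
        /* same, but start offset bytes into the linear data area */
        memcpy(to, skb->data + offset, len);
}

Only skb_copy_from_linear_data() is used in this file (see netiucv_unpack_skb() and conn_action_txdone()); the _offset variant is shown for completeness of the pair named in the title.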
1 /*
2 * IUCV network driver
3 *
4 * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
6 *
7 * Sysfs integration and all bugs therein by Cornelia Huck
8 * (cornelia.huck@de.ibm.com)
9 *
10 * Documentation used:
11 * the source of the original IUCV driver by:
12 * Stefan Hegewald <hegewald@de.ibm.com>
13 * Hartmut Penner <hpenner@de.ibm.com>
14 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
15 * Martin Schwidefsky (schwidefsky@de.ibm.com)
16 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2, or (at your option)
21 * any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 *
32 */
33
34 #undef DEBUG
35
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/kernel.h>
39 #include <linux/slab.h>
40 #include <linux/errno.h>
41 #include <linux/types.h>
42 #include <linux/interrupt.h>
43 #include <linux/timer.h>
44 #include <linux/bitops.h>
45
46 #include <linux/signal.h>
47 #include <linux/string.h>
48 #include <linux/device.h>
49
50 #include <linux/ip.h>
51 #include <linux/if_arp.h>
52 #include <linux/tcp.h>
53 #include <linux/skbuff.h>
54 #include <linux/ctype.h>
55 #include <net/dst.h>
56
57 #include <asm/io.h>
58 #include <asm/uaccess.h>
59
60 #include <net/iucv/iucv.h>
61 #include "fsm.h"
62
63 MODULE_AUTHOR
64 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
65 MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
66
67 /**
68 * Debug Facility stuff
69 */
70 #define IUCV_DBF_SETUP_NAME "iucv_setup"
71 #define IUCV_DBF_SETUP_LEN 32
72 #define IUCV_DBF_SETUP_PAGES 2
73 #define IUCV_DBF_SETUP_NR_AREAS 1
74 #define IUCV_DBF_SETUP_LEVEL 3
75
76 #define IUCV_DBF_DATA_NAME "iucv_data"
77 #define IUCV_DBF_DATA_LEN 128
78 #define IUCV_DBF_DATA_PAGES 2
79 #define IUCV_DBF_DATA_NR_AREAS 1
80 #define IUCV_DBF_DATA_LEVEL 2
81
82 #define IUCV_DBF_TRACE_NAME "iucv_trace"
83 #define IUCV_DBF_TRACE_LEN 16
84 #define IUCV_DBF_TRACE_PAGES 4
85 #define IUCV_DBF_TRACE_NR_AREAS 1
86 #define IUCV_DBF_TRACE_LEVEL 3
87
88 #define IUCV_DBF_TEXT(name,level,text) \
89 do { \
90 debug_text_event(iucv_dbf_##name,level,text); \
91 } while (0)
92
93 #define IUCV_DBF_HEX(name,level,addr,len) \
94 do { \
95 debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
96 } while (0)
97
98 DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
99
100 #define IUCV_DBF_TEXT_(name,level,text...) \
101 do { \
102 char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
103 sprintf(iucv_dbf_txt_buf, text); \
104 debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
105 put_cpu_var(iucv_dbf_txt_buf); \
106 } while (0)
107
108 #define IUCV_DBF_SPRINTF(name,level,text...) \
109 do { \
110 debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
111 debug_sprintf_event(iucv_dbf_trace, level, text ); \
112 } while (0)
113
114 /**
115 * some more debug stuff
116 */
117 #define IUCV_HEXDUMP16(importance,header,ptr) \
118 PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
119 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
120 *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
121 *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
122 *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
123 *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
124 *(((char*)ptr)+12),*(((char*)ptr)+13), \
125 *(((char*)ptr)+14),*(((char*)ptr)+15)); \
126 PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
127 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
128 *(((char*)ptr)+16),*(((char*)ptr)+17), \
129 *(((char*)ptr)+18),*(((char*)ptr)+19), \
130 *(((char*)ptr)+20),*(((char*)ptr)+21), \
131 *(((char*)ptr)+22),*(((char*)ptr)+23), \
132 *(((char*)ptr)+24),*(((char*)ptr)+25), \
133 *(((char*)ptr)+26),*(((char*)ptr)+27), \
134 *(((char*)ptr)+28),*(((char*)ptr)+29), \
135 *(((char*)ptr)+30),*(((char*)ptr)+31));
136
137 static inline void iucv_hex_dump(unsigned char *buf, size_t len)
138 {
139 size_t i;
140
141 for (i = 0; i < len; i++) {
142 if (i && !(i % 16))
143 printk("\n");
144 printk("%02x ", *(buf + i));
145 }
146 printk("\n");
147 }
148
149 #define PRINTK_HEADER " iucv: " /* for debugging */
150
151 static struct device_driver netiucv_driver = {
152 .name = "netiucv",
153 .bus = &iucv_bus,
154 };
155
156 static int netiucv_callback_connreq(struct iucv_path *,
157 u8 ipvmid[8], u8 ipuser[16]);
158 static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
159 static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
160 static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
161 static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
162 static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
163 static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
164
165 static struct iucv_handler netiucv_handler = {
166 .path_pending = netiucv_callback_connreq,
167 .path_complete = netiucv_callback_connack,
168 .path_severed = netiucv_callback_connrej,
169 .path_quiesced = netiucv_callback_connsusp,
170 .path_resumed = netiucv_callback_connres,
171 .message_pending = netiucv_callback_rx,
172 .message_complete = netiucv_callback_txdone
173 };
174
175 /**
176 * Per connection profiling data
177 */
178 struct connection_profile {
179 unsigned long maxmulti;
180 unsigned long maxcqueue;
181 unsigned long doios_single;
182 unsigned long doios_multi;
183 unsigned long txlen;
184 unsigned long tx_time;
185 struct timespec send_stamp;
186 unsigned long tx_pending;
187 unsigned long tx_max_pending;
188 };
189
190 /**
191 * Representation of one iucv connection
192 */
193 struct iucv_connection {
194 struct list_head list;
195 struct iucv_path *path;
196 struct sk_buff *rx_buff;
197 struct sk_buff *tx_buff;
198 struct sk_buff_head collect_queue;
199 struct sk_buff_head commit_queue;
200 spinlock_t collect_lock;
201 int collect_len;
202 int max_buffsize;
203 fsm_timer timer;
204 fsm_instance *fsm;
205 struct net_device *netdev;
206 struct connection_profile prof;
207 char userid[9];
208 };
209
210 /**
211 * Linked list of all connection structs.
212 */
213 static struct list_head iucv_connection_list =
214 LIST_HEAD_INIT(iucv_connection_list);
215 static rwlock_t iucv_connection_rwlock = RW_LOCK_UNLOCKED;
216
217 /**
218 * Representation of event-data for the
219 * connection state machine.
220 */
221 struct iucv_event {
222 struct iucv_connection *conn;
223 void *data;
224 };
225
226 /**
227 * Private part of the network device structure
228 */
229 struct netiucv_priv {
230 struct net_device_stats stats;
231 unsigned long tbusy;
232 fsm_instance *fsm;
233 struct iucv_connection *conn;
234 struct device *dev;
235 };
236
237 /**
238 * Link level header for a packet.
239 */
240 struct ll_header {
241 u16 next;
242 };
243
244 #define NETIUCV_HDRLEN (sizeof(struct ll_header))
245 #define NETIUCV_BUFSIZE_MAX 32768
246 #define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
247 #define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
248 #define NETIUCV_MTU_DEFAULT 9216
249 #define NETIUCV_QUEUELEN_DEFAULT 50
250 #define NETIUCV_TIMEOUT_5SEC 5000
251
252 /**
 253 * Compatibility functions for busy handling
 254 * of network devices.
255 */
256 static inline void netiucv_clear_busy(struct net_device *dev)
257 {
258 struct netiucv_priv *priv = netdev_priv(dev);
259 clear_bit(0, &priv->tbusy);
260 netif_wake_queue(dev);
261 }
262
263 static inline int netiucv_test_and_set_busy(struct net_device *dev)
264 {
265 struct netiucv_priv *priv = netdev_priv(dev);
266 netif_stop_queue(dev);
267 return test_and_set_bit(0, &priv->tbusy);
268 }
269
270 static u8 iucvMagic[16] = {
271 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
272 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
273 };
274
275 /**
276 * Convert an iucv userId to its printable
277 * form (strip whitespace at end).
278 *
279 * @param An iucv userId
280 *
281 * @returns The printable string (static data!!)
282 */
283 static inline char *netiucv_printname(char *name)
284 {
285 static char tmp[9];
286 char *p = tmp;
287 memcpy(tmp, name, 8);
288 tmp[8] = '\0';
289 while (*p && (!isspace(*p)))
290 p++;
291 *p = '\0';
292 return tmp;
293 }
294
295 /**
296 * States of the interface statemachine.
297 */
298 enum dev_states {
299 DEV_STATE_STOPPED,
300 DEV_STATE_STARTWAIT,
301 DEV_STATE_STOPWAIT,
302 DEV_STATE_RUNNING,
303 /**
304 * MUST be always the last element!!
305 */
306 NR_DEV_STATES
307 };
308
309 static const char *dev_state_names[] = {
310 "Stopped",
311 "StartWait",
312 "StopWait",
313 "Running",
314 };
315
316 /**
317 * Events of the interface statemachine.
318 */
319 enum dev_events {
320 DEV_EVENT_START,
321 DEV_EVENT_STOP,
322 DEV_EVENT_CONUP,
323 DEV_EVENT_CONDOWN,
324 /**
325 * MUST be always the last element!!
326 */
327 NR_DEV_EVENTS
328 };
329
330 static const char *dev_event_names[] = {
331 "Start",
332 "Stop",
333 "Connection up",
334 "Connection down",
335 };
336
337 /**
338 * Events of the connection statemachine
339 */
340 enum conn_events {
341 /**
 342 * Events representing callbacks from
 343 * the lowlevel iucv layer
344 */
345 CONN_EVENT_CONN_REQ,
346 CONN_EVENT_CONN_ACK,
347 CONN_EVENT_CONN_REJ,
348 CONN_EVENT_CONN_SUS,
349 CONN_EVENT_CONN_RES,
350 CONN_EVENT_RX,
351 CONN_EVENT_TXDONE,
352
353 /**
 354 * Events representing error return codes from
 355 * calls to the lowlevel iucv layer
356 */
357
358 /**
359 * Event, representing timer expiry.
360 */
361 CONN_EVENT_TIMER,
362
363 /**
 364 * Events representing commands from upper levels.
365 */
366 CONN_EVENT_START,
367 CONN_EVENT_STOP,
368
369 /**
370 * MUST be always the last element!!
371 */
372 NR_CONN_EVENTS,
373 };
374
375 static const char *conn_event_names[] = {
376 "Remote connection request",
377 "Remote connection acknowledge",
378 "Remote connection reject",
379 "Connection suspended",
380 "Connection resumed",
381 "Data received",
382 "Data sent",
383
384 "Timer",
385
386 "Start",
387 "Stop",
388 };
389
390 /**
391 * States of the connection statemachine.
392 */
393 enum conn_states {
394 /**
395 * Connection not assigned to any device,
396 * initial state, invalid
397 */
398 CONN_STATE_INVALID,
399
400 /**
401 * Userid assigned but not operating
402 */
403 CONN_STATE_STOPPED,
404
405 /**
406 * Connection registered,
407 * no connection request sent yet,
408 * no connection request received
409 */
410 CONN_STATE_STARTWAIT,
411
412 /**
413 * Connection registered and connection request sent,
414 * no acknowledge and no connection request received yet.
415 */
416 CONN_STATE_SETUPWAIT,
417
418 /**
419 * Connection up and running idle
420 */
421 CONN_STATE_IDLE,
422
423 /**
424 * Data sent, awaiting CONN_EVENT_TXDONE
425 */
426 CONN_STATE_TX,
427
428 /**
429 * Error during registration.
430 */
431 CONN_STATE_REGERR,
432
433 /**
 434 * Error during connection setup.
435 */
436 CONN_STATE_CONNERR,
437
438 /**
439 * MUST be always the last element!!
440 */
441 NR_CONN_STATES,
442 };
443
444 static const char *conn_state_names[] = {
445 "Invalid",
446 "Stopped",
447 "StartWait",
448 "SetupWait",
449 "Idle",
450 "TX",
451 "Terminating",
452 "Registration error",
453 "Connect error",
454 };
455
456
457 /**
458 * Debug Facility Stuff
459 */
460 static debug_info_t *iucv_dbf_setup = NULL;
461 static debug_info_t *iucv_dbf_data = NULL;
462 static debug_info_t *iucv_dbf_trace = NULL;
463
464 DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
465
466 static void iucv_unregister_dbf_views(void)
467 {
468 if (iucv_dbf_setup)
469 debug_unregister(iucv_dbf_setup);
470 if (iucv_dbf_data)
471 debug_unregister(iucv_dbf_data);
472 if (iucv_dbf_trace)
473 debug_unregister(iucv_dbf_trace);
474 }
475 static int iucv_register_dbf_views(void)
476 {
477 iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
478 IUCV_DBF_SETUP_PAGES,
479 IUCV_DBF_SETUP_NR_AREAS,
480 IUCV_DBF_SETUP_LEN);
481 iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
482 IUCV_DBF_DATA_PAGES,
483 IUCV_DBF_DATA_NR_AREAS,
484 IUCV_DBF_DATA_LEN);
485 iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
486 IUCV_DBF_TRACE_PAGES,
487 IUCV_DBF_TRACE_NR_AREAS,
488 IUCV_DBF_TRACE_LEN);
489
490 if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
491 (iucv_dbf_trace == NULL)) {
492 iucv_unregister_dbf_views();
493 return -ENOMEM;
494 }
495 debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
496 debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
497
498 debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
499 debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
500
501 debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
502 debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
503
504 return 0;
505 }
506
507 /*
508 * Callback-wrappers, called from lowlevel iucv layer.
509 */
510
511 static void netiucv_callback_rx(struct iucv_path *path,
512 struct iucv_message *msg)
513 {
514 struct iucv_connection *conn = path->private;
515 struct iucv_event ev;
516
517 ev.conn = conn;
518 ev.data = msg;
519 fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
520 }
521
522 static void netiucv_callback_txdone(struct iucv_path *path,
523 struct iucv_message *msg)
524 {
525 struct iucv_connection *conn = path->private;
526 struct iucv_event ev;
527
528 ev.conn = conn;
529 ev.data = msg;
530 fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
531 }
532
533 static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
534 {
535 struct iucv_connection *conn = path->private;
536
537 fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
538 }
539
540 static int netiucv_callback_connreq(struct iucv_path *path,
541 u8 ipvmid[8], u8 ipuser[16])
542 {
543 struct iucv_connection *conn = path->private;
544 struct iucv_event ev;
545 int rc;
546
547 if (memcmp(iucvMagic, ipuser, sizeof(ipuser)))
548 /* ipuser must match iucvMagic. */
549 return -EINVAL;
550 rc = -EINVAL;
551 read_lock_bh(&iucv_connection_rwlock);
552 list_for_each_entry(conn, &iucv_connection_list, list) {
553 if (strncmp(ipvmid, conn->userid, 8))
554 continue;
555 /* Found a matching connection for this path. */
556 conn->path = path;
557 ev.conn = conn;
558 ev.data = path;
559 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
560 rc = 0;
561 }
562 read_unlock_bh(&iucv_connection_rwlock);
563 return rc;
564 }
565
566 static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
567 {
568 struct iucv_connection *conn = path->private;
569
570 fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
571 }
572
573 static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
574 {
575 struct iucv_connection *conn = path->private;
576
577 fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
578 }
579
580 static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
581 {
582 struct iucv_connection *conn = path->private;
583
584 fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
585 }
586
587 /**
588 * Dummy NOP action for all statemachines
589 */
590 static void fsm_action_nop(fsm_instance *fi, int event, void *arg)
591 {
592 }
593
594 /*
595 * Actions of the connection statemachine
596 */
597
598 /**
599 * netiucv_unpack_skb
600 * @conn: The connection where this skb has been received.
601 * @pskb: The received skb.
602 *
603 * Unpack a just received skb and hand it over to upper layers.
604 * Helper function for conn_action_rx.
605 */
606 static void netiucv_unpack_skb(struct iucv_connection *conn,
607 struct sk_buff *pskb)
608 {
609 struct net_device *dev = conn->netdev;
610 struct netiucv_priv *privptr = netdev_priv(dev);
611 u16 offset = 0;
612
613 skb_put(pskb, NETIUCV_HDRLEN);
614 pskb->dev = dev;
615 pskb->ip_summed = CHECKSUM_NONE;
616 pskb->protocol = ntohs(ETH_P_IP);
617
618 while (1) {
619 struct sk_buff *skb;
620 struct ll_header *header = (struct ll_header *) pskb->data;
621
622 if (!header->next)
623 break;
624
625 skb_pull(pskb, NETIUCV_HDRLEN);
626 header->next -= offset;
627 offset += header->next;
628 header->next -= NETIUCV_HDRLEN;
629 if (skb_tailroom(pskb) < header->next) {
630 PRINT_WARN("%s: Illegal next field in iucv header: "
631 "%d > %d\n",
632 dev->name, header->next, skb_tailroom(pskb));
633 IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
634 header->next, skb_tailroom(pskb));
635 return;
636 }
637 skb_put(pskb, header->next);
638 skb_reset_mac_header(pskb);
639 skb = dev_alloc_skb(pskb->len);
640 if (!skb) {
641 PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n",
642 dev->name);
643 IUCV_DBF_TEXT(data, 2,
644 "Out of memory in netiucv_unpack_skb\n");
645 privptr->stats.rx_dropped++;
646 return;
647 }
648 skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
649 pskb->len);
650 skb_reset_mac_header(skb);
651 skb->dev = pskb->dev;
652 skb->protocol = pskb->protocol;
653 pskb->ip_summed = CHECKSUM_UNNECESSARY;
654 /*
655 * Since receiving is always initiated from a tasklet (in iucv.c),
656 * we must use netif_rx_ni() instead of netif_rx()
657 */
658 netif_rx_ni(skb);
659 dev->last_rx = jiffies;
660 privptr->stats.rx_packets++;
661 privptr->stats.rx_bytes += skb->len;
662 skb_pull(pskb, header->next);
663 skb_put(pskb, NETIUCV_HDRLEN);
664 }
665 }
666
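For reference, the copy in netiucv_unpack_skb() above maps onto the driver's previous open-coded form as follows (a sketch of the equivalence, assuming the helper wraps memcpy() as outlined after the commit title):

        /* open-coded form this commit replaces: */
        memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
        /* equivalent call using the new helper, as in the function above: */
        skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len), pskb->len);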
667 static void conn_action_rx(fsm_instance *fi, int event, void *arg)
668 {
669 struct iucv_event *ev = arg;
670 struct iucv_connection *conn = ev->conn;
671 struct iucv_message *msg = ev->data;
672 struct netiucv_priv *privptr = netdev_priv(conn->netdev);
673 int rc;
674
675 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
676
677 if (!conn->netdev) {
678 iucv_message_reject(conn->path, msg);
679 PRINT_WARN("Received data for unlinked connection\n");
680 IUCV_DBF_TEXT(data, 2,
681 "Received data for unlinked connection\n");
682 return;
683 }
684 if (msg->length > conn->max_buffsize) {
685 iucv_message_reject(conn->path, msg);
686 privptr->stats.rx_dropped++;
687 PRINT_WARN("msglen %d > max_buffsize %d\n",
688 msg->length, conn->max_buffsize);
689 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
690 msg->length, conn->max_buffsize);
691 return;
692 }
693 conn->rx_buff->data = conn->rx_buff->head;
694 skb_reset_tail_pointer(conn->rx_buff);
695 conn->rx_buff->len = 0;
696 rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
697 msg->length, NULL);
698 if (rc || msg->length < 5) {
699 privptr->stats.rx_errors++;
700 PRINT_WARN("iucv_receive returned %08x\n", rc);
701 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
702 return;
703 }
704 netiucv_unpack_skb(conn, conn->rx_buff);
705 }
706
707 static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
708 {
709 struct iucv_event *ev = arg;
710 struct iucv_connection *conn = ev->conn;
711 struct iucv_message *msg = ev->data;
712 struct iucv_message txmsg;
713 struct netiucv_priv *privptr = NULL;
714 u32 single_flag = msg->tag;
715 u32 txbytes = 0;
716 u32 txpackets = 0;
717 u32 stat_maxcq = 0;
718 struct sk_buff *skb;
719 unsigned long saveflags;
720 struct ll_header header;
721 int rc;
722
723 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
724
725 if (conn && conn->netdev)
726 privptr = netdev_priv(conn->netdev);
727 conn->prof.tx_pending--;
728 if (single_flag) {
729 if ((skb = skb_dequeue(&conn->commit_queue))) {
730 atomic_dec(&skb->users);
731 dev_kfree_skb_any(skb);
732 if (privptr) {
733 privptr->stats.tx_packets++;
734 privptr->stats.tx_bytes +=
735 (skb->len - NETIUCV_HDRLEN
736 - NETIUCV_HDRLEN);
737 }
738 }
739 }
740 conn->tx_buff->data = conn->tx_buff->head;
741 skb_reset_tail_pointer(conn->tx_buff);
742 conn->tx_buff->len = 0;
743 spin_lock_irqsave(&conn->collect_lock, saveflags);
744 while ((skb = skb_dequeue(&conn->collect_queue))) {
745 header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
746 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
747 NETIUCV_HDRLEN);
748 skb_copy_from_linear_data(skb,
749 skb_put(conn->tx_buff, skb->len),
750 skb->len);
751 txbytes += skb->len;
752 txpackets++;
753 stat_maxcq++;
754 atomic_dec(&skb->users);
755 dev_kfree_skb_any(skb);
756 }
757 if (conn->collect_len > conn->prof.maxmulti)
758 conn->prof.maxmulti = conn->collect_len;
759 conn->collect_len = 0;
760 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
761 if (conn->tx_buff->len == 0) {
762 fsm_newstate(fi, CONN_STATE_IDLE);
763 return;
764 }
765
766 header.next = 0;
767 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
768 conn->prof.send_stamp = xtime;
769 txmsg.class = 0;
770 txmsg.tag = 0;
771 rc = iucv_message_send(conn->path, &txmsg, 0, 0,
772 conn->tx_buff->data, conn->tx_buff->len);
773 conn->prof.doios_multi++;
774 conn->prof.txlen += conn->tx_buff->len;
775 conn->prof.tx_pending++;
776 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
777 conn->prof.tx_max_pending = conn->prof.tx_pending;
778 if (rc) {
779 conn->prof.tx_pending--;
780 fsm_newstate(fi, CONN_STATE_IDLE);
781 if (privptr)
782 privptr->stats.tx_errors += txpackets;
783 PRINT_WARN("iucv_send returned %08x\n", rc);
784 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
785 } else {
786 if (privptr) {
787 privptr->stats.tx_packets += txpackets;
788 privptr->stats.tx_bytes += txbytes;
789 }
790 if (stat_maxcq > conn->prof.maxcqueue)
791 conn->prof.maxcqueue = stat_maxcq;
792 }
793 }
794
795 static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
796 {
797 struct iucv_event *ev = arg;
798 struct iucv_connection *conn = ev->conn;
799 struct iucv_path *path = ev->data;
800 struct net_device *netdev = conn->netdev;
801 struct netiucv_priv *privptr = netdev_priv(netdev);
802 int rc;
803
804 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
805
806 conn->path = path;
807 path->msglim = NETIUCV_QUEUELEN_DEFAULT;
808 path->flags = 0;
809 rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
810 if (rc) {
811 PRINT_WARN("%s: IUCV accept failed with error %d\n",
812 netdev->name, rc);
813 IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
814 return;
815 }
816 fsm_newstate(fi, CONN_STATE_IDLE);
817 netdev->tx_queue_len = conn->path->msglim;
818 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
819 }
820
821 static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
822 {
823 struct iucv_event *ev = arg;
824 struct iucv_path *path = ev->data;
825
826 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
827 iucv_path_sever(path, NULL);
828 }
829
830 static void conn_action_connack(fsm_instance *fi, int event, void *arg)
831 {
832 struct iucv_connection *conn = arg;
833 struct net_device *netdev = conn->netdev;
834 struct netiucv_priv *privptr = netdev_priv(netdev);
835
836 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
837 fsm_deltimer(&conn->timer);
838 fsm_newstate(fi, CONN_STATE_IDLE);
839 netdev->tx_queue_len = conn->path->msglim;
840 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
841 }
842
843 static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
844 {
845 struct iucv_connection *conn = arg;
846
847 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
848 fsm_deltimer(&conn->timer);
849 iucv_path_sever(conn->path, NULL);
850 fsm_newstate(fi, CONN_STATE_STARTWAIT);
851 }
852
853 static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
854 {
855 struct iucv_connection *conn = arg;
856 struct net_device *netdev = conn->netdev;
857 struct netiucv_priv *privptr = netdev_priv(netdev);
858
859 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
860
861 fsm_deltimer(&conn->timer);
862 iucv_path_sever(conn->path, NULL);
863 PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
864 IUCV_DBF_TEXT(data, 2,
865 "conn_action_connsever: Remote dropped connection\n");
866 fsm_newstate(fi, CONN_STATE_STARTWAIT);
867 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
868 }
869
870 static void conn_action_start(fsm_instance *fi, int event, void *arg)
871 {
872 struct iucv_connection *conn = arg;
873 int rc;
874
875 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
876
877 fsm_newstate(fi, CONN_STATE_STARTWAIT);
878 PRINT_DEBUG("%s('%s'): connecting ...\n",
879 conn->netdev->name, conn->userid);
880
881 /*
882 * We must set the state before calling iucv_connect because the
883 * callback handler could be called at any point after the connection
884 * request is sent
885 */
886
887 fsm_newstate(fi, CONN_STATE_SETUPWAIT);
888 conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
889 rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
890 NULL, iucvMagic, conn);
891 switch (rc) {
892 case 0:
893 conn->netdev->tx_queue_len = conn->path->msglim;
894 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
895 CONN_EVENT_TIMER, conn);
896 return;
897 case 11:
898 PRINT_INFO("%s: User %s is currently not available.\n",
899 conn->netdev->name,
900 netiucv_printname(conn->userid));
901 fsm_newstate(fi, CONN_STATE_STARTWAIT);
902 break;
903 case 12:
904 PRINT_INFO("%s: User %s is currently not ready.\n",
905 conn->netdev->name,
906 netiucv_printname(conn->userid));
907 fsm_newstate(fi, CONN_STATE_STARTWAIT);
908 break;
909 case 13:
910 PRINT_WARN("%s: Too many IUCV connections.\n",
911 conn->netdev->name);
912 fsm_newstate(fi, CONN_STATE_CONNERR);
913 break;
914 case 14:
915 PRINT_WARN("%s: User %s has too many IUCV connections.\n",
916 conn->netdev->name,
917 netiucv_printname(conn->userid));
918 fsm_newstate(fi, CONN_STATE_CONNERR);
919 break;
920 case 15:
921 PRINT_WARN("%s: No IUCV authorization in CP directory.\n",
922 conn->netdev->name);
923 fsm_newstate(fi, CONN_STATE_CONNERR);
924 break;
925 default:
926 PRINT_WARN("%s: iucv_connect returned error %d\n",
927 conn->netdev->name, rc);
928 fsm_newstate(fi, CONN_STATE_CONNERR);
929 break;
930 }
931 IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
932 kfree(conn->path);
933 conn->path = NULL;
934 }
935
936 static void netiucv_purge_skb_queue(struct sk_buff_head *q)
937 {
938 struct sk_buff *skb;
939
940 while ((skb = skb_dequeue(q))) {
941 atomic_dec(&skb->users);
942 dev_kfree_skb_any(skb);
943 }
944 }
945
946 static void conn_action_stop(fsm_instance *fi, int event, void *arg)
947 {
948 struct iucv_event *ev = arg;
949 struct iucv_connection *conn = ev->conn;
950 struct net_device *netdev = conn->netdev;
951 struct netiucv_priv *privptr = netdev_priv(netdev);
952
953 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
954
955 fsm_deltimer(&conn->timer);
956 fsm_newstate(fi, CONN_STATE_STOPPED);
957 netiucv_purge_skb_queue(&conn->collect_queue);
958 if (conn->path) {
959 IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
960 iucv_path_sever(conn->path, iucvMagic);
961 kfree(conn->path);
962 conn->path = NULL;
963 }
964 netiucv_purge_skb_queue(&conn->commit_queue);
965 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
966 }
967
968 static void conn_action_inval(fsm_instance *fi, int event, void *arg)
969 {
970 struct iucv_connection *conn = arg;
971 struct net_device *netdev = conn->netdev;
972
973 PRINT_WARN("%s: Cannot connect without username\n", netdev->name);
974 IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n");
975 }
976
977 static const fsm_node conn_fsm[] = {
978 { CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval },
979 { CONN_STATE_STOPPED, CONN_EVENT_START, conn_action_start },
980
981 { CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop },
982 { CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop },
983 { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop },
984 { CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop },
985 { CONN_STATE_TX, CONN_EVENT_STOP, conn_action_stop },
986 { CONN_STATE_REGERR, CONN_EVENT_STOP, conn_action_stop },
987 { CONN_STATE_CONNERR, CONN_EVENT_STOP, conn_action_stop },
988
989 { CONN_STATE_STOPPED, CONN_EVENT_CONN_REQ, conn_action_connreject },
990 { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
991 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
992 { CONN_STATE_IDLE, CONN_EVENT_CONN_REQ, conn_action_connreject },
993 { CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject },
994
995 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack },
996 { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev },
997
998 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever },
999 { CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever },
1000 { CONN_STATE_TX, CONN_EVENT_CONN_REJ, conn_action_connsever },
1001
1002 { CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx },
1003 { CONN_STATE_TX, CONN_EVENT_RX, conn_action_rx },
1004
1005 { CONN_STATE_TX, CONN_EVENT_TXDONE, conn_action_txdone },
1006 { CONN_STATE_IDLE, CONN_EVENT_TXDONE, conn_action_txdone },
1007 };
1008
1009 static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
1010
1011
1012 /*
1013 * Actions for interface - statemachine.
1014 */
1015
1016 /**
1017 * dev_action_start
1018 * @fi: An instance of an interface statemachine.
 1019 * @event: The event that just happened.
 1020 * @arg: Generic pointer, cast from struct net_device * upon call.
1021 *
1022 * Startup connection by sending CONN_EVENT_START to it.
1023 */
1024 static void dev_action_start(fsm_instance *fi, int event, void *arg)
1025 {
1026 struct net_device *dev = arg;
1027 struct netiucv_priv *privptr = netdev_priv(dev);
1028
1029 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1030
1031 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1032 fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1033 }
1034
1035 /**
1036 * Shutdown connection by sending CONN_EVENT_STOP to it.
1037 *
1038 * @param fi An instance of an interface statemachine.
 1039 * @param event The event that just happened.
 1040 * @param arg Generic pointer, cast from struct net_device * upon call.
1041 */
1042 static void
1043 dev_action_stop(fsm_instance *fi, int event, void *arg)
1044 {
1045 struct net_device *dev = arg;
1046 struct netiucv_priv *privptr = netdev_priv(dev);
1047 struct iucv_event ev;
1048
1049 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1050
1051 ev.conn = privptr->conn;
1052
1053 fsm_newstate(fi, DEV_STATE_STOPWAIT);
1054 fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1055 }
1056
1057 /**
1058 * Called from connection statemachine
1059 * when a connection is up and running.
1060 *
1061 * @param fi An instance of an interface statemachine.
 1062 * @param event The event that just happened.
 1063 * @param arg Generic pointer, cast from struct net_device * upon call.
1064 */
1065 static void
1066 dev_action_connup(fsm_instance *fi, int event, void *arg)
1067 {
1068 struct net_device *dev = arg;
1069 struct netiucv_priv *privptr = netdev_priv(dev);
1070
1071 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1072
1073 switch (fsm_getstate(fi)) {
1074 case DEV_STATE_STARTWAIT:
1075 fsm_newstate(fi, DEV_STATE_RUNNING);
1076 PRINT_INFO("%s: connected with remote side %s\n",
1077 dev->name, privptr->conn->userid);
1078 IUCV_DBF_TEXT(setup, 3,
1079 "connection is up and running\n");
1080 break;
1081 case DEV_STATE_STOPWAIT:
1082 PRINT_INFO(
1083 "%s: got connection UP event during shutdown!\n",
1084 dev->name);
1085 IUCV_DBF_TEXT(data, 2,
1086 "dev_action_connup: in DEV_STATE_STOPWAIT\n");
1087 break;
1088 }
1089 }
1090
1091 /**
1092 * Called from connection statemachine
1093 * when a connection has been shutdown.
1094 *
1095 * @param fi An instance of an interface statemachine.
 1096 * @param event The event that just happened.
 1097 * @param arg Generic pointer, cast from struct net_device * upon call.
1098 */
1099 static void
1100 dev_action_conndown(fsm_instance *fi, int event, void *arg)
1101 {
1102 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1103
1104 switch (fsm_getstate(fi)) {
1105 case DEV_STATE_RUNNING:
1106 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1107 break;
1108 case DEV_STATE_STOPWAIT:
1109 fsm_newstate(fi, DEV_STATE_STOPPED);
1110 IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1111 break;
1112 }
1113 }
1114
1115 static const fsm_node dev_fsm[] = {
1116 { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
1117
1118 { DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start },
1119 { DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown },
1120
1121 { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop },
1122 { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup },
1123
1124 { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
1125 { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
1126 { DEV_STATE_RUNNING, DEV_EVENT_CONUP, fsm_action_nop },
1127 };
1128
1129 static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1130
1131 /**
1132 * Transmit a packet.
1133 * This is a helper function for netiucv_tx().
1134 *
1135 * @param conn Connection to be used for sending.
1136 * @param skb Pointer to struct sk_buff of packet to send.
1137 * The linklevel header has already been set up
1138 * by netiucv_tx().
1139 *
 1140 * @return 0 on success, -ERRNO on failure.
1141 */
1142 static int netiucv_transmit_skb(struct iucv_connection *conn,
1143 struct sk_buff *skb)
1144 {
1145 struct iucv_message msg;
1146 unsigned long saveflags;
1147 struct ll_header header;
1148 int rc;
1149
1150 if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1151 int l = skb->len + NETIUCV_HDRLEN;
1152
1153 spin_lock_irqsave(&conn->collect_lock, saveflags);
1154 if (conn->collect_len + l >
1155 (conn->max_buffsize - NETIUCV_HDRLEN)) {
1156 rc = -EBUSY;
1157 IUCV_DBF_TEXT(data, 2,
1158 "EBUSY from netiucv_transmit_skb\n");
1159 } else {
1160 atomic_inc(&skb->users);
1161 skb_queue_tail(&conn->collect_queue, skb);
1162 conn->collect_len += l;
1163 rc = 0;
1164 }
1165 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1166 } else {
1167 struct sk_buff *nskb = skb;
1168 /**
1169 * Copy the skb to a new allocated skb in lowmem only if the
1170 * data is located above 2G in memory or tailroom is < 2.
1171 */
1172 unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1173 NETIUCV_HDRLEN)) >> 31;
1174 int copied = 0;
1175 if (hi || (skb_tailroom(skb) < 2)) {
1176 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1177 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1178 if (!nskb) {
1179 PRINT_WARN("%s: Could not allocate tx_skb\n",
1180 conn->netdev->name);
1181 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1182 rc = -ENOMEM;
1183 return rc;
1184 } else {
1185 skb_reserve(nskb, NETIUCV_HDRLEN);
1186 memcpy(skb_put(nskb, skb->len),
1187 skb->data, skb->len);
1188 }
1189 copied = 1;
1190 }
1191 /**
1192 * skb now is below 2G and has enough room. Add headers.
1193 */
1194 header.next = nskb->len + NETIUCV_HDRLEN;
1195 memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1196 header.next = 0;
1197 memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1198
1199 fsm_newstate(conn->fsm, CONN_STATE_TX);
1200 conn->prof.send_stamp = xtime;
1201
1202 msg.tag = 1;
1203 msg.class = 0;
1204 rc = iucv_message_send(conn->path, &msg, 0, 0,
1205 nskb->data, nskb->len);
1206 conn->prof.doios_single++;
1207 conn->prof.txlen += skb->len;
1208 conn->prof.tx_pending++;
1209 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1210 conn->prof.tx_max_pending = conn->prof.tx_pending;
1211 if (rc) {
1212 struct netiucv_priv *privptr;
1213 fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1214 conn->prof.tx_pending--;
1215 privptr = netdev_priv(conn->netdev);
1216 if (privptr)
1217 privptr->stats.tx_errors++;
1218 if (copied)
1219 dev_kfree_skb(nskb);
1220 else {
1221 /**
1222 * Remove our headers. They get added
1223 * again on retransmit.
1224 */
1225 skb_pull(skb, NETIUCV_HDRLEN);
1226 skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1227 }
1228 PRINT_WARN("iucv_send returned %08x\n", rc);
1229 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1230 } else {
1231 if (copied)
1232 dev_kfree_skb(skb);
1233 atomic_inc(&nskb->users);
1234 skb_queue_tail(&conn->commit_queue, nskb);
1235 }
1236 }
1237
1238 return rc;
1239 }
1240
1241 /*
1242 * Interface API for upper network layers
1243 */
1244
1245 /**
1246 * Open an interface.
1247 * Called from generic network layer when ifconfig up is run.
1248 *
1249 * @param dev Pointer to interface struct.
1250 *
1251 * @return 0 on success, -ERRNO on failure. (Never fails.)
1252 */
1253 static int netiucv_open(struct net_device *dev)
1254 {
1255 struct netiucv_priv *priv = netdev_priv(dev);
1256
1257 fsm_event(priv->fsm, DEV_EVENT_START, dev);
1258 return 0;
1259 }
1260
1261 /**
1262 * Close an interface.
1263 * Called from generic network layer when ifconfig down is run.
1264 *
1265 * @param dev Pointer to interface struct.
1266 *
1267 * @return 0 on success, -ERRNO on failure. (Never fails.)
1268 */
1269 static int netiucv_close(struct net_device *dev)
1270 {
1271 struct netiucv_priv *priv = netdev_priv(dev);
1272
1273 fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1274 return 0;
1275 }
1276
1277 /**
1278 * Start transmission of a packet.
1279 * Called from generic network device layer.
1280 *
1281 * @param skb Pointer to buffer containing the packet.
1282 * @param dev Pointer to interface struct.
1283 *
1284 * @return 0 if packet consumed, !0 if packet rejected.
1285 * Note: If we return !0, then the packet is free'd by
1286 * the generic network layer.
1287 */
1288 static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1289 {
1290 struct netiucv_priv *privptr = netdev_priv(dev);
1291 int rc;
1292
1293 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1294 /**
1295 * Some sanity checks ...
1296 */
1297 if (skb == NULL) {
1298 PRINT_WARN("%s: NULL sk_buff passed\n", dev->name);
1299 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1300 privptr->stats.tx_dropped++;
1301 return 0;
1302 }
1303 if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1304 PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n",
1305 dev->name, NETIUCV_HDRLEN);
1306 IUCV_DBF_TEXT(data, 2,
1307 "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1308 dev_kfree_skb(skb);
1309 privptr->stats.tx_dropped++;
1310 return 0;
1311 }
1312
1313 /**
1314 * If connection is not running, try to restart it
1315 * and throw away packet.
1316 */
1317 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1318 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
1319 dev_kfree_skb(skb);
1320 privptr->stats.tx_dropped++;
1321 privptr->stats.tx_errors++;
1322 privptr->stats.tx_carrier_errors++;
1323 return 0;
1324 }
1325
1326 if (netiucv_test_and_set_busy(dev)) {
1327 IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1328 return -EBUSY;
1329 }
1330 dev->trans_start = jiffies;
1331 rc = netiucv_transmit_skb(privptr->conn, skb) != 0;
1332 netiucv_clear_busy(dev);
1333 return rc;
1334 }
1335
1336 /**
1337 * netiucv_stats
1338 * @dev: Pointer to interface struct.
1339 *
1340 * Returns interface statistics of a device.
1341 *
1342 * Returns pointer to stats struct of this interface.
1343 */
1344 static struct net_device_stats *netiucv_stats (struct net_device * dev)
1345 {
1346 struct netiucv_priv *priv = netdev_priv(dev);
1347
1348 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1349 return &priv->stats;
1350 }
1351
1352 /**
1353 * netiucv_change_mtu
1354 * @dev: Pointer to interface struct.
1355 * @new_mtu: The new MTU to use for this interface.
1356 *
1357 * Sets MTU of an interface.
1358 *
1359 * Returns 0 on success, -EINVAL if MTU is out of valid range.
1360 * (valid range is 576 .. NETIUCV_MTU_MAX).
1361 */
1362 static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1363 {
1364 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1365 if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1366 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1367 return -EINVAL;
1368 }
1369 dev->mtu = new_mtu;
1370 return 0;
1371 }
1372
1373 /*
1374 * attributes in sysfs
1375 */
1376
1377 static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1378 char *buf)
1379 {
1380 struct netiucv_priv *priv = dev->driver_data;
1381
1382 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1383 return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
1384 }
1385
1386 static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1387 const char *buf, size_t count)
1388 {
1389 struct netiucv_priv *priv = dev->driver_data;
1390 struct net_device *ndev = priv->conn->netdev;
1391 char *p;
1392 char *tmp;
1393 char username[9];
1394 int i;
1395 struct iucv_connection *cp;
1396
1397 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1398 if (count > 9) {
1399 PRINT_WARN("netiucv: username too long (%d)!\n", (int) count);
1400 IUCV_DBF_TEXT_(setup, 2,
1401 "%d is length of username\n", (int) count);
1402 return -EINVAL;
1403 }
1404
1405 tmp = strsep((char **) &buf, "\n");
1406 for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
1407 if (isalnum(*p) || (*p == '$')) {
1408 username[i]= toupper(*p);
1409 continue;
1410 }
1411 if (*p == '\n') {
1412 /* trailing lf, grr */
1413 break;
1414 }
1415 PRINT_WARN("netiucv: Invalid char %c in username!\n", *p);
1416 IUCV_DBF_TEXT_(setup, 2,
1417 "username: invalid character %c\n", *p);
1418 return -EINVAL;
1419 }
1420 while (i < 8)
1421 username[i++] = ' ';
1422 username[8] = '\0';
1423
1424 if (memcmp(username, priv->conn->userid, 9) &&
1425 (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1426 /* username changed while the interface is active. */
1427 PRINT_WARN("netiucv: device %s active, connected to %s\n",
1428 dev->bus_id, priv->conn->userid);
1429 PRINT_WARN("netiucv: user cannot be updated\n");
1430 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1431 return -EBUSY;
1432 }
1433 read_lock_bh(&iucv_connection_rwlock);
1434 list_for_each_entry(cp, &iucv_connection_list, list) {
1435 if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
1436 read_unlock_bh(&iucv_connection_rwlock);
1437 PRINT_WARN("netiucv: Connection to %s already "
1438 "exists\n", username);
1439 return -EEXIST;
1440 }
1441 }
1442 read_unlock_bh(&iucv_connection_rwlock);
1443 memcpy(priv->conn->userid, username, 9);
1444 return count;
1445 }
1446
1447 static DEVICE_ATTR(user, 0644, user_show, user_write);
1448
1449 static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1450 char *buf)
1451 { struct netiucv_priv *priv = dev->driver_data;
1452
1453 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1454 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1455 }
1456
1457 static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1458 const char *buf, size_t count)
1459 {
1460 struct netiucv_priv *priv = dev->driver_data;
1461 struct net_device *ndev = priv->conn->netdev;
1462 char *e;
1463 int bs1;
1464
1465 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1466 if (count >= 39)
1467 return -EINVAL;
1468
1469 bs1 = simple_strtoul(buf, &e, 0);
1470
1471 if (e && (!isspace(*e))) {
1472 PRINT_WARN("netiucv: Invalid character in buffer!\n");
1473 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
1474 return -EINVAL;
1475 }
1476 if (bs1 > NETIUCV_BUFSIZE_MAX) {
1477 PRINT_WARN("netiucv: Given buffer size %d too large.\n",
1478 bs1);
1479 IUCV_DBF_TEXT_(setup, 2,
1480 "buffer_write: buffer size %d too large\n",
1481 bs1);
1482 return -EINVAL;
1483 }
1484 if ((ndev->flags & IFF_RUNNING) &&
1485 (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1486 PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1487 bs1);
1488 IUCV_DBF_TEXT_(setup, 2,
1489 "buffer_write: buffer size %d too small\n",
1490 bs1);
1491 return -EINVAL;
1492 }
1493 if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1494 PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1495 bs1);
1496 IUCV_DBF_TEXT_(setup, 2,
1497 "buffer_write: buffer size %d too small\n",
1498 bs1);
1499 return -EINVAL;
1500 }
1501
1502 priv->conn->max_buffsize = bs1;
1503 if (!(ndev->flags & IFF_RUNNING))
1504 ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1505
1506 return count;
1507
1508 }
1509
1510 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1511
1512 static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1513 char *buf)
1514 {
1515 struct netiucv_priv *priv = dev->driver_data;
1516
1517 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1518 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1519 }
1520
1521 static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1522
1523 static ssize_t conn_fsm_show (struct device *dev,
1524 struct device_attribute *attr, char *buf)
1525 {
1526 struct netiucv_priv *priv = dev->driver_data;
1527
1528 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1529 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1530 }
1531
1532 static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1533
1534 static ssize_t maxmulti_show (struct device *dev,
1535 struct device_attribute *attr, char *buf)
1536 {
1537 struct netiucv_priv *priv = dev->driver_data;
1538
1539 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1540 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1541 }
1542
1543 static ssize_t maxmulti_write (struct device *dev,
1544 struct device_attribute *attr,
1545 const char *buf, size_t count)
1546 {
1547 struct netiucv_priv *priv = dev->driver_data;
1548
1549 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1550 priv->conn->prof.maxmulti = 0;
1551 return count;
1552 }
1553
1554 static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1555
1556 static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1557 char *buf)
1558 {
1559 struct netiucv_priv *priv = dev->driver_data;
1560
1561 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1562 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1563 }
1564
1565 static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1566 const char *buf, size_t count)
1567 {
1568 struct netiucv_priv *priv = dev->driver_data;
1569
1570 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1571 priv->conn->prof.maxcqueue = 0;
1572 return count;
1573 }
1574
1575 static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1576
1577 static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1578 char *buf)
1579 {
1580 struct netiucv_priv *priv = dev->driver_data;
1581
1582 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1583 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1584 }
1585
1586 static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1587 const char *buf, size_t count)
1588 {
1589 struct netiucv_priv *priv = dev->driver_data;
1590
1591 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1592 priv->conn->prof.doios_single = 0;
1593 return count;
1594 }
1595
1596 static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1597
1598 static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1599 char *buf)
1600 {
1601 struct netiucv_priv *priv = dev->driver_data;
1602
1603 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1604 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1605 }
1606
1607 static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1608 const char *buf, size_t count)
1609 {
1610 struct netiucv_priv *priv = dev->driver_data;
1611
1612 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1613 priv->conn->prof.doios_multi = 0;
1614 return count;
1615 }
1616
1617 static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1618
1619 static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1620 char *buf)
1621 {
1622 struct netiucv_priv *priv = dev->driver_data;
1623
1624 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1625 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1626 }
1627
1628 static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1629 const char *buf, size_t count)
1630 {
1631 struct netiucv_priv *priv = dev->driver_data;
1632
1633 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1634 priv->conn->prof.txlen = 0;
1635 return count;
1636 }
1637
1638 static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1639
1640 static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1641 char *buf)
1642 {
1643 struct netiucv_priv *priv = dev->driver_data;
1644
1645 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1646 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1647 }
1648
1649 static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1650 const char *buf, size_t count)
1651 {
1652 struct netiucv_priv *priv = dev->driver_data;
1653
1654 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1655 priv->conn->prof.tx_time = 0;
1656 return count;
1657 }
1658
1659 static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1660
1661 static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1662 char *buf)
1663 {
1664 struct netiucv_priv *priv = dev->driver_data;
1665
1666 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1667 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1668 }
1669
1670 static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1671 const char *buf, size_t count)
1672 {
1673 struct netiucv_priv *priv = dev->driver_data;
1674
1675 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1676 priv->conn->prof.tx_pending = 0;
1677 return count;
1678 }
1679
1680 static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1681
1682 static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1683 char *buf)
1684 {
1685 struct netiucv_priv *priv = dev->driver_data;
1686
1687 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1688 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1689 }
1690
1691 static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1692 const char *buf, size_t count)
1693 {
1694 struct netiucv_priv *priv = dev->driver_data;
1695
1696 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1697 priv->conn->prof.tx_max_pending = 0;
1698 return count;
1699 }
1700
1701 static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1702
1703 static struct attribute *netiucv_attrs[] = {
1704 &dev_attr_buffer.attr,
1705 &dev_attr_user.attr,
1706 NULL,
1707 };
1708
1709 static struct attribute_group netiucv_attr_group = {
1710 .attrs = netiucv_attrs,
1711 };
1712
1713 static struct attribute *netiucv_stat_attrs[] = {
1714 &dev_attr_device_fsm_state.attr,
1715 &dev_attr_connection_fsm_state.attr,
1716 &dev_attr_max_tx_buffer_used.attr,
1717 &dev_attr_max_chained_skbs.attr,
1718 &dev_attr_tx_single_write_ops.attr,
1719 &dev_attr_tx_multi_write_ops.attr,
1720 &dev_attr_netto_bytes.attr,
1721 &dev_attr_max_tx_io_time.attr,
1722 &dev_attr_tx_pending.attr,
1723 &dev_attr_tx_max_pending.attr,
1724 NULL,
1725 };
1726
1727 static struct attribute_group netiucv_stat_attr_group = {
1728 .name = "stats",
1729 .attrs = netiucv_stat_attrs,
1730 };
1731
1732 static inline int netiucv_add_files(struct device *dev)
1733 {
1734 int ret;
1735
1736 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1737 ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
1738 if (ret)
1739 return ret;
1740 ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
1741 if (ret)
1742 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1743 return ret;
1744 }
1745
1746 static inline void netiucv_remove_files(struct device *dev)
1747 {
1748 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1749 sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
1750 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1751 }
1752
1753 static int netiucv_register_device(struct net_device *ndev)
1754 {
1755 struct netiucv_priv *priv = netdev_priv(ndev);
1756 struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1757 int ret;
1758
1759
1760 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1761
1762 if (dev) {
1763 snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name);
1764 dev->bus = &iucv_bus;
1765 dev->parent = iucv_root;
1766 /*
1767 * The release function could be called after the
 1768 * module has been unloaded. Its _only_ task is to
 1769 * free the struct. Therefore, we specify kfree()
 1770 * directly here. (Probably a little bit obfuscating
 1771 * but legitimate ...).
1772 */
1773 dev->release = (void (*)(struct device *))kfree;
1774 dev->driver = &netiucv_driver;
1775 } else
1776 return -ENOMEM;
1777
1778 ret = device_register(dev);
1779
1780 if (ret)
1781 return ret;
1782 ret = netiucv_add_files(dev);
1783 if (ret)
1784 goto out_unreg;
1785 priv->dev = dev;
1786 dev->driver_data = priv;
1787 return 0;
1788
1789 out_unreg:
1790 device_unregister(dev);
1791 return ret;
1792 }
1793
1794 static void netiucv_unregister_device(struct device *dev)
1795 {
1796 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1797 netiucv_remove_files(dev);
1798 device_unregister(dev);
1799 }
1800
1801 /**
1802 * Allocate and initialize a new connection structure.
 1803 * Add it to the list of netiucv connections.
1804 */
1805 static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1806 char *username)
1807 {
1808 struct iucv_connection *conn;
1809
1810 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1811 if (!conn)
1812 goto out;
1813 skb_queue_head_init(&conn->collect_queue);
1814 skb_queue_head_init(&conn->commit_queue);
1815 spin_lock_init(&conn->collect_lock);
1816 conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1817 conn->netdev = dev;
1818
1819 conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1820 if (!conn->rx_buff)
1821 goto out_conn;
1822 conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1823 if (!conn->tx_buff)
1824 goto out_rx;
1825 conn->fsm = init_fsm("netiucvconn", conn_state_names,
1826 conn_event_names, NR_CONN_STATES,
1827 NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1828 GFP_KERNEL);
1829 if (!conn->fsm)
1830 goto out_tx;
1831
1832 fsm_settimer(conn->fsm, &conn->timer);
1833 fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1834
1835 if (username) {
1836 memcpy(conn->userid, username, 9);
1837 fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1838 }
1839
1840 write_lock_bh(&iucv_connection_rwlock);
1841 list_add_tail(&conn->list, &iucv_connection_list);
1842 write_unlock_bh(&iucv_connection_rwlock);
1843 return conn;
1844
1845 out_tx:
1846 kfree_skb(conn->tx_buff);
1847 out_rx:
1848 kfree_skb(conn->rx_buff);
1849 out_conn:
1850 kfree(conn);
1851 out:
1852 return NULL;
1853 }
1854
1855 /**
1856 * Release a connection structure and remove it from the
1857 * list of netiucv connections.
1858 */
1859 static void netiucv_remove_connection(struct iucv_connection *conn)
1860 {
1861 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1862 write_lock_bh(&iucv_connection_rwlock);
1863 list_del_init(&conn->list);
1864 write_unlock_bh(&iucv_connection_rwlock);
1865 if (conn->path) {
1866 iucv_path_sever(conn->path, iucvMagic);
1867 kfree(conn->path);
1868 conn->path = NULL;
1869 }
1870 fsm_deltimer(&conn->timer);
1871 kfree_fsm(conn->fsm);
1872 kfree_skb(conn->rx_buff);
1873 kfree_skb(conn->tx_buff);
1874 }
1875
1876 /**
1877 * Release everything of a net device.
1878 */
1879 static void netiucv_free_netdevice(struct net_device *dev)
1880 {
1881 struct netiucv_priv *privptr = netdev_priv(dev);
1882
1883 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1884
1885 if (!dev)
1886 return;
1887
1888 if (privptr) {
1889 if (privptr->conn)
1890 netiucv_remove_connection(privptr->conn);
1891 if (privptr->fsm)
1892 kfree_fsm(privptr->fsm);
1893 privptr->conn = NULL; privptr->fsm = NULL;
1894 /* privptr gets freed by free_netdev() */
1895 }
1896 free_netdev(dev);
1897 }
1898
1899 /**
1900 * Initialize a net device. (Called from kernel in alloc_netdev())
1901 */
1902 static void netiucv_setup_netdevice(struct net_device *dev)
1903 {
1904 dev->mtu = NETIUCV_MTU_DEFAULT;
1905 dev->hard_start_xmit = netiucv_tx;
1906 dev->open = netiucv_open;
1907 dev->stop = netiucv_close;
1908 dev->get_stats = netiucv_stats;
1909 dev->change_mtu = netiucv_change_mtu;
1910 dev->destructor = netiucv_free_netdevice;
1911 dev->hard_header_len = NETIUCV_HDRLEN;
1912 dev->addr_len = 0;
1913 dev->type = ARPHRD_SLIP;
1914 dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
1915 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1916 SET_MODULE_OWNER(dev);
1917 }
1918
1919 /**
1920 * Allocate and initialize everything of a net device.
1921 */
1922 static struct net_device *netiucv_init_netdevice(char *username)
1923 {
1924 struct netiucv_priv *privptr;
1925 struct net_device *dev;
1926
1927 dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
1928 netiucv_setup_netdevice);
1929 if (!dev)
1930 return NULL;
1931 if (dev_alloc_name(dev, dev->name) < 0)
1932 goto out_netdev;
1933
1934 privptr = netdev_priv(dev);
1935 privptr->fsm = init_fsm("netiucvdev", dev_state_names,
1936 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
1937 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
1938 if (!privptr->fsm)
1939 goto out_netdev;
1940
1941 privptr->conn = netiucv_new_connection(dev, username);
1942 if (!privptr->conn) {
1943 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
1944 goto out_fsm;
1945 }
1946 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
1947 return dev;
1948
1949 out_fsm:
1950 kfree_fsm(privptr->fsm);
1951 out_netdev:
1952 free_netdev(dev);
1953 return NULL;
1954 }
1955
1956 static ssize_t conn_write(struct device_driver *drv,
1957 const char *buf, size_t count)
1958 {
1959 const char *p;
1960 char username[9];
1961 int i, rc;
1962 struct net_device *dev;
1963 struct netiucv_priv *priv;
1964 struct iucv_connection *cp;
1965
1966 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1967 if (count>9) {
1968 PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
1969 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1970 return -EINVAL;
1971 }
1972
1973 for (i = 0, p = buf; i < 8 && *p; i++, p++) {
1974 if (isalnum(*p) || *p == '$') {
1975 username[i] = toupper(*p);
1976 continue;
1977 }
1978 if (*p == '\n')
1979 /* trailing lf, grr */
1980 break;
1981 PRINT_WARN("netiucv: Invalid character in username!\n");
1982 IUCV_DBF_TEXT_(setup, 2,
1983 "conn_write: invalid character %c\n", *p);
1984 return -EINVAL;
1985 }
1986 while (i < 8)
1987 username[i++] = ' ';
1988 username[8] = '\0';
1989
1990 read_lock_bh(&iucv_connection_rwlock);
1991 list_for_each_entry(cp, &iucv_connection_list, list) {
1992 if (!strncmp(username, cp->userid, 9)) {
1993 read_unlock_bh(&iucv_connection_rwlock);
1994 PRINT_WARN("netiucv: Connection to %s already "
1995 "exists\n", username);
1996 return -EEXIST;
1997 }
1998 }
1999 read_unlock_bh(&iucv_connection_rwlock);
2000
2001 dev = netiucv_init_netdevice(username);
2002 if (!dev) {
2003 PRINT_WARN("netiucv: Could not allocate network device "
2004 "structure for user '%s'\n",
2005 netiucv_printname(username));
2006 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
2007 return -ENODEV;
2008 }
2009
2010 rc = netiucv_register_device(dev);
2011 if (rc) {
2012 IUCV_DBF_TEXT_(setup, 2,
2013 "ret %d from netiucv_register_device\n", rc);
2014 goto out_free_ndev;
2015 }
2016
2017 /* sysfs magic */
2018 priv = netdev_priv(dev);
2019 SET_NETDEV_DEV(dev, priv->dev);
2020
2021 rc = register_netdev(dev);
2022 if (rc)
2023 goto out_unreg;
2024
2025 PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));
2026
2027 return count;
2028
2029 out_unreg:
2030 netiucv_unregister_device(priv->dev);
2031 out_free_ndev:
2032 PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
2033 IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
2034 netiucv_free_netdevice(dev);
2035 return rc;
2036 }
2037
2038 static DRIVER_ATTR(connection, 0200, NULL, conn_write);
2039
2040 static ssize_t remove_write (struct device_driver *drv,
2041 const char *buf, size_t count)
2042 {
2043 struct iucv_connection *cp;
2044 struct net_device *ndev;
2045 struct netiucv_priv *priv;
2046 struct device *dev;
2047 char name[IFNAMSIZ];
2048 const char *p;
2049 int i;
2050
2051 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2052
2053 if (count >= IFNAMSIZ)
 2054 count = IFNAMSIZ - 1;
2055
2056 for (i = 0, p = buf; i < count && *p; i++, p++) {
2057 if (*p == '\n' || *p == ' ')
2058 /* trailing lf, grr */
2059 break;
2060 name[i] = *p;
2061 }
2062 name[i] = '\0';
2063
2064 read_lock_bh(&iucv_connection_rwlock);
2065 list_for_each_entry(cp, &iucv_connection_list, list) {
2066 ndev = cp->netdev;
2067 priv = netdev_priv(ndev);
2068 dev = priv->dev;
2069 if (strncmp(name, ndev->name, count))
2070 continue;
2071 read_unlock_bh(&iucv_connection_rwlock);
2072 if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2073 PRINT_WARN("netiucv: net device %s active with peer "
2074 "%s\n", ndev->name, priv->conn->userid);
2075 PRINT_WARN("netiucv: %s cannot be removed\n",
2076 ndev->name);
2077 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2078 return -EBUSY;
2079 }
2080 unregister_netdev(ndev);
2081 netiucv_unregister_device(dev);
2082 return count;
2083 }
2084 read_unlock_bh(&iucv_connection_rwlock);
2085 PRINT_WARN("netiucv: net device %s unknown\n", name);
2086 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2087 return -EINVAL;
2088 }
2089
2090 static DRIVER_ATTR(remove, 0200, NULL, remove_write);
2091
2092 static struct attribute * netiucv_drv_attrs[] = {
2093 &driver_attr_connection.attr,
2094 &driver_attr_remove.attr,
2095 NULL,
2096 };
2097
2098 static struct attribute_group netiucv_drv_attr_group = {
2099 .attrs = netiucv_drv_attrs,
2100 };
2101
2102 static void netiucv_banner(void)
2103 {
2104 PRINT_INFO("NETIUCV driver initialized\n");
2105 }
2106
2107 static void __exit netiucv_exit(void)
2108 {
2109 struct iucv_connection *cp;
2110 struct net_device *ndev;
2111 struct netiucv_priv *priv;
2112 struct device *dev;
2113
2114 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2115 while (!list_empty(&iucv_connection_list)) {
2116 cp = list_entry(iucv_connection_list.next,
2117 struct iucv_connection, list);
2118 list_del(&cp->list);
2119 ndev = cp->netdev;
2120 priv = netdev_priv(ndev);
2121 dev = priv->dev;
2122
2123 unregister_netdev(ndev);
2124 netiucv_unregister_device(dev);
2125 }
2126
2127 sysfs_remove_group(&netiucv_driver.kobj, &netiucv_drv_attr_group);
2128 driver_unregister(&netiucv_driver);
2129 iucv_unregister(&netiucv_handler, 1);
2130 iucv_unregister_dbf_views();
2131
2132 PRINT_INFO("NETIUCV driver unloaded\n");
2133 return;
2134 }
2135
2136 static int __init netiucv_init(void)
2137 {
2138 int rc;
2139
2140 rc = iucv_register_dbf_views();
2141 if (rc)
2142 goto out;
2143 rc = iucv_register(&netiucv_handler, 1);
2144 if (rc)
2145 goto out_dbf;
2146 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2147 rc = driver_register(&netiucv_driver);
2148 if (rc) {
2149 PRINT_ERR("NETIUCV: failed to register driver.\n");
2150 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2151 goto out_iucv;
2152 }
2153
2154 rc = sysfs_create_group(&netiucv_driver.kobj, &netiucv_drv_attr_group);
2155 if (rc) {
2156 PRINT_ERR("NETIUCV: failed to add driver attributes.\n");
2157 IUCV_DBF_TEXT_(setup, 2,
2158 "ret %d - netiucv_drv_attr_group\n", rc);
2159 goto out_driver;
2160 }
2161 netiucv_banner();
2162 return rc;
2163
2164 out_driver:
2165 driver_unregister(&netiucv_driver);
2166 out_iucv:
2167 iucv_unregister(&netiucv_handler, 1);
2168 out_dbf:
2169 iucv_unregister_dbf_views();
2170 out:
2171 return rc;
2172 }
2173
2174 module_init(netiucv_init);
2175 module_exit(netiucv_exit);
2176 MODULE_LICENSE("GPL");