[PKTGEN]: Respect hard_header_len of device.
[deliverable/linux.git] / net / core / pktgen.c
1 /*
2 * Authors:
3 * Copyright 2001, 2002 by Robert Olsson <robert.olsson@its.uu.se>
4 * Uppsala University and
5 * Swedish University of Agricultural Sciences
6 *
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 * Ben Greear <greearb@candelatech.com>
9 * Jens Låås <jens.laas@data.slu.se>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 *
16 *
17 * A tool for loading the network with preconfigured packets.
18 * The tool is implemented as a Linux kernel module. Parameters are the output
19 * device, delay (to hard_xmit), number of packets, and whether
20 * to use multiple SKBs or just the same one.
21 * pktgen uses the installed interface's output routine.
22 *
23 * Additional hacking by:
24 *
25 * Jens.Laas@data.slu.se
26 * Improved by ANK. 010120.
27 * Improved by ANK even more. 010212.
28 * MAC address typo fixed. 010417 --ro
29 * Integrated. 020301 --DaveM
30 * Added multiskb option 020301 --DaveM
31 * Scaling of results. 020417--sigurdur@linpro.no
32 * Significant re-work of the module:
33 * * Convert to threaded model to more efficiently be able to transmit
34 * and receive on multiple interfaces at once.
35 * * Converted many counters to __u64 to allow longer runs.
36 * * Allow configuration of ranges, like min/max IP address, MACs,
37 * and UDP-ports, for both source and destination, and can
38 * set to use a random distribution or sequentially walk the range.
39 * * Can now change most values after starting.
40 * * Place 12-byte packet in UDP payload with magic number,
41 * sequence number, and timestamp.
42 * * Add receiver code that detects dropped pkts, re-ordered pkts, and
43 * latencies (with micro-second precision).
44 * * Add IOCTL interface to easily get counters & configuration.
45 * --Ben Greear <greearb@candelatech.com>
46 *
47 * Renamed multiskb to clone_skb and cleaned up sending core for two distinct
48 * skb modes. A clone_skb=0 mode for Ben's "ranges" work and a clone_skb != 0
49 * as a "fastpath" with a configurable number of clones after alloc's.
50 * clone_skb=0 means all packets are allocated; this also means ranges, time
51 * stamps, etc. can be used. clone_skb=100 means one allocation is followed by
52 * 100 clones.
53 *
54 * Also moved to /proc/net/pktgen/
55 * --ro
56 *
57 * Sept 10: Fixed threading/locking. Lots of bone-headed and more clever
58 * mistakes. Also merged in DaveM's patch in the -pre6 patch.
59 * --Ben Greear <greearb@candelatech.com>
60 *
61 * Integrated to 2.5.x 021029 --Lucio Maciel (luciomaciel@zipmail.com.br)
62 *
63 *
64 * 021124 Finished major redesign and rewrite for new functionality.
65 * See Documentation/networking/pktgen.txt for how to use this.
66 *
67 * The new operation:
68 * For each CPU one thread/process is created at start. This thread checks
69 * for running devices in the if_list and sends packets until count is 0.
70 * The thread also checks thread->control, which is used for inter-process
71 * communication; the controlling process "posts" operations to the threads
72 * this way. The if_lock should be possible to remove once add/rem_device is
73 * merged into this scheme too.
74 *
75 * By design there should only be *one* "controlling" process. In practice
76 * multiple write accesses give unpredictable results. Note that a "write"
77 * to /proc returns a result code that should be read back by the "writer".
78 * For practical use this should be no problem.
79 *
80 * Note: when adding devices to a specific CPU it is a good idea to also assign
81 * /proc/irq/XX/smp_affinity so TX interrupts get bound to the same CPU.
82 * --ro
83 *
84 * Fix refcount off by one if first packet fails, potential null deref,
85 * memleak 030710- KJP
86 *
87 * First "ranges" functionality for ipv6 030726 --ro
88 *
89 * Included flow support. 030802 ANK.
90 *
91 * Fixed unaligned access on IA-64 Grant Grundler <grundler@parisc-linux.org>
92 *
93 * Remove if fix from added Harald Welte <laforge@netfilter.org> 040419
94 * ia64 compilation fix from Aron Griffis <aron@hp.com> 040604
95 *
96 * New xmit() return, do_div and misc clean up by Stephen Hemminger
97 * <shemminger@osdl.org> 040923
98 *
99 * Randy Dunlap fixed u64 printk compiler warning
100 *
101 * Remove FCS from BW calculation. Lennert Buytenhek <buytenh@wantstofly.org>
102 * New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213
103 *
104 * Corrections from Nikolai Malykh (nmalykh@bilim.com)
105 * Removed unused flags F_SET_SRCMAC & F_SET_SRCIP 041230
106 *
107 * interruptible_sleep_on_timeout() replaced Nishanth Aravamudan <nacc@us.ibm.com>
108 * 050103
109 */
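/*
 * Rough usage sketch (the authoritative guide is
 * Documentation/networking/pktgen.txt; the exact thread file names, e.g.
 * kpktgend_0, depend on the running system).  Everything is driven by
 * writing commands to the files under /proc/net/pktgen/:
 *
 *   echo "add_device eth0" > /proc/net/pktgen/kpktgend_0
 *   echo "count 100000"    > /proc/net/pktgen/eth0
 *   echo "pkt_size 60"     > /proc/net/pktgen/eth0
 *   echo "dst 10.0.0.2"    > /proc/net/pktgen/eth0
 *   echo "dst_mac 00:11:22:33:44:55" > /proc/net/pktgen/eth0
 *   echo "start"           > /proc/net/pktgen/pgctrl
 *
 * Parameters, current state and results can be read back from the same
 * per-interface file (e.g. "cat /proc/net/pktgen/eth0").
 */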
110 #include <linux/sys.h>
111 #include <linux/types.h>
112 #include <linux/module.h>
113 #include <linux/moduleparam.h>
114 #include <linux/kernel.h>
115 #include <linux/smp_lock.h>
116 #include <linux/sched.h>
117 #include <linux/slab.h>
118 #include <linux/vmalloc.h>
119 #include <linux/unistd.h>
120 #include <linux/string.h>
121 #include <linux/ptrace.h>
122 #include <linux/errno.h>
123 #include <linux/ioport.h>
124 #include <linux/interrupt.h>
125 #include <linux/capability.h>
126 #include <linux/delay.h>
127 #include <linux/timer.h>
128 #include <linux/init.h>
129 #include <linux/skbuff.h>
130 #include <linux/netdevice.h>
131 #include <linux/inet.h>
132 #include <linux/inetdevice.h>
133 #include <linux/rtnetlink.h>
134 #include <linux/if_arp.h>
135 #include <linux/in.h>
136 #include <linux/ip.h>
137 #include <linux/ipv6.h>
138 #include <linux/udp.h>
139 #include <linux/proc_fs.h>
140 #include <linux/seq_file.h>
141 #include <linux/wait.h>
142 #include <linux/etherdevice.h>
143 #include <net/checksum.h>
144 #include <net/ipv6.h>
145 #include <net/addrconf.h>
146 #include <asm/byteorder.h>
147 #include <linux/rcupdate.h>
148 #include <asm/bitops.h>
149 #include <asm/io.h>
150 #include <asm/dma.h>
151 #include <asm/uaccess.h>
152 #include <asm/div64.h> /* do_div */
153 #include <asm/timex.h>
154
155
156 #define VERSION "pktgen v2.63: Packet Generator for packet performance testing.\n"
157
158 /* #define PG_DEBUG(a) a */
159 #define PG_DEBUG(a)
160
161 /* The buckets are exponential in 'width' */
162 #define LAT_BUCKETS_MAX 32
163 #define IP_NAME_SZ 32
164
165 /* Device flag bits */
166 #define F_IPSRC_RND (1<<0) /* IP-Src Random */
167 #define F_IPDST_RND (1<<1) /* IP-Dst Random */
168 #define F_UDPSRC_RND (1<<2) /* UDP-Src Random */
169 #define F_UDPDST_RND (1<<3) /* UDP-Dst Random */
170 #define F_MACSRC_RND (1<<4) /* MAC-Src Random */
171 #define F_MACDST_RND (1<<5) /* MAC-Dst Random */
172 #define F_TXSIZE_RND (1<<6) /* Transmit size is random */
173 #define F_IPV6 (1<<7) /* Interface in IPV6 Mode */
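/*
 * The device flags above are toggled at run time through the per-device
 * pktgen /proc file, e.g. 'echo "flag IPDST_RND" > /proc/net/pktgen/ethX'
 * sets a flag and 'flag !IPDST_RND' clears it again; see the "flag"
 * handling in pktgen_if_write() below.
 */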
174
175 /* Thread control flag bits */
176 #define T_TERMINATE (1<<0)
177 #define T_STOP (1<<1) /* Stop run */
178 #define T_RUN (1<<2) /* Start run */
179 #define T_REMDEV (1<<3) /* Remove all devs */
180
181 /* Locks */
182 #define thread_lock() down(&pktgen_sem)
183 #define thread_unlock() up(&pktgen_sem)
184
185 /* If lock -- can be removed after some work */
186 #define if_lock(t) spin_lock(&(t->if_lock));
187 #define if_unlock(t) spin_unlock(&(t->if_lock));
188
189 /* Magic value used to identify pktgen packets on the receive side */
190 #define PKTGEN_MAGIC 0xbe9be955
191 #define PG_PROC_DIR "pktgen"
192 #define PGCTRL "pgctrl"
193 static struct proc_dir_entry *pg_proc_dir = NULL;
194
195 #define MAX_CFLOWS 65536
196
197 struct flow_state
198 {
199 __u32 cur_daddr;
200 int count;
201 };
202
203 struct pktgen_dev {
204
205 /*
206 * Try to keep frequently and infrequently used vars separated.
207 */
208
209 char ifname[IFNAMSIZ];
210 char result[512];
211
212 struct pktgen_thread* pg_thread; /* the owner */
213 struct pktgen_dev *next; /* Used for chaining in the thread's run-queue */
214
215 int running; /* if this changes to false, the test will stop */
216
217 /* If min != max, then we will either do a linear iteration, or
218 * we will do a random selection from within the range.
219 */
220 __u32 flags;
221
222 int min_pkt_size; /* = ETH_ZLEN; */
223 int max_pkt_size; /* = ETH_ZLEN; */
224 int nfrags;
225 __u32 delay_us; /* Default delay */
226 __u32 delay_ns;
227 __u64 count; /* Default No packets to send */
228 __u64 sofar; /* How many pkts we've sent so far */
229 __u64 tx_bytes; /* How many bytes we've transmitted */
230 __u64 errors; /* Errors when trying to transmit, pkts will be re-sent */
231
232 /* runtime counters relating to clone_skb */
233 __u64 next_tx_us; /* timestamp of when to tx next */
234 __u32 next_tx_ns;
235
236 __u64 allocated_skbs;
237 __u32 clone_count;
238 int last_ok; /* Was last skb sent?
239 * Or a failed transmit of some sort? This will keep
240 * sequence numbers in order, for example.
241 */
242 __u64 started_at; /* micro-seconds */
243 __u64 stopped_at; /* micro-seconds */
244 __u64 idle_acc; /* micro-seconds */
245 __u32 seq_num;
246
247 int clone_skb; /* Use multiple SKBs during packet gen. If this number
248 * is greater than 1, then that many copies of the same
249 * packet will be sent before a new packet is allocated.
250 * For instance, if you want to send 1024 identical packets
251 * before creating a new packet, set clone_skb to 1024.
252 */
253
254 char dst_min[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */
255 char dst_max[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */
256 char src_min[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */
257 char src_max[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */
258
259 struct in6_addr in6_saddr;
260 struct in6_addr in6_daddr;
261 struct in6_addr cur_in6_daddr;
262 struct in6_addr cur_in6_saddr;
263 /* For ranges */
264 struct in6_addr min_in6_daddr;
265 struct in6_addr max_in6_daddr;
266 struct in6_addr min_in6_saddr;
267 struct in6_addr max_in6_saddr;
268
269 /* If we're doing ranges, random or incremental, then this
270 * defines the min/max for those ranges.
271 */
272 __u32 saddr_min; /* inclusive, source IP address */
273 __u32 saddr_max; /* exclusive, source IP address */
274 __u32 daddr_min; /* inclusive, dest IP address */
275 __u32 daddr_max; /* exclusive, dest IP address */
276
277 __u16 udp_src_min; /* inclusive, source UDP port */
278 __u16 udp_src_max; /* exclusive, source UDP port */
279 __u16 udp_dst_min; /* inclusive, dest UDP port */
280 __u16 udp_dst_max; /* exclusive, dest UDP port */
281
282 __u32 src_mac_count; /* How many MACs to iterate through */
283 __u32 dst_mac_count; /* How many MACs to iterate through */
284
285 unsigned char dst_mac[ETH_ALEN];
286 unsigned char src_mac[ETH_ALEN];
287
288 __u32 cur_dst_mac_offset;
289 __u32 cur_src_mac_offset;
290 __u32 cur_saddr;
291 __u32 cur_daddr;
292 __u16 cur_udp_dst;
293 __u16 cur_udp_src;
294 __u32 cur_pkt_size;
295
296 __u8 hh[14];
297 /* = {
298 0x00, 0x80, 0xC8, 0x79, 0xB3, 0xCB,
299
300 We fill in SRC address later
301 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
302 0x08, 0x00
303 };
304 */
305 __u16 pad; /* pad out the hh struct to an even 16 bytes */
306
307 struct sk_buff* skb; /* skb we are to transmit next, mainly used for when we
308 * are transmitting the same one multiple times
309 */
310 struct net_device* odev; /* The out-going device. Note that the device should
311 * have its pg_info pointer pointing back to this
312 * device. This will be set when the user specifies
313 * the out-going device name (not when the inject is
314 * started as it used to do.)
315 */
316 struct flow_state *flows;
317 unsigned cflows; /* Concurrent flows (config) */
318 unsigned lflow; /* Flow length (config) */
319 unsigned nflows; /* accumulated flows (stats) */
320 };
321
322 struct pktgen_hdr {
323 __u32 pgh_magic;
324 __u32 seq_num;
325 __u32 tv_sec;
326 __u32 tv_usec;
327 };
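/*
 * This header is written at the start of the UDP payload, with all fields
 * in network byte order (see fill_packet_ipv4()).  A minimal receive-side
 * check -- purely illustrative, not part of this module, and udp_payload
 * is just a placeholder pointer -- might look like:
 *
 *	struct pktgen_hdr *pgh = (struct pktgen_hdr *)udp_payload;
 *	if (ntohl(pgh->pgh_magic) == PKTGEN_MAGIC) {
 *		seq  = ntohl(pgh->seq_num);	// gaps => dropped/re-ordered pkts
 *		secs = ntohl(pgh->tv_sec);	// transmit timestamp, for latency
 *		usec = ntohl(pgh->tv_usec);
 *	}
 */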
328
329 struct pktgen_thread {
330 spinlock_t if_lock;
331 struct pktgen_dev *if_list; /* All devices here */
332 struct pktgen_thread* next;
333 char name[32];
334 char result[512];
335 u32 max_before_softirq; /* We'll call do_softirq to prevent starvation. */
336
337 /* Field for thread to receive "posted" events terminate, stop ifs etc.*/
338
339 u32 control;
340 int pid;
341 int cpu;
342
343 wait_queue_head_t queue;
344 };
345
346 #define REMOVE 1
347 #define FIND 0
348
349 /* This code works around the fact that do_div cannot handle two 64-bit
350 numbers, and regular 64-bit division doesn't work on x86 kernels.
351 --Ben
352 */
353
354 #define PG_DIV 0
355
356 /* This was emailed to LKML by: Chris Caputo <ccaputo@alt.net>
357 * Function copied/adapted/optimized from:
358 *
359 * nemesis.sourceforge.net/browse/lib/static/intmath/ix86/intmath.c.html
360 *
361 * Copyright 1994, University of Cambridge Computer Laboratory
362 * All Rights Reserved.
363 *
364 */
365 static inline s64 divremdi3(s64 x, s64 y, int type)
366 {
367 u64 a = (x < 0) ? -x : x;
368 u64 b = (y < 0) ? -y : y;
369 u64 res = 0, d = 1;
370
371 if (b > 0) {
372 while (b < a) {
373 b <<= 1;
374 d <<= 1;
375 }
376 }
377
378 do {
379 if ( a >= b ) {
380 a -= b;
381 res += d;
382 }
383 b >>= 1;
384 d >>= 1;
385 }
386 while (d);
387
388 if (PG_DIV == type) {
389 return (((x ^ y) & (1ll<<63)) == 0) ? res : -(s64)res;
390 }
391 else {
392 return ((x & (1ll<<63)) == 0) ? a : -(s64)a;
393 }
394 }
395
396 /* End of hacks to deal with 64-bit math on x86 */
397
398 /** Convert to milliseconds */
399 static inline __u64 tv_to_ms(const struct timeval* tv)
400 {
401 __u64 ms = tv->tv_usec / 1000;
402 ms += (__u64)tv->tv_sec * (__u64)1000;
403 return ms;
404 }
405
406
407 /** Convert to micro-seconds */
408 static inline __u64 tv_to_us(const struct timeval* tv)
409 {
410 __u64 us = tv->tv_usec;
411 us += (__u64)tv->tv_sec * (__u64)1000000;
412 return us;
413 }
414
415 static inline __u64 pg_div(__u64 n, __u32 base) {
416 __u64 tmp = n;
417 do_div(tmp, base);
418 /* printk("pktgen: pg_div, n: %llu base: %d rv: %llu\n",
419 n, base, tmp); */
420 return tmp;
421 }
422
423 static inline __u64 pg_div64(__u64 n, __u64 base)
424 {
425 __u64 tmp = n;
426 /*
427 * How do we know if the architecture we are running on
428 * supports division with 64 bit base?
429 *
430 */
431 #if defined(__sparc_v9__) || defined(__powerpc64__) || defined(__alpha__) || defined(__x86_64__) || defined(__ia64__)
432
433 do_div(tmp, base);
434 #else
435 tmp = divremdi3(n, base, PG_DIV);
436 #endif
437 return tmp;
438 }
439
440 static inline u32 pktgen_random(void)
441 {
442 #if 0
443 __u32 n;
444 get_random_bytes(&n, 4);
445 return n;
446 #else
447 return net_random();
448 #endif
449 }
450
451 static inline __u64 getCurMs(void)
452 {
453 struct timeval tv;
454 do_gettimeofday(&tv);
455 return tv_to_ms(&tv);
456 }
457
458 static inline __u64 getCurUs(void)
459 {
460 struct timeval tv;
461 do_gettimeofday(&tv);
462 return tv_to_us(&tv);
463 }
464
465 static inline __u64 tv_diff(const struct timeval* a, const struct timeval* b)
466 {
467 return tv_to_us(a) - tv_to_us(b);
468 }
469
470
471 /* old include end */
472
473 static char version[] __initdata = VERSION;
474
475 static int pktgen_remove_device(struct pktgen_thread* t, struct pktgen_dev *i);
476 static int pktgen_add_device(struct pktgen_thread* t, const char* ifname);
477 static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread* t, const char* ifname);
478 static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
479 static void pktgen_run_all_threads(void);
480 static void pktgen_stop_all_threads_ifs(void);
481 static int pktgen_stop_device(struct pktgen_dev *pkt_dev);
482 static void pktgen_stop(struct pktgen_thread* t);
483 static void pktgen_clear_counters(struct pktgen_dev *pkt_dev);
484 static struct pktgen_dev *pktgen_NN_threads(const char* dev_name, int remove);
485 static unsigned int scan_ip6(const char *s,char ip[16]);
486 static unsigned int fmt_ip6(char *s,const char ip[16]);
487
488 /* Module parameters, defaults. */
489 static int pg_count_d = 1000; /* 1000 pkts by default */
490 static int pg_delay_d;
491 static int pg_clone_skb_d;
492 static int debug;
493
494 static DECLARE_MUTEX(pktgen_sem);
495 static struct pktgen_thread *pktgen_threads = NULL;
496
497 static struct notifier_block pktgen_notifier_block = {
498 .notifier_call = pktgen_device_event,
499 };
500
501 /*
502 * /proc handling functions
503 *
504 */
505
506 static int pgctrl_show(struct seq_file *seq, void *v)
507 {
508 seq_puts(seq, VERSION);
509 return 0;
510 }
511
512 static ssize_t pgctrl_write(struct file* file,const char __user * buf,
513 size_t count, loff_t *ppos)
514 {
515 int err = 0;
516 char data[128];
517
518 if (!capable(CAP_NET_ADMIN)){
519 err = -EPERM;
520 goto out;
521 }
522
523 if (count > sizeof(data))
524 count = sizeof(data);
525
526 if (copy_from_user(data, buf, count)) {
527 err = -EFAULT;
528 goto out;
529 }
530 data[count-1] = 0; /* Make string */
531
532 if (!strcmp(data, "stop"))
533 pktgen_stop_all_threads_ifs();
534
535 else if (!strcmp(data, "start"))
536 pktgen_run_all_threads();
537
538 else
539 printk("pktgen: Unknown command: %s\n", data);
540
541 err = count;
542
543 out:
544 return err;
545 }
546
547 static int pgctrl_open(struct inode *inode, struct file *file)
548 {
549 return single_open(file, pgctrl_show, PDE(inode)->data);
550 }
551
552 static struct file_operations pktgen_fops = {
553 .owner = THIS_MODULE,
554 .open = pgctrl_open,
555 .read = seq_read,
556 .llseek = seq_lseek,
557 .write = pgctrl_write,
558 .release = single_release,
559 };
560
561 static int pktgen_if_show(struct seq_file *seq, void *v)
562 {
563 int i;
564 struct pktgen_dev *pkt_dev = seq->private;
565 __u64 sa;
566 __u64 stopped;
567 __u64 now = getCurUs();
568
569 seq_printf(seq, "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n",
570 (unsigned long long) pkt_dev->count,
571 pkt_dev->min_pkt_size, pkt_dev->max_pkt_size);
572
573 seq_printf(seq, " frags: %d delay: %u clone_skb: %d ifname: %s\n",
574 pkt_dev->nfrags, 1000*pkt_dev->delay_us+pkt_dev->delay_ns, pkt_dev->clone_skb, pkt_dev->ifname);
575
576 seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, pkt_dev->lflow);
577
578
579 if(pkt_dev->flags & F_IPV6) {
580 char b1[128], b2[128], b3[128];
581 fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr);
582 fmt_ip6(b2, pkt_dev->min_in6_saddr.s6_addr);
583 fmt_ip6(b3, pkt_dev->max_in6_saddr.s6_addr);
584 seq_printf(seq, " saddr: %s min_saddr: %s max_saddr: %s\n", b1, b2, b3);
585
586 fmt_ip6(b1, pkt_dev->in6_daddr.s6_addr);
587 fmt_ip6(b2, pkt_dev->min_in6_daddr.s6_addr);
588 fmt_ip6(b3, pkt_dev->max_in6_daddr.s6_addr);
589 seq_printf(seq, " daddr: %s min_daddr: %s max_daddr: %s\n", b1, b2, b3);
590
591 }
592 else
593 seq_printf(seq," dst_min: %s dst_max: %s\n src_min: %s src_max: %s\n",
594 pkt_dev->dst_min, pkt_dev->dst_max, pkt_dev->src_min, pkt_dev->src_max);
595
596 seq_puts(seq, " src_mac: ");
597
598 if (is_zero_ether_addr(pkt_dev->src_mac))
599 for (i = 0; i < 6; i++)
600 seq_printf(seq, "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? " " : ":");
601 else
602 for (i = 0; i < 6; i++)
603 seq_printf(seq, "%02X%s", pkt_dev->src_mac[i], i == 5 ? " " : ":");
604
605 seq_printf(seq, "dst_mac: ");
606 for (i = 0; i < 6; i++)
607 seq_printf(seq, "%02X%s", pkt_dev->dst_mac[i], i == 5 ? "\n" : ":");
608
609 seq_printf(seq, " udp_src_min: %d udp_src_max: %d udp_dst_min: %d udp_dst_max: %d\n",
610 pkt_dev->udp_src_min, pkt_dev->udp_src_max, pkt_dev->udp_dst_min,
611 pkt_dev->udp_dst_max);
612
613 seq_printf(seq, " src_mac_count: %d dst_mac_count: %d \n Flags: ",
614 pkt_dev->src_mac_count, pkt_dev->dst_mac_count);
615
616
617 if (pkt_dev->flags & F_IPV6)
618 seq_printf(seq, "IPV6 ");
619
620 if (pkt_dev->flags & F_IPSRC_RND)
621 seq_printf(seq, "IPSRC_RND ");
622
623 if (pkt_dev->flags & F_IPDST_RND)
624 seq_printf(seq, "IPDST_RND ");
625
626 if (pkt_dev->flags & F_TXSIZE_RND)
627 seq_printf(seq, "TXSIZE_RND ");
628
629 if (pkt_dev->flags & F_UDPSRC_RND)
630 seq_printf(seq, "UDPSRC_RND ");
631
632 if (pkt_dev->flags & F_UDPDST_RND)
633 seq_printf(seq, "UDPDST_RND ");
634
635 if (pkt_dev->flags & F_MACSRC_RND)
636 seq_printf(seq, "MACSRC_RND ");
637
638 if (pkt_dev->flags & F_MACDST_RND)
639 seq_printf(seq, "MACDST_RND ");
640
641
642 seq_puts(seq, "\n");
643
644 sa = pkt_dev->started_at;
645 stopped = pkt_dev->stopped_at;
646 if (pkt_dev->running)
647 stopped = now; /* not really stopped, more like last-running-at */
648
649 seq_printf(seq, "Current:\n pkts-sofar: %llu errors: %llu\n started: %lluus stopped: %lluus idle: %lluus\n",
650 (unsigned long long) pkt_dev->sofar,
651 (unsigned long long) pkt_dev->errors,
652 (unsigned long long) sa,
653 (unsigned long long) stopped,
654 (unsigned long long) pkt_dev->idle_acc);
655
656 seq_printf(seq, " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n",
657 pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset,
658 pkt_dev->cur_src_mac_offset);
659
660 if(pkt_dev->flags & F_IPV6) {
661 char b1[128], b2[128];
662 fmt_ip6(b1, pkt_dev->cur_in6_daddr.s6_addr);
663 fmt_ip6(b2, pkt_dev->cur_in6_saddr.s6_addr);
664 seq_printf(seq, " cur_saddr: %s cur_daddr: %s\n", b2, b1);
665 }
666 else
667 seq_printf(seq, " cur_saddr: 0x%x cur_daddr: 0x%x\n",
668 pkt_dev->cur_saddr, pkt_dev->cur_daddr);
669
670
671 seq_printf(seq, " cur_udp_dst: %d cur_udp_src: %d\n",
672 pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src);
673
674 seq_printf(seq, " flows: %u\n", pkt_dev->nflows);
675
676 if (pkt_dev->result[0])
677 seq_printf(seq, "Result: %s\n", pkt_dev->result);
678 else
679 seq_printf(seq, "Result: Idle\n");
680
681 return 0;
682 }
683
684
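/* Count leading separator characters (quotes, whitespace, '=') in the user
 * buffer so the caller can skip past them; returns -EFAULT on copy error.
 */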
685 static int count_trail_chars(const char __user *user_buffer, unsigned int maxlen)
686 {
687 int i;
688
689 for (i = 0; i < maxlen; i++) {
690 char c;
691 if (get_user(c, &user_buffer[i]))
692 return -EFAULT;
693 switch (c) {
694 case '\"':
695 case '\n':
696 case '\r':
697 case '\t':
698 case ' ':
699 case '=':
700 break;
701 default:
702 goto done;
703 };
704 }
705 done:
706 return i;
707 }
708
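/* Parse an unsigned decimal number from user space into *num.  Returns the
 * number of characters consumed, or -EFAULT on copy error.
 */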
709 static unsigned long num_arg(const char __user *user_buffer, unsigned long maxlen,
710 unsigned long *num)
711 {
712 int i = 0;
713 *num = 0;
714
715 for(; i < maxlen; i++) {
716 char c;
717 if (get_user(c, &user_buffer[i]))
718 return -EFAULT;
719 if ((c >= '0') && (c <= '9')) {
720 *num *= 10;
721 *num += c -'0';
722 } else
723 break;
724 }
725 return i;
726 }
727
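/* Length of the next token, up to (but not including) the first quote or
 * whitespace delimiter; returns -EFAULT on copy error.
 */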
728 static int strn_len(const char __user *user_buffer, unsigned int maxlen)
729 {
730 int i = 0;
731
732 for(; i < maxlen; i++) {
733 char c;
734 if (get_user(c, &user_buffer[i]))
735 return -EFAULT;
736 switch (c) {
737 case '\"':
738 case '\n':
739 case '\r':
740 case '\t':
741 case ' ':
742 goto done_str;
743 break;
744 default:
745 break;
746 };
747 }
748 done_str:
749
750 return i;
751 }
752
753 static ssize_t pktgen_if_write(struct file *file, const char __user *user_buffer,
754 size_t count, loff_t *offset)
755 {
756 struct seq_file *seq = (struct seq_file *) file->private_data;
757 struct pktgen_dev *pkt_dev = seq->private;
758 int i = 0, max, len;
759 char name[16], valstr[32];
760 unsigned long value = 0;
761 char* pg_result = NULL;
762 int tmp = 0;
763 char buf[128];
764
765 pg_result = &(pkt_dev->result[0]);
766
767 if (count < 1) {
768 printk("pktgen: wrong command format\n");
769 return -EINVAL;
770 }
771
772 max = count - i;
773 tmp = count_trail_chars(&user_buffer[i], max);
774 if (tmp < 0) {
775 printk("pktgen: illegal format\n");
776 return tmp;
777 }
778 i += tmp;
779
780 /* Read variable name */
781
782 len = strn_len(&user_buffer[i], sizeof(name) - 1);
783 if (len < 0) { return len; }
784 memset(name, 0, sizeof(name));
785 if (copy_from_user(name, &user_buffer[i], len) )
786 return -EFAULT;
787 i += len;
788
789 max = count -i;
790 len = count_trail_chars(&user_buffer[i], max);
791 if (len < 0)
792 return len;
793
794 i += len;
795
796 if (debug) {
797 char tb[count + 1];
798 if (copy_from_user(tb, user_buffer, count))
799 return -EFAULT;
800 tb[count] = 0;
801 printk("pktgen: %s,%lu buffer -:%s:-\n", name,
802 (unsigned long) count, tb);
803 }
804
805 if (!strcmp(name, "min_pkt_size")) {
806 len = num_arg(&user_buffer[i], 10, &value);
807 if (len < 0) { return len; }
808 i += len;
809 if (value < 14+20+8)
810 value = 14+20+8;
811 if (value != pkt_dev->min_pkt_size) {
812 pkt_dev->min_pkt_size = value;
813 pkt_dev->cur_pkt_size = value;
814 }
815 sprintf(pg_result, "OK: min_pkt_size=%u", pkt_dev->min_pkt_size);
816 return count;
817 }
818
819 if (!strcmp(name, "max_pkt_size")) {
820 len = num_arg(&user_buffer[i], 10, &value);
821 if (len < 0) { return len; }
822 i += len;
823 if (value < 14+20+8)
824 value = 14+20+8;
825 if (value != pkt_dev->max_pkt_size) {
826 pkt_dev->max_pkt_size = value;
827 pkt_dev->cur_pkt_size = value;
828 }
829 sprintf(pg_result, "OK: max_pkt_size=%u", pkt_dev->max_pkt_size);
830 return count;
831 }
832
833 /* Shortcut for min = max */
834
835 if (!strcmp(name, "pkt_size")) {
836 len = num_arg(&user_buffer[i], 10, &value);
837 if (len < 0) { return len; }
838 i += len;
839 if (value < 14+20+8)
840 value = 14+20+8;
841 if (value != pkt_dev->min_pkt_size) {
842 pkt_dev->min_pkt_size = value;
843 pkt_dev->max_pkt_size = value;
844 pkt_dev->cur_pkt_size = value;
845 }
846 sprintf(pg_result, "OK: pkt_size=%u", pkt_dev->min_pkt_size);
847 return count;
848 }
849
850 if (!strcmp(name, "debug")) {
851 len = num_arg(&user_buffer[i], 10, &value);
852 if (len < 0) { return len; }
853 i += len;
854 debug = value;
855 sprintf(pg_result, "OK: debug=%u", debug);
856 return count;
857 }
858
859 if (!strcmp(name, "frags")) {
860 len = num_arg(&user_buffer[i], 10, &value);
861 if (len < 0) { return len; }
862 i += len;
863 pkt_dev->nfrags = value;
864 sprintf(pg_result, "OK: frags=%u", pkt_dev->nfrags);
865 return count;
866 }
867 if (!strcmp(name, "delay")) {
868 len = num_arg(&user_buffer[i], 10, &value);
869 if (len < 0) { return len; }
870 i += len;
871 if (value == 0x7FFFFFFF) {
872 pkt_dev->delay_us = 0x7FFFFFFF;
873 pkt_dev->delay_ns = 0;
874 } else {
875 pkt_dev->delay_us = value / 1000;
876 pkt_dev->delay_ns = value % 1000;
877 }
878 sprintf(pg_result, "OK: delay=%u", 1000*pkt_dev->delay_us+pkt_dev->delay_ns);
879 return count;
880 }
881 if (!strcmp(name, "udp_src_min")) {
882 len = num_arg(&user_buffer[i], 10, &value);
883 if (len < 0) { return len; }
884 i += len;
885 if (value != pkt_dev->udp_src_min) {
886 pkt_dev->udp_src_min = value;
887 pkt_dev->cur_udp_src = value;
888 }
889 sprintf(pg_result, "OK: udp_src_min=%u", pkt_dev->udp_src_min);
890 return count;
891 }
892 if (!strcmp(name, "udp_dst_min")) {
893 len = num_arg(&user_buffer[i], 10, &value);
894 if (len < 0) { return len; }
895 i += len;
896 if (value != pkt_dev->udp_dst_min) {
897 pkt_dev->udp_dst_min = value;
898 pkt_dev->cur_udp_dst = value;
899 }
900 sprintf(pg_result, "OK: udp_dst_min=%u", pkt_dev->udp_dst_min);
901 return count;
902 }
903 if (!strcmp(name, "udp_src_max")) {
904 len = num_arg(&user_buffer[i], 10, &value);
905 if (len < 0) { return len; }
906 i += len;
907 if (value != pkt_dev->udp_src_max) {
908 pkt_dev->udp_src_max = value;
909 pkt_dev->cur_udp_src = value;
910 }
911 sprintf(pg_result, "OK: udp_src_max=%u", pkt_dev->udp_src_max);
912 return count;
913 }
914 if (!strcmp(name, "udp_dst_max")) {
915 len = num_arg(&user_buffer[i], 10, &value);
916 if (len < 0) { return len; }
917 i += len;
918 if (value != pkt_dev->udp_dst_max) {
919 pkt_dev->udp_dst_max = value;
920 pkt_dev->cur_udp_dst = value;
921 }
922 sprintf(pg_result, "OK: udp_dst_max=%u", pkt_dev->udp_dst_max);
923 return count;
924 }
925 if (!strcmp(name, "clone_skb")) {
926 len = num_arg(&user_buffer[i], 10, &value);
927 if (len < 0) { return len; }
928 i += len;
929 pkt_dev->clone_skb = value;
930
931 sprintf(pg_result, "OK: clone_skb=%d", pkt_dev->clone_skb);
932 return count;
933 }
934 if (!strcmp(name, "count")) {
935 len = num_arg(&user_buffer[i], 10, &value);
936 if (len < 0) { return len; }
937 i += len;
938 pkt_dev->count = value;
939 sprintf(pg_result, "OK: count=%llu",
940 (unsigned long long) pkt_dev->count);
941 return count;
942 }
943 if (!strcmp(name, "src_mac_count")) {
944 len = num_arg(&user_buffer[i], 10, &value);
945 if (len < 0) { return len; }
946 i += len;
947 if (pkt_dev->src_mac_count != value) {
948 pkt_dev->src_mac_count = value;
949 pkt_dev->cur_src_mac_offset = 0;
950 }
951 sprintf(pg_result, "OK: src_mac_count=%d", pkt_dev->src_mac_count);
952 return count;
953 }
954 if (!strcmp(name, "dst_mac_count")) {
955 len = num_arg(&user_buffer[i], 10, &value);
956 if (len < 0) { return len; }
957 i += len;
958 if (pkt_dev->dst_mac_count != value) {
959 pkt_dev->dst_mac_count = value;
960 pkt_dev->cur_dst_mac_offset = 0;
961 }
962 sprintf(pg_result, "OK: dst_mac_count=%d", pkt_dev->dst_mac_count);
963 return count;
964 }
965 if (!strcmp(name, "flag")) {
966 char f[32];
967 memset(f, 0, 32);
968 len = strn_len(&user_buffer[i], sizeof(f) - 1);
969 if (len < 0) { return len; }
970 if (copy_from_user(f, &user_buffer[i], len))
971 return -EFAULT;
972 i += len;
973 if (strcmp(f, "IPSRC_RND") == 0)
974 pkt_dev->flags |= F_IPSRC_RND;
975
976 else if (strcmp(f, "!IPSRC_RND") == 0)
977 pkt_dev->flags &= ~F_IPSRC_RND;
978
979 else if (strcmp(f, "TXSIZE_RND") == 0)
980 pkt_dev->flags |= F_TXSIZE_RND;
981
982 else if (strcmp(f, "!TXSIZE_RND") == 0)
983 pkt_dev->flags &= ~F_TXSIZE_RND;
984
985 else if (strcmp(f, "IPDST_RND") == 0)
986 pkt_dev->flags |= F_IPDST_RND;
987
988 else if (strcmp(f, "!IPDST_RND") == 0)
989 pkt_dev->flags &= ~F_IPDST_RND;
990
991 else if (strcmp(f, "UDPSRC_RND") == 0)
992 pkt_dev->flags |= F_UDPSRC_RND;
993
994 else if (strcmp(f, "!UDPSRC_RND") == 0)
995 pkt_dev->flags &= ~F_UDPSRC_RND;
996
997 else if (strcmp(f, "UDPDST_RND") == 0)
998 pkt_dev->flags |= F_UDPDST_RND;
999
1000 else if (strcmp(f, "!UDPDST_RND") == 0)
1001 pkt_dev->flags &= ~F_UDPDST_RND;
1002
1003 else if (strcmp(f, "MACSRC_RND") == 0)
1004 pkt_dev->flags |= F_MACSRC_RND;
1005
1006 else if (strcmp(f, "!MACSRC_RND") == 0)
1007 pkt_dev->flags &= ~F_MACSRC_RND;
1008
1009 else if (strcmp(f, "MACDST_RND") == 0)
1010 pkt_dev->flags |= F_MACDST_RND;
1011
1012 else if (strcmp(f, "!MACDST_RND") == 0)
1013 pkt_dev->flags &= ~F_MACDST_RND;
1014
1015 else {
1016 sprintf(pg_result, "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
1017 f,
1018 "IPSRC_RND, IPDST_RND, TXSIZE_RND, UDPSRC_RND, UDPDST_RND, MACSRC_RND, MACDST_RND\n");
1019 return count;
1020 }
1021 sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags);
1022 return count;
1023 }
1024 if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) {
1025 len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_min) - 1);
1026 if (len < 0) { return len; }
1027
1028 if (copy_from_user(buf, &user_buffer[i], len))
1029 return -EFAULT;
1030 buf[len] = 0;
1031 if (strcmp(buf, pkt_dev->dst_min) != 0) {
1032 memset(pkt_dev->dst_min, 0, sizeof(pkt_dev->dst_min));
1033 strncpy(pkt_dev->dst_min, buf, len);
1034 pkt_dev->daddr_min = in_aton(pkt_dev->dst_min);
1035 pkt_dev->cur_daddr = pkt_dev->daddr_min;
1036 }
1037 if(debug)
1038 printk("pktgen: dst_min set to: %s\n", pkt_dev->dst_min);
1039 i += len;
1040 sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min);
1041 return count;
1042 }
1043 if (!strcmp(name, "dst_max")) {
1044 len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_max) - 1);
1045 if (len < 0) { return len; }
1046
1047 if (copy_from_user(buf, &user_buffer[i], len))
1048 return -EFAULT;
1049
1050 buf[len] = 0;
1051 if (strcmp(buf, pkt_dev->dst_max) != 0) {
1052 memset(pkt_dev->dst_max, 0, sizeof(pkt_dev->dst_max));
1053 strncpy(pkt_dev->dst_max, buf, len);
1054 pkt_dev->daddr_max = in_aton(pkt_dev->dst_max);
1055 pkt_dev->cur_daddr = pkt_dev->daddr_max;
1056 }
1057 if(debug)
1058 printk("pktgen: dst_max set to: %s\n", pkt_dev->dst_max);
1059 i += len;
1060 sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max);
1061 return count;
1062 }
1063 if (!strcmp(name, "dst6")) {
1064 len = strn_len(&user_buffer[i], sizeof(buf) - 1);
1065 if (len < 0) return len;
1066
1067 pkt_dev->flags |= F_IPV6;
1068
1069 if (copy_from_user(buf, &user_buffer[i], len))
1070 return -EFAULT;
1071 buf[len] = 0;
1072
1073 scan_ip6(buf, pkt_dev->in6_daddr.s6_addr);
1074 fmt_ip6(buf, pkt_dev->in6_daddr.s6_addr);
1075
1076 ipv6_addr_copy(&pkt_dev->cur_in6_daddr, &pkt_dev->in6_daddr);
1077
1078 if(debug)
1079 printk("pktgen: dst6 set to: %s\n", buf);
1080
1081 i += len;
1082 sprintf(pg_result, "OK: dst6=%s", buf);
1083 return count;
1084 }
1085 if (!strcmp(name, "dst6_min")) {
1086 len = strn_len(&user_buffer[i], sizeof(buf) - 1);
1087 if (len < 0) return len;
1088
1089 pkt_dev->flags |= F_IPV6;
1090
1091 if (copy_from_user(buf, &user_buffer[i], len))
1092 return -EFAULT;
1093 buf[len] = 0;
1094
1095 scan_ip6(buf, pkt_dev->min_in6_daddr.s6_addr);
1096 fmt_ip6(buf, pkt_dev->min_in6_daddr.s6_addr);
1097
1098 ipv6_addr_copy(&pkt_dev->cur_in6_daddr, &pkt_dev->min_in6_daddr);
1099 if(debug)
1100 printk("pktgen: dst6_min set to: %s\n", buf);
1101
1102 i += len;
1103 sprintf(pg_result, "OK: dst6_min=%s", buf);
1104 return count;
1105 }
1106 if (!strcmp(name, "dst6_max")) {
1107 len = strn_len(&user_buffer[i], sizeof(buf) - 1);
1108 if (len < 0) return len;
1109
1110 pkt_dev->flags |= F_IPV6;
1111
1112 if (copy_from_user(buf, &user_buffer[i], len))
1113 return -EFAULT;
1114 buf[len] = 0;
1115
1116 scan_ip6(buf, pkt_dev->max_in6_daddr.s6_addr);
1117 fmt_ip6(buf, pkt_dev->max_in6_daddr.s6_addr);
1118
1119 if(debug)
1120 printk("pktgen: dst6_max set to: %s\n", buf);
1121
1122 i += len;
1123 sprintf(pg_result, "OK: dst6_max=%s", buf);
1124 return count;
1125 }
1126 if (!strcmp(name, "src6")) {
1127 len = strn_len(&user_buffer[i], sizeof(buf) - 1);
1128 if (len < 0) return len;
1129
1130 pkt_dev->flags |= F_IPV6;
1131
1132 if (copy_from_user(buf, &user_buffer[i], len))
1133 return -EFAULT;
1134 buf[len] = 0;
1135
1136 scan_ip6(buf, pkt_dev->in6_saddr.s6_addr);
1137 fmt_ip6(buf, pkt_dev->in6_saddr.s6_addr);
1138
1139 ipv6_addr_copy(&pkt_dev->cur_in6_saddr, &pkt_dev->in6_saddr);
1140
1141 if(debug)
1142 printk("pktgen: src6 set to: %s\n", buf);
1143
1144 i += len;
1145 sprintf(pg_result, "OK: src6=%s", buf);
1146 return count;
1147 }
1148 if (!strcmp(name, "src_min")) {
1149 len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_min) - 1);
1150 if (len < 0) { return len; }
1151 if (copy_from_user(buf, &user_buffer[i], len))
1152 return -EFAULT;
1153 buf[len] = 0;
1154 if (strcmp(buf, pkt_dev->src_min) != 0) {
1155 memset(pkt_dev->src_min, 0, sizeof(pkt_dev->src_min));
1156 strncpy(pkt_dev->src_min, buf, len);
1157 pkt_dev->saddr_min = in_aton(pkt_dev->src_min);
1158 pkt_dev->cur_saddr = pkt_dev->saddr_min;
1159 }
1160 if(debug)
1161 printk("pktgen: src_min set to: %s\n", pkt_dev->src_min);
1162 i += len;
1163 sprintf(pg_result, "OK: src_min=%s", pkt_dev->src_min);
1164 return count;
1165 }
1166 if (!strcmp(name, "src_max")) {
1167 len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_max) - 1);
1168 if (len < 0) { return len; }
1169 if (copy_from_user(buf, &user_buffer[i], len))
1170 return -EFAULT;
1171 buf[len] = 0;
1172 if (strcmp(buf, pkt_dev->src_max) != 0) {
1173 memset(pkt_dev->src_max, 0, sizeof(pkt_dev->src_max));
1174 strncpy(pkt_dev->src_max, buf, len);
1175 pkt_dev->saddr_max = in_aton(pkt_dev->src_max);
1176 pkt_dev->cur_saddr = pkt_dev->saddr_max;
1177 }
1178 if(debug)
1179 printk("pktgen: src_max set to: %s\n", pkt_dev->src_max);
1180 i += len;
1181 sprintf(pg_result, "OK: src_max=%s", pkt_dev->src_max);
1182 return count;
1183 }
1184 if (!strcmp(name, "dst_mac")) {
1185 char *v = valstr;
1186 unsigned char old_dmac[ETH_ALEN];
1187 unsigned char *m = pkt_dev->dst_mac;
1188 memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN);
1189
1190 len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
1191 if (len < 0) { return len; }
1192 memset(valstr, 0, sizeof(valstr));
1193 if( copy_from_user(valstr, &user_buffer[i], len))
1194 return -EFAULT;
1195 i += len;
1196
1197 for(*m = 0;*v && m < pkt_dev->dst_mac + 6; v++) {
1198 if (*v >= '0' && *v <= '9') {
1199 *m *= 16;
1200 *m += *v - '0';
1201 }
1202 if (*v >= 'A' && *v <= 'F') {
1203 *m *= 16;
1204 *m += *v - 'A' + 10;
1205 }
1206 if (*v >= 'a' && *v <= 'f') {
1207 *m *= 16;
1208 *m += *v - 'a' + 10;
1209 }
1210 if (*v == ':') {
1211 m++;
1212 *m = 0;
1213 }
1214 }
1215
1216 /* Set up Dest MAC */
1217 if (compare_ether_addr(old_dmac, pkt_dev->dst_mac))
1218 memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN);
1219
1220 sprintf(pg_result, "OK: dstmac");
1221 return count;
1222 }
1223 if (!strcmp(name, "src_mac")) {
1224 char *v = valstr;
1225 unsigned char *m = pkt_dev->src_mac;
1226
1227 len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
1228 if (len < 0) { return len; }
1229 memset(valstr, 0, sizeof(valstr));
1230 if( copy_from_user(valstr, &user_buffer[i], len))
1231 return -EFAULT;
1232 i += len;
1233
1234 for(*m = 0;*v && m < pkt_dev->src_mac + 6; v++) {
1235 if (*v >= '0' && *v <= '9') {
1236 *m *= 16;
1237 *m += *v - '0';
1238 }
1239 if (*v >= 'A' && *v <= 'F') {
1240 *m *= 16;
1241 *m += *v - 'A' + 10;
1242 }
1243 if (*v >= 'a' && *v <= 'f') {
1244 *m *= 16;
1245 *m += *v - 'a' + 10;
1246 }
1247 if (*v == ':') {
1248 m++;
1249 *m = 0;
1250 }
1251 }
1252
1253 sprintf(pg_result, "OK: srcmac");
1254 return count;
1255 }
1256
1257 if (!strcmp(name, "clear_counters")) {
1258 pktgen_clear_counters(pkt_dev);
1259 sprintf(pg_result, "OK: Clearing counters.\n");
1260 return count;
1261 }
1262
1263 if (!strcmp(name, "flows")) {
1264 len = num_arg(&user_buffer[i], 10, &value);
1265 if (len < 0) { return len; }
1266 i += len;
1267 if (value > MAX_CFLOWS)
1268 value = MAX_CFLOWS;
1269
1270 pkt_dev->cflows = value;
1271 sprintf(pg_result, "OK: flows=%u", pkt_dev->cflows);
1272 return count;
1273 }
1274
1275 if (!strcmp(name, "flowlen")) {
1276 len = num_arg(&user_buffer[i], 10, &value);
1277 if (len < 0) { return len; }
1278 i += len;
1279 pkt_dev->lflow = value;
1280 sprintf(pg_result, "OK: flowlen=%u", pkt_dev->lflow);
1281 return count;
1282 }
1283
1284 sprintf(pkt_dev->result, "No such parameter \"%s\"", name);
1285 return -EINVAL;
1286 }
1287
1288 static int pktgen_if_open(struct inode *inode, struct file *file)
1289 {
1290 return single_open(file, pktgen_if_show, PDE(inode)->data);
1291 }
1292
1293 static struct file_operations pktgen_if_fops = {
1294 .owner = THIS_MODULE,
1295 .open = pktgen_if_open,
1296 .read = seq_read,
1297 .llseek = seq_lseek,
1298 .write = pktgen_if_write,
1299 .release = single_release,
1300 };
1301
1302 static int pktgen_thread_show(struct seq_file *seq, void *v)
1303 {
1304 struct pktgen_thread *t = seq->private;
1305 struct pktgen_dev *pkt_dev = NULL;
1306
1307 BUG_ON(!t);
1308
1309 seq_printf(seq, "Name: %s max_before_softirq: %d\n",
1310 t->name, t->max_before_softirq);
1311
1312 seq_printf(seq, "Running: ");
1313
1314 if_lock(t);
1315 for(pkt_dev = t->if_list;pkt_dev; pkt_dev = pkt_dev->next)
1316 if(pkt_dev->running)
1317 seq_printf(seq, "%s ", pkt_dev->ifname);
1318
1319 seq_printf(seq, "\nStopped: ");
1320
1321 for(pkt_dev = t->if_list;pkt_dev; pkt_dev = pkt_dev->next)
1322 if(!pkt_dev->running)
1323 seq_printf(seq, "%s ", pkt_dev->ifname);
1324
1325 if (t->result[0])
1326 seq_printf(seq, "\nResult: %s\n", t->result);
1327 else
1328 seq_printf(seq, "\nResult: NA\n");
1329
1330 if_unlock(t);
1331
1332 return 0;
1333 }
1334
1335 static ssize_t pktgen_thread_write(struct file *file,
1336 const char __user *user_buffer,
1337 size_t count, loff_t *offset)
1338 {
1339 struct seq_file *seq = (struct seq_file *) file->private_data;
1340 struct pktgen_thread *t = seq->private;
1341 int i = 0, max, len, ret;
1342 char name[40];
1343 char *pg_result;
1344 unsigned long value = 0;
1345
1346 if (count < 1) {
1347 // sprintf(pg_result, "Wrong command format");
1348 return -EINVAL;
1349 }
1350
1351 max = count - i;
1352 len = count_trail_chars(&user_buffer[i], max);
1353 if (len < 0)
1354 return len;
1355
1356 i += len;
1357
1358 /* Read variable name */
1359
1360 len = strn_len(&user_buffer[i], sizeof(name) - 1);
1361 if (len < 0)
1362 return len;
1363
1364 memset(name, 0, sizeof(name));
1365 if (copy_from_user(name, &user_buffer[i], len))
1366 return -EFAULT;
1367 i += len;
1368
1369 max = count -i;
1370 len = count_trail_chars(&user_buffer[i], max);
1371 if (len < 0)
1372 return len;
1373
1374 i += len;
1375
1376 if (debug)
1377 printk("pktgen: t=%s, count=%lu\n", name,
1378 (unsigned long) count);
1379
1380 if(!t) {
1381 printk("pktgen: ERROR: No thread\n");
1382 ret = -EINVAL;
1383 goto out;
1384 }
1385
1386 pg_result = &(t->result[0]);
1387
1388 if (!strcmp(name, "add_device")) {
1389 char f[32];
1390 memset(f, 0, 32);
1391 len = strn_len(&user_buffer[i], sizeof(f) - 1);
1392 if (len < 0) {
1393 ret = len;
1394 goto out;
1395 }
1396 if( copy_from_user(f, &user_buffer[i], len) )
1397 return -EFAULT;
1398 i += len;
1399 thread_lock();
1400 pktgen_add_device(t, f);
1401 thread_unlock();
1402 ret = count;
1403 sprintf(pg_result, "OK: add_device=%s", f);
1404 goto out;
1405 }
1406
1407 if (!strcmp(name, "rem_device_all")) {
1408 thread_lock();
1409 t->control |= T_REMDEV;
1410 thread_unlock();
1411 schedule_timeout_interruptible(msecs_to_jiffies(125)); /* Propagate thread->control */
1412 ret = count;
1413 sprintf(pg_result, "OK: rem_device_all");
1414 goto out;
1415 }
1416
1417 if (!strcmp(name, "max_before_softirq")) {
1418 len = num_arg(&user_buffer[i], 10, &value);
1419 thread_lock();
1420 t->max_before_softirq = value;
1421 thread_unlock();
1422 ret = count;
1423 sprintf(pg_result, "OK: max_before_softirq=%lu", value);
1424 goto out;
1425 }
1426
1427 ret = -EINVAL;
1428 out:
1429
1430 return ret;
1431 }
1432
1433 static int pktgen_thread_open(struct inode *inode, struct file *file)
1434 {
1435 return single_open(file, pktgen_thread_show, PDE(inode)->data);
1436 }
1437
1438 static struct file_operations pktgen_thread_fops = {
1439 .owner = THIS_MODULE,
1440 .open = pktgen_thread_open,
1441 .read = seq_read,
1442 .llseek = seq_lseek,
1443 .write = pktgen_thread_write,
1444 .release = single_release,
1445 };
1446
1447 /* Find a device by name across all threads; optionally remove it. */
1448 static struct pktgen_dev *__pktgen_NN_threads(const char* ifname, int remove)
1449 {
1450 struct pktgen_thread *t;
1451 struct pktgen_dev *pkt_dev = NULL;
1452
1453 t = pktgen_threads;
1454
1455 while (t) {
1456 pkt_dev = pktgen_find_dev(t, ifname);
1457 if (pkt_dev) {
1458 if(remove) {
1459 if_lock(t);
1460 pktgen_remove_device(t, pkt_dev);
1461 if_unlock(t);
1462 }
1463 break;
1464 }
1465 t = t->next;
1466 }
1467 return pkt_dev;
1468 }
1469
1470 static struct pktgen_dev *pktgen_NN_threads(const char* ifname, int remove)
1471 {
1472 struct pktgen_dev *pkt_dev = NULL;
1473 thread_lock();
1474 pkt_dev = __pktgen_NN_threads(ifname, remove);
1475 thread_unlock();
1476 return pkt_dev;
1477 }
1478
1479 static int pktgen_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
1480 {
1481 struct net_device *dev = (struct net_device *)(ptr);
1482
1483 /* It is OK that we do not hold the group lock right now,
1484 * as we run under the RTNL lock.
1485 */
1486
1487 switch (event) {
1488 case NETDEV_CHANGEADDR:
1489 case NETDEV_GOING_DOWN:
1490 case NETDEV_DOWN:
1491 case NETDEV_UP:
1492 /* Ignore for now */
1493 break;
1494
1495 case NETDEV_UNREGISTER:
1496 pktgen_NN_threads(dev->name, REMOVE);
1497 break;
1498 };
1499
1500 return NOTIFY_DONE;
1501 }
1502
1503 /* Associate pktgen_dev with a device. */
1504
1505 static struct net_device* pktgen_setup_dev(struct pktgen_dev *pkt_dev) {
1506 struct net_device *odev;
1507
1508 /* Clean old setups */
1509
1510 if (pkt_dev->odev) {
1511 dev_put(pkt_dev->odev);
1512 pkt_dev->odev = NULL;
1513 }
1514
1515 odev = dev_get_by_name(pkt_dev->ifname);
1516
1517 if (!odev) {
1518 printk("pktgen: no such netdevice: \"%s\"\n", pkt_dev->ifname);
1519 goto out;
1520 }
1521 if (odev->type != ARPHRD_ETHER) {
1522 printk("pktgen: not an ethernet device: \"%s\"\n", pkt_dev->ifname);
1523 goto out_put;
1524 }
1525 if (!netif_running(odev)) {
1526 printk("pktgen: device is down: \"%s\"\n", pkt_dev->ifname);
1527 goto out_put;
1528 }
1529 pkt_dev->odev = odev;
1530
1531 return pkt_dev->odev;
1532
1533 out_put:
1534 dev_put(odev);
1535 out:
1536 return NULL;
1537
1538 }
1539
1540 /* Read pkt_dev from the interface and set up internal pktgen_dev
1541 * structure to have the right information to create/send packets
1542 */
1543 static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
1544 {
1545 /* Try once more, just in case it works now. */
1546 if (!pkt_dev->odev)
1547 pktgen_setup_dev(pkt_dev);
1548
1549 if (!pkt_dev->odev) {
1550 printk("pktgen: ERROR: pkt_dev->odev == NULL in setup_inject.\n");
1551 sprintf(pkt_dev->result, "ERROR: pkt_dev->odev == NULL in setup_inject.\n");
1552 return;
1553 }
1554
1555 /* Default to the interface's mac if not explicitly set. */
1556
1557 if (is_zero_ether_addr(pkt_dev->src_mac))
1558 memcpy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr, ETH_ALEN);
1559
1560 /* Set up Dest MAC */
1561 memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN);
1562
1563 /* Set up pkt size */
1564 pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size;
1565
1566 if(pkt_dev->flags & F_IPV6) {
1567 /*
1568 * Skip this automatic address setting until locks or functions
1569 * get exported
1570 */
1571
1572 #ifdef NOTNOW
1573 int i, set = 0, err=1;
1574 struct inet6_dev *idev;
1575
1576 for(i=0; i< IN6_ADDR_HSIZE; i++)
1577 if(pkt_dev->cur_in6_saddr.s6_addr[i]) {
1578 set = 1;
1579 break;
1580 }
1581
1582 if(!set) {
1583
1584 /*
1585 * Use linklevel address if unconfigured.
1586 *
1587 * use ipv6_get_lladdr if/when it's get exported
1588 */
1589
1590
1591 read_lock(&addrconf_lock);
1592 if ((idev = __in6_dev_get(pkt_dev->odev)) != NULL) {
1593 struct inet6_ifaddr *ifp;
1594
1595 read_lock_bh(&idev->lock);
1596 for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) {
1597 if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
1598 ipv6_addr_copy(&pkt_dev->cur_in6_saddr, &ifp->addr);
1599 err = 0;
1600 break;
1601 }
1602 }
1603 read_unlock_bh(&idev->lock);
1604 }
1605 read_unlock(&addrconf_lock);
1606 if(err) printk("pktgen: ERROR: IPv6 link address not available.\n");
1607 }
1608 #endif
1609 }
1610 else {
1611 pkt_dev->saddr_min = 0;
1612 pkt_dev->saddr_max = 0;
1613 if (strlen(pkt_dev->src_min) == 0) {
1614
1615 struct in_device *in_dev;
1616
1617 rcu_read_lock();
1618 in_dev = __in_dev_get_rcu(pkt_dev->odev);
1619 if (in_dev) {
1620 if (in_dev->ifa_list) {
1621 pkt_dev->saddr_min = in_dev->ifa_list->ifa_address;
1622 pkt_dev->saddr_max = pkt_dev->saddr_min;
1623 }
1624 }
1625 rcu_read_unlock();
1626 }
1627 else {
1628 pkt_dev->saddr_min = in_aton(pkt_dev->src_min);
1629 pkt_dev->saddr_max = in_aton(pkt_dev->src_max);
1630 }
1631
1632 pkt_dev->daddr_min = in_aton(pkt_dev->dst_min);
1633 pkt_dev->daddr_max = in_aton(pkt_dev->dst_max);
1634 }
1635 /* Initialize current values. */
1636 pkt_dev->cur_dst_mac_offset = 0;
1637 pkt_dev->cur_src_mac_offset = 0;
1638 pkt_dev->cur_saddr = pkt_dev->saddr_min;
1639 pkt_dev->cur_daddr = pkt_dev->daddr_min;
1640 pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min;
1641 pkt_dev->cur_udp_src = pkt_dev->udp_src_min;
1642 pkt_dev->nflows = 0;
1643 }
1644
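/* Wait until spin_until_us (micro-seconds), sleeping or yielding the CPU
 * where possible, and account the waiting time in pkt_dev->idle_acc.
 * Returns early if the device is stopped while waiting.
 */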
1645 static void spin(struct pktgen_dev *pkt_dev, __u64 spin_until_us)
1646 {
1647 __u64 start;
1648 __u64 now;
1649
1650 start = now = getCurUs();
1651 printk(KERN_INFO "sleeping for %d\n", (int)(spin_until_us - now));
1652 while (now < spin_until_us) {
1653 /* TODO: optimize sleeping behavior */
1654 if (spin_until_us - now > jiffies_to_usecs(1)+1)
1655 schedule_timeout_interruptible(1);
1656 else if (spin_until_us - now > 100) {
1657 do_softirq();
1658 if (!pkt_dev->running)
1659 return;
1660 if (need_resched())
1661 schedule();
1662 }
1663
1664 now = getCurUs();
1665 }
1666
1667 pkt_dev->idle_acc += now - start;
1668 }
1669
1670
1671 /* Increment/randomize headers according to flags and current values
1672 * for IP src/dest, UDP src/dst port, MAC-Addr src/dst
1673 */
1674 static void mod_cur_headers(struct pktgen_dev *pkt_dev) {
1675 __u32 imn;
1676 __u32 imx;
1677 int flow = 0;
1678
1679 if(pkt_dev->cflows) {
1680 flow = pktgen_random() % pkt_dev->cflows;
1681
1682 if (pkt_dev->flows[flow].count > pkt_dev->lflow)
1683 pkt_dev->flows[flow].count = 0;
1684 }
1685
1686
1687 /* Deal with source MAC */
1688 if (pkt_dev->src_mac_count > 1) {
1689 __u32 mc;
1690 __u32 tmp;
1691
1692 if (pkt_dev->flags & F_MACSRC_RND)
1693 mc = pktgen_random() % (pkt_dev->src_mac_count);
1694 else {
1695 mc = pkt_dev->cur_src_mac_offset++;
1696 if (pkt_dev->cur_src_mac_offset > pkt_dev->src_mac_count)
1697 pkt_dev->cur_src_mac_offset = 0;
1698 }
1699
1700 tmp = pkt_dev->src_mac[5] + (mc & 0xFF);
1701 pkt_dev->hh[11] = tmp;
1702 tmp = (pkt_dev->src_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8));
1703 pkt_dev->hh[10] = tmp;
1704 tmp = (pkt_dev->src_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8));
1705 pkt_dev->hh[9] = tmp;
1706 tmp = (pkt_dev->src_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8));
1707 pkt_dev->hh[8] = tmp;
1708 tmp = (pkt_dev->src_mac[1] + (tmp >> 8));
1709 pkt_dev->hh[7] = tmp;
1710 }
1711
1712 /* Deal with Destination MAC */
1713 if (pkt_dev->dst_mac_count > 1) {
1714 __u32 mc;
1715 __u32 tmp;
1716
1717 if (pkt_dev->flags & F_MACDST_RND)
1718 mc = pktgen_random() % (pkt_dev->dst_mac_count);
1719
1720 else {
1721 mc = pkt_dev->cur_dst_mac_offset++;
1722 if (pkt_dev->cur_dst_mac_offset > pkt_dev->dst_mac_count) {
1723 pkt_dev->cur_dst_mac_offset = 0;
1724 }
1725 }
1726
1727 tmp = pkt_dev->dst_mac[5] + (mc & 0xFF);
1728 pkt_dev->hh[5] = tmp;
1729 tmp = (pkt_dev->dst_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8));
1730 pkt_dev->hh[4] = tmp;
1731 tmp = (pkt_dev->dst_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8));
1732 pkt_dev->hh[3] = tmp;
1733 tmp = (pkt_dev->dst_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8));
1734 pkt_dev->hh[2] = tmp;
1735 tmp = (pkt_dev->dst_mac[1] + (tmp >> 8));
1736 pkt_dev->hh[1] = tmp;
1737 }
1738
1739 if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) {
1740 if (pkt_dev->flags & F_UDPSRC_RND)
1741 pkt_dev->cur_udp_src = ((pktgen_random() % (pkt_dev->udp_src_max - pkt_dev->udp_src_min)) + pkt_dev->udp_src_min);
1742
1743 else {
1744 pkt_dev->cur_udp_src++;
1745 if (pkt_dev->cur_udp_src >= pkt_dev->udp_src_max)
1746 pkt_dev->cur_udp_src = pkt_dev->udp_src_min;
1747 }
1748 }
1749
1750 if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) {
1751 if (pkt_dev->flags & F_UDPDST_RND) {
1752 pkt_dev->cur_udp_dst = ((pktgen_random() % (pkt_dev->udp_dst_max - pkt_dev->udp_dst_min)) + pkt_dev->udp_dst_min);
1753 }
1754 else {
1755 pkt_dev->cur_udp_dst++;
1756 if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max)
1757 pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min;
1758 }
1759 }
1760
1761 if (!(pkt_dev->flags & F_IPV6)) {
1762
1763 if ((imn = ntohl(pkt_dev->saddr_min)) < (imx = ntohl(pkt_dev->saddr_max))) {
1764 __u32 t;
1765 if (pkt_dev->flags & F_IPSRC_RND)
1766 t = ((pktgen_random() % (imx - imn)) + imn);
1767 else {
1768 t = ntohl(pkt_dev->cur_saddr);
1769 t++;
1770 if (t > imx) {
1771 t = imn;
1772 }
1773 }
1774 pkt_dev->cur_saddr = htonl(t);
1775 }
1776
1777 if (pkt_dev->cflows && pkt_dev->flows[flow].count != 0) {
1778 pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr;
1779 } else {
1780
1781 if ((imn = ntohl(pkt_dev->daddr_min)) < (imx = ntohl(pkt_dev->daddr_max))) {
1782 __u32 t;
1783 if (pkt_dev->flags & F_IPDST_RND) {
1784
1785 t = ((pktgen_random() % (imx - imn)) + imn);
1786 t = htonl(t);
1787
1788 while( LOOPBACK(t) || MULTICAST(t) || BADCLASS(t) || ZERONET(t) || LOCAL_MCAST(t) ) {
1789 t = ((pktgen_random() % (imx - imn)) + imn);
1790 t = htonl(t);
1791 }
1792 pkt_dev->cur_daddr = t;
1793 }
1794
1795 else {
1796 t = ntohl(pkt_dev->cur_daddr);
1797 t++;
1798 if (t > imx) {
1799 t = imn;
1800 }
1801 pkt_dev->cur_daddr = htonl(t);
1802 }
1803 }
1804 if(pkt_dev->cflows) {
1805 pkt_dev->flows[flow].cur_daddr = pkt_dev->cur_daddr;
1806 pkt_dev->nflows++;
1807 }
1808 }
1809 }
1810 else /* IPv6 */
1811 {
1812 if(pkt_dev->min_in6_daddr.s6_addr32[0] == 0 &&
1813 pkt_dev->min_in6_daddr.s6_addr32[1] == 0 &&
1814 pkt_dev->min_in6_daddr.s6_addr32[2] == 0 &&
1815 pkt_dev->min_in6_daddr.s6_addr32[3] == 0);
1816 else {
1817 int i;
1818
1819 /* Only random destinations yet */
1820
1821 for(i=0; i < 4; i++) {
1822 pkt_dev->cur_in6_daddr.s6_addr32[i] =
1823 ((pktgen_random() |
1824 pkt_dev->min_in6_daddr.s6_addr32[i]) &
1825 pkt_dev->max_in6_daddr.s6_addr32[i]);
1826 }
1827 }
1828 }
1829
1830 if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) {
1831 __u32 t;
1832 if (pkt_dev->flags & F_TXSIZE_RND) {
1833 t = ((pktgen_random() % (pkt_dev->max_pkt_size - pkt_dev->min_pkt_size))
1834 + pkt_dev->min_pkt_size);
1835 }
1836 else {
1837 t = pkt_dev->cur_pkt_size + 1;
1838 if (t > pkt_dev->max_pkt_size)
1839 t = pkt_dev->min_pkt_size;
1840 }
1841 pkt_dev->cur_pkt_size = t;
1842 }
1843
1844 pkt_dev->flows[flow].count++;
1845 }
1846
1847
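/* Build one Ethernet/IPv4/UDP test packet from the current pkt_dev state.
 * Sizes, addresses, ports and MACs are advanced first via mod_cur_headers().
 */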
1848 static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
1849 struct pktgen_dev *pkt_dev)
1850 {
1851 struct sk_buff *skb = NULL;
1852 __u8 *eth;
1853 struct udphdr *udph;
1854 int datalen, iplen;
1855 struct iphdr *iph;
1856 struct pktgen_hdr *pgh = NULL;
1857
1858 /* Update any of the values, used when we're incrementing various
1859 * fields.
1860 */
1861 mod_cur_headers(pkt_dev);
1862
1863 datalen = (odev->hard_header_len + 16) & ~0xf;
1864 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen, GFP_ATOMIC);
1865 if (!skb) {
1866 sprintf(pkt_dev->result, "No memory");
1867 return NULL;
1868 }
1869
1870 skb_reserve(skb, datalen);
1871
1872 /* Reserve for ethernet and IP header */
1873 eth = (__u8 *) skb_push(skb, 14);
1874 iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr));
1875 udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr));
1876
1877 memcpy(eth, pkt_dev->hh, 12);
1878 *(u16*)&eth[12] = __constant_htons(ETH_P_IP);
1879
1880 datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8; /* Eth + IPh + UDPh */
1881 if (datalen < sizeof(struct pktgen_hdr))
1882 datalen = sizeof(struct pktgen_hdr);
1883
1884 udph->source = htons(pkt_dev->cur_udp_src);
1885 udph->dest = htons(pkt_dev->cur_udp_dst);
1886 udph->len = htons(datalen + 8); /* DATA + udphdr */
1887 udph->check = 0; /* No checksum */
1888
1889 iph->ihl = 5;
1890 iph->version = 4;
1891 iph->ttl = 32;
1892 iph->tos = 0;
1893 iph->protocol = IPPROTO_UDP; /* UDP */
1894 iph->saddr = pkt_dev->cur_saddr;
1895 iph->daddr = pkt_dev->cur_daddr;
1896 iph->frag_off = 0;
1897 iplen = 20 + 8 + datalen;
1898 iph->tot_len = htons(iplen);
1899 iph->check = 0;
1900 iph->check = ip_fast_csum((void *) iph, iph->ihl);
1901 skb->protocol = __constant_htons(ETH_P_IP);
1902 skb->mac.raw = ((u8 *)iph) - 14;
1903 skb->dev = odev;
1904 skb->pkt_type = PACKET_HOST;
1905
1906 if (pkt_dev->nfrags <= 0)
1907 pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
1908 else {
1909 int frags = pkt_dev->nfrags;
1910 int i;
1911
1912 pgh = (struct pktgen_hdr*)(((char*)(udph)) + 8);
1913
1914 if (frags > MAX_SKB_FRAGS)
1915 frags = MAX_SKB_FRAGS;
1916 if (datalen > frags*PAGE_SIZE) {
1917 skb_put(skb, datalen-frags*PAGE_SIZE);
1918 datalen = frags*PAGE_SIZE;
1919 }
1920
1921 i = 0;
1922 while (datalen > 0) {
1923 struct page *page = alloc_pages(GFP_KERNEL, 0);
1924 skb_shinfo(skb)->frags[i].page = page;
1925 skb_shinfo(skb)->frags[i].page_offset = 0;
1926 skb_shinfo(skb)->frags[i].size =
1927 (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
1928 datalen -= skb_shinfo(skb)->frags[i].size;
1929 skb->len += skb_shinfo(skb)->frags[i].size;
1930 skb->data_len += skb_shinfo(skb)->frags[i].size;
1931 i++;
1932 skb_shinfo(skb)->nr_frags = i;
1933 }
1934
1935 while (i < frags) {
1936 int rem;
1937
1938 if (i == 0)
1939 break;
1940
1941 rem = skb_shinfo(skb)->frags[i - 1].size / 2;
1942 if (rem == 0)
1943 break;
1944
1945 skb_shinfo(skb)->frags[i - 1].size -= rem;
1946
1947 skb_shinfo(skb)->frags[i] = skb_shinfo(skb)->frags[i - 1];
1948 get_page(skb_shinfo(skb)->frags[i].page);
1949 skb_shinfo(skb)->frags[i].page = skb_shinfo(skb)->frags[i - 1].page;
1950 skb_shinfo(skb)->frags[i].page_offset += skb_shinfo(skb)->frags[i - 1].size;
1951 skb_shinfo(skb)->frags[i].size = rem;
1952 i++;
1953 skb_shinfo(skb)->nr_frags = i;
1954 }
1955 }
1956
1957 /* Stamp the time and sequence number, converted to network byte order */
1958
1959 if (pgh) {
1960 struct timeval timestamp;
1961
1962 pgh->pgh_magic = htonl(PKTGEN_MAGIC);
1963 pgh->seq_num = htonl(pkt_dev->seq_num);
1964
1965 do_gettimeofday(&timestamp);
1966 pgh->tv_sec = htonl(timestamp.tv_sec);
1967 pgh->tv_usec = htonl(timestamp.tv_usec);
1968 }
1969 pkt_dev->seq_num++;
1970
1971 return skb;
1972 }
1973
1974 /*
1975 * scan_ip6, fmt_ip taken from dietlibc-0.21
1976 * Author Felix von Leitner <felix-dietlibc@fefe.de>
1977 *
1978 * Slightly modified for kernel.
1979 * Should be candidate for net/ipv4/utils.c
1980 * --ro
1981 */
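/*
 * Illustrative example (not part of the original dietlibc sources):
 * scan_ip6("fe80::1", ip) fills ip[] with fe 80 00 ... 00 01 and returns the
 * number of characters consumed, while fmt_ip6() writes the compressed
 * textual form of ip[] back into a buffer and returns its length.
 */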
1982
1983 static unsigned int scan_ip6(const char *s,char ip[16])
1984 {
1985 unsigned int i;
1986 unsigned int len=0;
1987 unsigned long u;
1988 char suffix[16];
1989 unsigned int prefixlen=0;
1990 unsigned int suffixlen=0;
1991 __u32 tmp;
1992
1993 for (i=0; i<16; i++) ip[i]=0;
1994
1995 for (;;) {
1996 if (*s == ':') {
1997 len++;
1998 if (s[1] == ':') { /* Found "::", skip to part 2 */
1999 s+=2;
2000 len++;
2001 break;
2002 }
2003 s++;
2004 }
2005 {
2006 char *tmp;
2007 u=simple_strtoul(s,&tmp,16);
2008 i=tmp-s;
2009 }
2010
2011 if (!i) return 0;
2012 if (prefixlen==12 && s[i]=='.') {
2013
2014 /* the last 4 bytes may be written as IPv4 address */
2015
2016 tmp = in_aton(s);
2017 memcpy((struct in_addr*)(ip+12), &tmp, sizeof(tmp));
2018 return i+len;
2019 }
2020 ip[prefixlen++] = (u >> 8);
2021 ip[prefixlen++] = (u & 255);
2022 s += i; len += i;
2023 if (prefixlen==16)
2024 return len;
2025 }
2026
2027 /* part 2, after "::" */
2028 for (;;) {
2029 if (*s == ':') {
2030 if (suffixlen==0)
2031 break;
2032 s++;
2033 len++;
2034 } else if (suffixlen!=0)
2035 break;
2036 {
2037 char *tmp;
2038 u=simple_strtol(s,&tmp,16);
2039 i=tmp-s;
2040 }
2041 if (!i) {
2042 if (*s) len--;
2043 break;
2044 }
2045 if (suffixlen+prefixlen<=12 && s[i]=='.') {
2046 tmp = in_aton(s);
2047 memcpy((struct in_addr*)(suffix+suffixlen), &tmp, sizeof(tmp));
2048 suffixlen+=4;
2049 len+=strlen(s);
2050 break;
2051 }
2052 suffix[suffixlen++] = (u >> 8);
2053 suffix[suffixlen++] = (u & 255);
2054 s += i; len += i;
2055 if (prefixlen+suffixlen==16)
2056 break;
2057 }
2058 for (i=0; i<suffixlen; i++)
2059 ip[16-suffixlen+i] = suffix[i];
2060 return len;
2061 }
2062
2063 static char tohex(char hexdigit) {
2064 return hexdigit>9?hexdigit+'a'-10:hexdigit+'0';
2065 }
2066
2067 static int fmt_xlong(char* s,unsigned int i) {
2068 char* bak=s;
2069 *s=tohex((i>>12)&0xf); if (s!=bak || *s!='0') ++s;
2070 *s=tohex((i>>8)&0xf); if (s!=bak || *s!='0') ++s;
2071 *s=tohex((i>>4)&0xf); if (s!=bak || *s!='0') ++s;
2072 *s=tohex(i&0xf);
2073 return s-bak+1;
2074 }
2075
2076 static unsigned int fmt_ip6(char *s,const char ip[16]) {
2077 unsigned int len;
2078 unsigned int i;
2079 unsigned int temp;
2080 unsigned int compressing;
2081 int j;
2082
2083 len = 0; compressing = 0;
2084 for (j=0; j<16; j+=2) {
2085
2086 #ifdef V4MAPPEDPREFIX
2087 if (j==12 && !memcmp(ip,V4mappedprefix,12)) {
2088 inet_ntoa_r(*(struct in_addr*)(ip+12),s);
2089 temp=strlen(s);
2090 return len+temp;
2091 }
2092 #endif
2093 temp = ((unsigned long) (unsigned char) ip[j] << 8) +
2094 (unsigned long) (unsigned char) ip[j+1];
2095 if (temp == 0) {
2096 if (!compressing) {
2097 compressing=1;
2098 if (j==0) {
2099 *s++=':'; ++len;
2100 }
2101 }
2102 } else {
2103 if (compressing) {
2104 compressing=0;
2105 *s++=':'; ++len;
2106 }
2107 i = fmt_xlong(s,temp); len += i; s += i;
2108 if (j<14) {
2109 *s++ = ':';
2110 ++len;
2111 }
2112 }
2113 }
2114 if (compressing) {
2115 *s++=':'; ++len;
2116 }
2117 *s=0;
2118 return len;
2119 }
2120
2121 static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2122 struct pktgen_dev *pkt_dev)
2123 {
2124 struct sk_buff *skb = NULL;
2125 __u8 *eth;
2126 struct udphdr *udph;
2127 int datalen;
2128 struct ipv6hdr *iph;
2129 struct pktgen_hdr *pgh = NULL;
2130
2131 /* Update any of the values, used when we're incrementing various
2132 * fields.
2133 */
2134 mod_cur_headers(pkt_dev);
2135
2136 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16, GFP_ATOMIC);
2137 if (!skb) {
2138 sprintf(pkt_dev->result, "No memory");
2139 return NULL;
2140 }
2141
2142 skb_reserve(skb, 16);
2143
2144 /* Reserve for ethernet and IP header */
2145 eth = (__u8 *) skb_push(skb, 14);
2146 iph = (struct ipv6hdr *)skb_put(skb, sizeof(struct ipv6hdr));
2147 udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr));
2148
2149 memcpy(eth, pkt_dev->hh, 12);
2150 *(u16*)&eth[12] = __constant_htons(ETH_P_IPV6);
2151
2152 datalen = pkt_dev->cur_pkt_size-14-
2153 sizeof(struct ipv6hdr)-sizeof(struct udphdr); /* Eth + IPh + UDPh */
2154
2155 if (datalen < sizeof(struct pktgen_hdr)) {
2156 datalen = sizeof(struct pktgen_hdr);
2157 if (net_ratelimit())
2158 printk(KERN_INFO "pktgen: increased datalen to %d\n", datalen);
2159 }
2160
2161 udph->source = htons(pkt_dev->cur_udp_src);
2162 udph->dest = htons(pkt_dev->cur_udp_dst);
2163 udph->len = htons(datalen + sizeof(struct udphdr));
2164 udph->check = 0; /* No checksum */
2165
2166 *(u32*)iph = __constant_htonl(0x60000000); /* Version + flow */
2167
2168 iph->hop_limit = 32;
2169
2170 iph->payload_len = htons(sizeof(struct udphdr) + datalen);
2171 iph->nexthdr = IPPROTO_UDP;
2172
2173 ipv6_addr_copy(&iph->daddr, &pkt_dev->cur_in6_daddr);
2174 ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr);
2175
2176 skb->mac.raw = ((u8 *)iph) - 14;
2177 skb->protocol = __constant_htons(ETH_P_IPV6);
2178 skb->dev = odev;
2179 skb->pkt_type = PACKET_HOST;
2180
2181 if (pkt_dev->nfrags <= 0)
2182 pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
2183 else {
2184 int frags = pkt_dev->nfrags;
2185 int i;
2186
2187 pgh = (struct pktgen_hdr*)(((char*)(udph)) + 8);
2188
2189 if (frags > MAX_SKB_FRAGS)
2190 frags = MAX_SKB_FRAGS;
2191 if (datalen > frags*PAGE_SIZE) {
2192 skb_put(skb, datalen-frags*PAGE_SIZE);
2193 datalen = frags*PAGE_SIZE;
2194 }
2195
2196 i = 0;
2197 while (datalen > 0) {
2198 struct page *page = alloc_pages(GFP_KERNEL, 0);
2199 skb_shinfo(skb)->frags[i].page = page;
2200 skb_shinfo(skb)->frags[i].page_offset = 0;
2201 skb_shinfo(skb)->frags[i].size =
2202 (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
2203 datalen -= skb_shinfo(skb)->frags[i].size;
2204 skb->len += skb_shinfo(skb)->frags[i].size;
2205 skb->data_len += skb_shinfo(skb)->frags[i].size;
2206 i++;
2207 skb_shinfo(skb)->nr_frags = i;
2208 }
2209
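/* As in the IPv4 path: split the last fragment in half until the requested
 * number of frags is reached. (Descriptive comment added for clarity.) */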
2210 while (i < frags) {
2211 int rem;
2212
2213 if (i == 0)
2214 break;
2215
2216 rem = skb_shinfo(skb)->frags[i - 1].size / 2;
2217 if (rem == 0)
2218 break;
2219
2220 skb_shinfo(skb)->frags[i - 1].size -= rem;
2221
2222 skb_shinfo(skb)->frags[i] = skb_shinfo(skb)->frags[i - 1];
2223 get_page(skb_shinfo(skb)->frags[i].page);
2224 skb_shinfo(skb)->frags[i].page = skb_shinfo(skb)->frags[i - 1].page;
2225 skb_shinfo(skb)->frags[i].page_offset += skb_shinfo(skb)->frags[i - 1].size;
2226 skb_shinfo(skb)->frags[i].size = rem;
2227 i++;
2228 skb_shinfo(skb)->nr_frags = i;
2229 }
2230 }
2231
2232 /* Stamp the time and sequence number, converting them to network byte order */
2233 /* should we update cloned packets too ? */
2234 if (pgh) {
2235 struct timeval timestamp;
2236
2237 pgh->pgh_magic = htonl(PKTGEN_MAGIC);
2238 pgh->seq_num = htonl(pkt_dev->seq_num);
2239
2240 do_gettimeofday(&timestamp);
2241 pgh->tv_sec = htonl(timestamp.tv_sec);
2242 pgh->tv_usec = htonl(timestamp.tv_usec);
2243 }
2244 pkt_dev->seq_num++;
2245
2246 return skb;
2247 }
2248
2249 static inline struct sk_buff *fill_packet(struct net_device *odev,
2250 struct pktgen_dev *pkt_dev)
2251 {
2252 if(pkt_dev->flags & F_IPV6)
2253 return fill_packet_ipv6(odev, pkt_dev);
2254 else
2255 return fill_packet_ipv4(odev, pkt_dev);
2256 }
2257
2258 static void pktgen_clear_counters(struct pktgen_dev *pkt_dev)
2259 {
2260 pkt_dev->seq_num = 1;
2261 pkt_dev->idle_acc = 0;
2262 pkt_dev->sofar = 0;
2263 pkt_dev->tx_bytes = 0;
2264 pkt_dev->errors = 0;
2265 }
2266
2267 /* Set up structure for sending pkts, clear counters */
2268
2269 static void pktgen_run(struct pktgen_thread *t)
2270 {
2271 struct pktgen_dev *pkt_dev = NULL;
2272 int started = 0;
2273
2274 PG_DEBUG(printk("pktgen: entering pktgen_run. %p\n", t));
2275
2276 if_lock(t);
2277 for (pkt_dev = t->if_list; pkt_dev; pkt_dev = pkt_dev->next ) {
2278
2279 /*
2280 * setup odev and create initial packet.
2281 */
2282 pktgen_setup_inject(pkt_dev);
2283
2284 if(pkt_dev->odev) {
2285 pktgen_clear_counters(pkt_dev);
2286 pkt_dev->running = 1; /* Cranke yeself! */
2287 pkt_dev->skb = NULL;
2288 pkt_dev->started_at = getCurUs();
2289 pkt_dev->next_tx_us = getCurUs(); /* Transmit immediately */
2290 pkt_dev->next_tx_ns = 0;
2291
2292 strcpy(pkt_dev->result, "Starting");
2293 started++;
2294 }
2295 else
2296 strcpy(pkt_dev->result, "Error starting");
2297 }
2298 if_unlock(t);
2299 if(started) t->control &= ~(T_STOP);
2300 }
2301
2302 static void pktgen_stop_all_threads_ifs(void)
2303 {
2304 struct pktgen_thread *t = pktgen_threads;
2305
2306 PG_DEBUG(printk("pktgen: entering pktgen_stop_all_threads.\n"));
2307
2308 thread_lock();
2309 while(t) {
2310 pktgen_stop(t);
2311 t = t->next;
2312 }
2313 thread_unlock();
2314 }
2315
2316 static int thread_is_running(struct pktgen_thread *t )
2317 {
2318 struct pktgen_dev *next;
2319 int res = 0;
2320
2321 for(next=t->if_list; next; next=next->next) {
2322 if(next->running) {
2323 res = 1;
2324 break;
2325 }
2326 }
2327 return res;
2328 }
2329
2330 static int pktgen_wait_thread_run(struct pktgen_thread *t )
2331 {
2332 if_lock(t);
2333
2334 while(thread_is_running(t)) {
2335
2336 if_unlock(t);
2337
2338 msleep_interruptible(100);
2339
2340 if (signal_pending(current))
2341 goto signal;
2342 if_lock(t);
2343 }
2344 if_unlock(t);
2345 return 1;
2346 signal:
2347 return 0;
2348 }
2349
2350 static int pktgen_wait_all_threads_run(void)
2351 {
2352 struct pktgen_thread *t = pktgen_threads;
2353 int sig = 1;
2354
2355 while (t) {
2356 sig = pktgen_wait_thread_run(t);
2357 if( sig == 0 ) break;
2358 thread_lock();
2359 t=t->next;
2360 thread_unlock();
2361 }
2362 if(sig == 0) {
2363 thread_lock();
2364 while (t) {
2365 t->control |= (T_STOP);
2366 t=t->next;
2367 }
2368 thread_unlock();
2369 }
2370 return sig;
2371 }
2372
2373 static void pktgen_run_all_threads(void)
2374 {
2375 struct pktgen_thread *t = pktgen_threads;
2376
2377 PG_DEBUG(printk("pktgen: entering pktgen_run_all_threads.\n"));
2378
2379 thread_lock();
2380
2381 while(t) {
2382 t->control |= (T_RUN);
2383 t = t->next;
2384 }
2385 thread_unlock();
2386
2387 schedule_timeout_interruptible(msecs_to_jiffies(125)); /* Propagate thread->control */
2388
2389 pktgen_wait_all_threads_run();
2390 }
2391
2392
2393 static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
2394 {
2395 __u64 total_us, bps, mbps, pps, idle;
2396 char *p = pkt_dev->result;
2397
2398 total_us = pkt_dev->stopped_at - pkt_dev->started_at;
2399
2400 idle = pkt_dev->idle_acc;
2401
2402 p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n",
2403 (unsigned long long) total_us,
2404 (unsigned long long)(total_us - idle),
2405 (unsigned long long) idle,
2406 (unsigned long long) pkt_dev->sofar,
2407 pkt_dev->cur_pkt_size, nr_frags);
2408
2409 pps = pkt_dev->sofar * USEC_PER_SEC;
2410
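/* do_div() takes a 32-bit divisor: scale pps and total_us down together
 * until total_us fits in 32 bits, preserving their ratio. */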
2411 while ((total_us >> 32) != 0) {
2412 pps >>= 1;
2413 total_us >>= 1;
2414 }
2415
2416 do_div(pps, total_us);
2417
2418 bps = pps * 8 * pkt_dev->cur_pkt_size;
2419
2420 mbps = bps;
2421 do_div(mbps, 1000000);
2422 p += sprintf(p, " %llupps %lluMb/sec (%llubps) errors: %llu",
2423 (unsigned long long) pps,
2424 (unsigned long long) mbps,
2425 (unsigned long long) bps,
2426 (unsigned long long) pkt_dev->errors);
2427 }
2428
2429
2430 /* Set stopped-at timer, remove from running list, do counters & statistics */
2431
2432 static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
2433 {
2434
2435 if (!pkt_dev->running) {
2436 printk("pktgen: interface: %s is already stopped\n", pkt_dev->ifname);
2437 return -EINVAL;
2438 }
2439
2440 pkt_dev->stopped_at = getCurUs();
2441 pkt_dev->running = 0;
2442
2443 show_results(pkt_dev, pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : 0);
2444
2445 if (pkt_dev->skb)
2446 kfree_skb(pkt_dev->skb);
2447
2448 pkt_dev->skb = NULL;
2449
2450 return 0;
2451 }
2452
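/* Pick the running device with the earliest next_tx_us deadline. */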
2453 static struct pktgen_dev *next_to_run(struct pktgen_thread *t )
2454 {
2455 struct pktgen_dev *next, *best = NULL;
2456
2457 if_lock(t);
2458
2459 for(next=t->if_list; next ; next=next->next) {
2460 if(!next->running) continue;
2461 if(best == NULL) best=next;
2462 else if ( next->next_tx_us < best->next_tx_us)
2463 best = next;
2464 }
2465 if_unlock(t);
2466 return best;
2467 }
2468
2469 static void pktgen_stop(struct pktgen_thread *t) {
2470 struct pktgen_dev *next = NULL;
2471
2472 PG_DEBUG(printk("pktgen: entering pktgen_stop.\n"));
2473
2474 if_lock(t);
2475
2476 for(next=t->if_list; next; next=next->next)
2477 pktgen_stop_device(next);
2478
2479 if_unlock(t);
2480 }
2481
2482 static void pktgen_rem_all_ifs(struct pktgen_thread *t)
2483 {
2484 struct pktgen_dev *cur, *next = NULL;
2485
2486 /* Remove all devices, free mem */
2487
2488 if_lock(t);
2489
2490 for(cur=t->if_list; cur; cur=next) {
2491 next = cur->next;
2492 pktgen_remove_device(t, cur);
2493 }
2494
2495 if_unlock(t);
2496 }
2497
2498 static void pktgen_rem_thread(struct pktgen_thread *t)
2499 {
2500 /* Remove from the thread list */
2501
2502 struct pktgen_thread *tmp = pktgen_threads;
2503
2504 remove_proc_entry(t->name, pg_proc_dir);
2505
2506 thread_lock();
2507
2508 if (tmp == t)
2509 pktgen_threads = tmp->next;
2510 else {
2511 while (tmp) {
2512 if (tmp->next == t) {
2513 tmp->next = t->next;
2514 t->next = NULL;
2515 break;
2516 }
2517 tmp = tmp->next;
2518 }
2519 }
2520 thread_unlock();
2521 }
2522
2523 static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
2524 {
2525 struct net_device *odev = NULL;
2526 __u64 idle_start = 0;
2527 int ret;
2528
2529 odev = pkt_dev->odev;
2530
2531 if (pkt_dev->delay_us || pkt_dev->delay_ns) {
2532 u64 now;
2533
2534 now = getCurUs();
2535 if (now < pkt_dev->next_tx_us)
2536 spin(pkt_dev, pkt_dev->next_tx_us);
2537
2538 /* This is the maximum DELAY; it has the special meaning of
2539 * "never transmit"
2540 */
2541 if (pkt_dev->delay_us == 0x7FFFFFFF) {
2542 pkt_dev->next_tx_us = getCurUs() + pkt_dev->delay_us;
2543 pkt_dev->next_tx_ns = pkt_dev->delay_ns;
2544 goto out;
2545 }
2546 }
2547
2548 if (netif_queue_stopped(odev) || need_resched()) {
2549 idle_start = getCurUs();
2550
2551 if (!netif_running(odev)) {
2552 pktgen_stop_device(pkt_dev);
2553 goto out;
2554 }
2555 if (need_resched())
2556 schedule();
2557
2558 pkt_dev->idle_acc += getCurUs() - idle_start;
2559
2560 if (netif_queue_stopped(odev)) {
2561 pkt_dev->next_tx_us = getCurUs(); /* TODO */
2562 pkt_dev->next_tx_ns = 0;
2563 goto out; /* Try the next interface */
2564 }
2565 }
2566
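/* With clone_skb > 0 the same skb is reused for clone_skb transmissions
 * before a new one is built; clone_skb == 0 builds a fresh packet every
 * time. (Descriptive comment added for clarity.) */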
2567 if (pkt_dev->last_ok || !pkt_dev->skb) {
2568 if ((++pkt_dev->clone_count >= pkt_dev->clone_skb ) || (!pkt_dev->skb)) {
2569 /* build a new pkt */
2570 if (pkt_dev->skb)
2571 kfree_skb(pkt_dev->skb);
2572
2573 pkt_dev->skb = fill_packet(odev, pkt_dev);
2574 if (pkt_dev->skb == NULL) {
2575 printk("pktgen: ERROR: couldn't allocate skb in fill_packet.\n");
2576 schedule();
2577 pkt_dev->clone_count--; /* back out increment, OOM */
2578 goto out;
2579 }
2580 pkt_dev->allocated_skbs++;
2581 pkt_dev->clone_count = 0; /* reset counter */
2582 }
2583 }
2584
2585 spin_lock_bh(&odev->xmit_lock);
2586 if (!netif_queue_stopped(odev)) {
2587
2588 atomic_inc(&(pkt_dev->skb->users));
2589 retry_now:
2590 ret = odev->hard_start_xmit(pkt_dev->skb, odev);
2591 if (likely(ret == NETDEV_TX_OK)) {
2592 pkt_dev->last_ok = 1;
2593 pkt_dev->sofar++;
2594 pkt_dev->seq_num++;
2595 pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
2596
2597 } else if (ret == NETDEV_TX_LOCKED
2598 && (odev->features & NETIF_F_LLTX)) {
2599 cpu_relax();
2600 goto retry_now;
2601 } else { /* Retry it next time */
2602
2603 atomic_dec(&(pkt_dev->skb->users));
2604
2605 if (debug && net_ratelimit())
2606 printk(KERN_INFO "pktgen: Hard xmit error\n");
2607
2608 pkt_dev->errors++;
2609 pkt_dev->last_ok = 0;
2610 }
2611
2612 pkt_dev->next_tx_us = getCurUs();
2613 pkt_dev->next_tx_ns = 0;
2614
2615 pkt_dev->next_tx_us += pkt_dev->delay_us;
2616 pkt_dev->next_tx_ns += pkt_dev->delay_ns;
2617
2618 if (pkt_dev->next_tx_ns > 1000) {
2619 pkt_dev->next_tx_us++;
2620 pkt_dev->next_tx_ns -= 1000;
2621 }
2622 }
2623
2624 else { /* Retry it next time */
2625 pkt_dev->last_ok = 0;
2626 pkt_dev->next_tx_us = getCurUs(); /* TODO */
2627 pkt_dev->next_tx_ns = 0;
2628 }
2629
2630 spin_unlock_bh(&odev->xmit_lock);
2631
2632 /* If pkt_dev->count is zero, then run forever */
2633 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
2634 if (atomic_read(&(pkt_dev->skb->users)) != 1) {
2635 idle_start = getCurUs();
2636 while (atomic_read(&(pkt_dev->skb->users)) != 1) {
2637 if (signal_pending(current)) {
2638 break;
2639 }
2640 schedule();
2641 }
2642 pkt_dev->idle_acc += getCurUs() - idle_start;
2643 }
2644
2645 /* Done with this */
2646 pktgen_stop_device(pkt_dev);
2647 }
2648 out:;
2649 }
2650
2651 /*
2652 * Main loop of the thread goes here
2653 */
2654
2655 static void pktgen_thread_worker(struct pktgen_thread *t)
2656 {
2657 DEFINE_WAIT(wait);
2658 struct pktgen_dev *pkt_dev = NULL;
2659 int cpu = t->cpu;
2660 sigset_t tmpsig;
2661 u32 max_before_softirq;
2662 u32 tx_since_softirq = 0;
2663
2664 daemonize("pktgen/%d", cpu);
2665
2666 /* Block all signals except SIGKILL, SIGSTOP and SIGTERM */
2667
2668 spin_lock_irq(&current->sighand->siglock);
2669 tmpsig = current->blocked;
2670 siginitsetinv(&current->blocked,
2671 sigmask(SIGKILL) |
2672 sigmask(SIGSTOP)|
2673 sigmask(SIGTERM));
2674
2675 recalc_sigpending();
2676 spin_unlock_irq(&current->sighand->siglock);
2677
2678 /* Migrate to the right CPU */
2679 set_cpus_allowed(current, cpumask_of_cpu(cpu));
2680 if (smp_processor_id() != cpu)
2681 BUG();
2682
2683 init_waitqueue_head(&t->queue);
2684
2685 t->control &= ~(T_TERMINATE);
2686 t->control &= ~(T_RUN);
2687 t->control &= ~(T_STOP);
2688 t->control &= ~(T_REMDEV);
2689
2690 t->pid = current->pid;
2691
2692 PG_DEBUG(printk("pktgen: starting pktgen/%d: pid=%d\n", cpu, current->pid));
2693
2694 max_before_softirq = t->max_before_softirq;
2695
2696 __set_current_state(TASK_INTERRUPTIBLE);
2697 mb();
2698
2699 while (1) {
2700
2701 __set_current_state(TASK_RUNNING);
2702
2703 /*
2704 * Get next dev to xmit -- if any.
2705 */
2706
2707 pkt_dev = next_to_run(t);
2708
2709 if (pkt_dev) {
2710
2711 pktgen_xmit(pkt_dev);
2712
2713 /*
2714 * We like to stay RUNNING but must also give
2715 * others fair share.
2716 */
2717
2718 tx_since_softirq += pkt_dev->last_ok;
2719
2720 if (tx_since_softirq > max_before_softirq) {
2721 if (local_softirq_pending())
2722 do_softirq();
2723 tx_since_softirq = 0;
2724 }
2725 } else {
2726 prepare_to_wait(&(t->queue), &wait, TASK_INTERRUPTIBLE);
2727 schedule_timeout(HZ/10);
2728 finish_wait(&(t->queue), &wait);
2729 }
2730
2731 /*
2732 * Back from sleep, either due to the timeout or signal.
2733 * We check if we have any "posted" work for us.
2734 */
2735
2736 if (t->control & T_TERMINATE || signal_pending(current))
2737 /* we received a request to terminate ourselves */
2738 break;
2739
2740
2741 if(t->control & T_STOP) {
2742 pktgen_stop(t);
2743 t->control &= ~(T_STOP);
2744 }
2745
2746 if(t->control & T_RUN) {
2747 pktgen_run(t);
2748 t->control &= ~(T_RUN);
2749 }
2750
2751 if(t->control & T_REMDEV) {
2752 pktgen_rem_all_ifs(t);
2753 t->control &= ~(T_REMDEV);
2754 }
2755
2756 if (need_resched())
2757 schedule();
2758 }
2759
2760 PG_DEBUG(printk("pktgen: %s stopping all devices\n", t->name));
2761 pktgen_stop(t);
2762
2763 PG_DEBUG(printk("pktgen: %s removing all devices\n", t->name));
2764 pktgen_rem_all_ifs(t);
2765
2766 PG_DEBUG(printk("pktgen: %s removing thread.\n", t->name));
2767 pktgen_rem_thread(t);
2768 }
2769
2770 static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, const char* ifname)
2771 {
2772 struct pktgen_dev *pkt_dev = NULL;
2773 if_lock(t);
2774
2775 for(pkt_dev=t->if_list; pkt_dev; pkt_dev = pkt_dev->next ) {
2776 if (strncmp(pkt_dev->ifname, ifname, IFNAMSIZ) == 0) {
2777 break;
2778 }
2779 }
2780
2781 if_unlock(t);
2782 PG_DEBUG(printk("pktgen: find_dev(%s) returning %p\n", ifname,pkt_dev));
2783 return pkt_dev;
2784 }
2785
2786 /*
2787 * Adds a dev at front of if_list.
2788 */
2789
2790 static int add_dev_to_thread(struct pktgen_thread *t, struct pktgen_dev *pkt_dev)
2791 {
2792 int rv = 0;
2793
2794 if_lock(t);
2795
2796 if (pkt_dev->pg_thread) {
2797 printk("pktgen: ERROR: already assigned to a thread.\n");
2798 rv = -EBUSY;
2799 goto out;
2800 }
2801 pkt_dev->next =t->if_list; t->if_list=pkt_dev;
2802 pkt_dev->pg_thread = t;
2803 pkt_dev->running = 0;
2804
2805 out:
2806 if_unlock(t);
2807 return rv;
2808 }
2809
2810 /* Called under thread lock */
2811
2812 static int pktgen_add_device(struct pktgen_thread *t, const char* ifname)
2813 {
2814 struct pktgen_dev *pkt_dev;
2815 struct proc_dir_entry *pe;
2816
2817 /* We don't allow a device to be on several threads */
2818
2819 pkt_dev = __pktgen_NN_threads(ifname, FIND);
2820 if (pkt_dev) {
2821 printk("pktgen: ERROR: interface already used.\n");
2822 return -EBUSY;
2823 }
2824
2825 pkt_dev = kzalloc(sizeof(struct pktgen_dev), GFP_KERNEL);
2826 if (!pkt_dev)
2827 return -ENOMEM;
2828
2829 pkt_dev->flows = vmalloc(MAX_CFLOWS*sizeof(struct flow_state));
2830 if (pkt_dev->flows == NULL) {
2831 kfree(pkt_dev);
2832 return -ENOMEM;
2833 }
2834 memset(pkt_dev->flows, 0, MAX_CFLOWS*sizeof(struct flow_state));
2835
2836 pkt_dev->min_pkt_size = ETH_ZLEN;
2837 pkt_dev->max_pkt_size = ETH_ZLEN;
2838 pkt_dev->nfrags = 0;
2839 pkt_dev->clone_skb = pg_clone_skb_d;
2840 pkt_dev->delay_us = pg_delay_d / 1000;
2841 pkt_dev->delay_ns = pg_delay_d % 1000;
2842 pkt_dev->count = pg_count_d;
2843 pkt_dev->sofar = 0;
2844 pkt_dev->udp_src_min = 9; /* sink port */
2845 pkt_dev->udp_src_max = 9;
2846 pkt_dev->udp_dst_min = 9;
2847 pkt_dev->udp_dst_max = 9;
2848
2849 strncpy(pkt_dev->ifname, ifname, IFNAMSIZ);
2850
2851 if (! pktgen_setup_dev(pkt_dev)) {
2852 printk("pktgen: ERROR: pktgen_setup_dev failed.\n");
2853 if (pkt_dev->flows)
2854 vfree(pkt_dev->flows);
2855 kfree(pkt_dev);
2856 return -ENODEV;
2857 }
2858
2859 pe = create_proc_entry(ifname, 0600, pg_proc_dir);
2860 if (!pe) {
2861 printk("pktgen: cannot create %s/%s procfs entry.\n",
2862 PG_PROC_DIR, ifname);
2863 if (pkt_dev->flows)
2864 vfree(pkt_dev->flows);
2865 kfree(pkt_dev);
2866 return -EINVAL;
2867 }
2868 pe->proc_fops = &pktgen_if_fops;
2869 pe->data = pkt_dev;
2870
2871 return add_dev_to_thread(t, pkt_dev);
2872 }
2873
2874 static struct pktgen_thread * __init pktgen_find_thread(const char* name)
2875 {
2876 struct pktgen_thread *t = NULL;
2877
2878 thread_lock();
2879
2880 t = pktgen_threads;
2881 while (t) {
2882 if (strcmp(t->name, name) == 0)
2883 break;
2884
2885 t = t->next;
2886 }
2887 thread_unlock();
2888 return t;
2889 }
2890
2891 static int __init pktgen_create_thread(const char* name, int cpu)
2892 {
2893 struct pktgen_thread *t = NULL;
2894 struct proc_dir_entry *pe;
2895
2896 if (strlen(name) > 31) {
2897 printk("pktgen: ERROR: Thread name cannot be more than 31 characters.\n");
2898 return -EINVAL;
2899 }
2900
2901 if (pktgen_find_thread(name)) {
2902 printk("pktgen: ERROR: thread: %s already exists\n", name);
2903 return -EINVAL;
2904 }
2905
2906 t = kzalloc(sizeof(struct pktgen_thread), GFP_KERNEL);
2907 if (!t) {
2908 printk("pktgen: ERROR: out of memory, can't create new thread.\n");
2909 return -ENOMEM;
2910 }
2911
2912 strcpy(t->name, name);
2913 spin_lock_init(&t->if_lock);
2914 t->cpu = cpu;
2915
2916 pe = create_proc_entry(t->name, 0600, pg_proc_dir);
2917 if (!pe) {
2918 printk("pktgen: cannot create %s/%s procfs entry.\n",
2919 PG_PROC_DIR, t->name);
2920 kfree(t);
2921 return -EINVAL;
2922 }
2923
2924 pe->proc_fops = &pktgen_thread_fops;
2925 pe->data = t;
2926
2927 t->next = pktgen_threads;
2928 pktgen_threads = t;
2929
2930 if (kernel_thread((void *) pktgen_thread_worker, (void *) t,
2931 CLONE_FS | CLONE_FILES | CLONE_SIGHAND) < 0)
2932 printk("pktgen: kernel_thread() failed for cpu %d\n", t->cpu);
2933
2934 return 0;
2935 }
2936
2937 /*
2938 * Removes a device from the thread if_list.
2939 */
2940 static void _rem_dev_from_if_list(struct pktgen_thread *t, struct pktgen_dev *pkt_dev)
2941 {
2942 struct pktgen_dev *i, *prev = NULL;
2943
2944 i = t->if_list;
2945
2946 while(i) {
2947 if(i == pkt_dev) {
2948 if(prev) prev->next = i->next;
2949 else t->if_list = i->next;
2950 break;
2951 }
2952 prev = i;
2953 i=i->next;
2954 }
2955 }
2956
2957 static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *pkt_dev)
2958 {
2959
2960 PG_DEBUG(printk("pktgen: remove_device pkt_dev=%p\n", pkt_dev));
2961
2962 if (pkt_dev->running) {
2963 printk("pktgen: WARNING: trying to remove a running interface, stopping it now.\n");
2964 pktgen_stop_device(pkt_dev);
2965 }
2966
2967 /* Dis-associate from the interface */
2968
2969 if (pkt_dev->odev) {
2970 dev_put(pkt_dev->odev);
2971 pkt_dev->odev = NULL;
2972 }
2973
2974 /* And update the thread if_list */
2975
2976 _rem_dev_from_if_list(t, pkt_dev);
2977
2978 /* Clean up proc file system */
2979
2980 remove_proc_entry(pkt_dev->ifname, pg_proc_dir);
2981
2982 if (pkt_dev->flows)
2983 vfree(pkt_dev->flows);
2984 kfree(pkt_dev);
2985 return 0;
2986 }
2987
2988 static int __init pg_init(void)
2989 {
2990 int cpu;
2991 struct proc_dir_entry *pe;
2992
2993 printk(version);
2994
2995 pg_proc_dir = proc_mkdir(PG_PROC_DIR, proc_net);
2996 if (!pg_proc_dir)
2997 return -ENODEV;
2998 pg_proc_dir->owner = THIS_MODULE;
2999
3000 pe = create_proc_entry(PGCTRL, 0600, pg_proc_dir);
3001 if (pe == NULL) {
3002 printk("pktgen: ERROR: cannot create %s procfs entry.\n", PGCTRL);
3003 proc_net_remove(PG_PROC_DIR);
3004 return -EINVAL;
3005 }
3006
3007 pe->proc_fops = &pktgen_fops;
3008 pe->data = NULL;
3009
3010 /* Register us to receive netdevice events */
3011 register_netdevice_notifier(&pktgen_notifier_block);
3012
3013 for_each_online_cpu(cpu) {
3014 char buf[30];
3015
3016 sprintf(buf, "kpktgend_%i", cpu);
3017 pktgen_create_thread(buf, cpu);
3018 }
3019 return 0;
3020 }
3021
3022 static void __exit pg_cleanup(void)
3023 {
3024 wait_queue_head_t queue;
3025 init_waitqueue_head(&queue);
3026
3027 /* Stop all interfaces & threads */
3028
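/* Each worker removes itself from pktgen_threads via pktgen_rem_thread(),
 * so wait for the list head to change before terminating the next thread. */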
3029 while (pktgen_threads) {
3030 struct pktgen_thread *t = pktgen_threads;
3031 pktgen_threads->control |= (T_TERMINATE);
3032
3033 wait_event_interruptible_timeout(queue, (t != pktgen_threads), HZ);
3034 }
3035
3036 /* Un-register us from receiving netdevice events */
3037 unregister_netdevice_notifier(&pktgen_notifier_block);
3038
3039 /* Clean up proc file system */
3040 remove_proc_entry(PGCTRL, pg_proc_dir);
3041 proc_net_remove(PG_PROC_DIR);
3042 }
3043
3044
3045 module_init(pg_init);
3046 module_exit(pg_cleanup);
3047
3048 MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se>");
3049 MODULE_DESCRIPTION("Packet Generator tool");
3050 MODULE_LICENSE("GPL");
3051 module_param(pg_count_d, int, 0);
3052 module_param(pg_delay_d, int, 0);
3053 module_param(pg_clone_skb_d, int, 0);
3054 module_param(debug, int, 0);