/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/kernel.h>	/* printk() */
#include <linux/slab.h>		/* kmalloc() */
#include <linux/errno.h>	/* error codes */
#include <linux/types.h>	/* size_t */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/netdevice.h>	/* struct device, and other headers */
#include <linux/etherdevice.h>	/* eth_type_trans */
#include <linux/skbuff.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/hugetlb.h>
#include <linux/in6.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/ctype.h>
#include <linux/ip.h>		/* struct iphdr, for the TSO path */
#include <linux/tcp.h>

#include <asm/checksum.h>
#include <asm/homecache.h>

#include <gxio/mpipe.h>
#include <arch/sim.h>		/* sim_enable_mpipe_links() */
/* Default transmit lockup timeout period, in jiffies. */
#define TILE_NET_TIMEOUT (5 * HZ)

/* The maximum number of distinct channels (idesc.channel is 5 bits). */
#define TILE_NET_CHANNELS 32

/* Maximum number of idescs to handle per "poll". */
#define TILE_NET_BATCH 128

/* Maximum number of packets to handle per "poll". */
#define TILE_NET_WEIGHT 64

/* Number of entries in each iqueue. */
#define IQUEUE_ENTRIES 512

/* Number of entries in each equeue. */
#define EQUEUE_ENTRIES 2048

/* Total header bytes per equeue slot.  Must be big enough for 2 bytes
 * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to
 * 60 bytes of actual TCP header.  We round up to align to cache lines.
 */
#define HEADER_BYTES 128
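
/* NOTE: The sizing above works out to 2 + 14 + 60 = 76 bytes of
 * worst-case header, rounded up to 128, i.e. two 64-byte cache lines
 * (the 64-byte line size is our assumption about this hardware, not
 * something stated here).
 */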
/* Maximum completions per cpu per device (must be a power of two).
 * ISSUE: What is the right number here?  If this is too small, then
 * egress might block waiting for free space in a completions array.
 * ISSUE: At the least, allocate these only for initialized echannels.
 */
#define TILE_NET_MAX_COMPS 64

#define MAX_FRAGS (MAX_SKB_FRAGS + 1)

/* Size of completions data to allocate.
 * ISSUE: Probably more than needed since we don't use all the channels.
 */
#define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps))

/* Size of NotifRing data to allocate. */
#define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t))

/* Timeout to wake the per-device TX timer after we stop the queue.
 * We don't want the timeout too short (adds overhead, and might end
 * up causing stop/wake/stop/wake cycles) or too long (affects performance).
 * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets.
 */
#define TX_TIMER_DELAY_USEC 30

/* Timeout to wake the per-cpu egress timer to free completions. */
#define EGRESS_TIMER_DELAY_USEC 1000
MODULE_AUTHOR("Tilera Corporation");
MODULE_LICENSE("GPL");
/* A "packet fragment" (a chunk of memory). */
struct frag {
	void *buf;
	size_t length;
};

/* A single completion. */
struct tile_net_comp {
	/* The "complete_count" when the completion will be complete. */
	s64 when;
	/* The buffer to be freed when the completion is complete. */
	struct sk_buff *skb;
};

/* The completions for a given cpu and echannel. */
struct tile_net_comps {
	/* The completions. */
	struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS];
	/* The number of completions used. */
	unsigned long comp_next;
	/* The number of completions freed. */
	unsigned long comp_last;
};

/* The transmit wake timer for a given cpu and echannel. */
struct tile_net_tx_wake {
	struct hrtimer timer;
	struct net_device *dev;
};
/* Info for a specific cpu. */
struct tile_net_info {
	/* The NAPI struct. */
	struct napi_struct napi;
	/* Packet queue. */
	gxio_mpipe_iqueue_t iqueue;
	/* Our cpu. */
	int my_cpu;
	/* True if iqueue is valid. */
	bool has_iqueue;
	/* True if we have added/enabled the NAPI struct. */
	bool napi_added;
	bool napi_enabled;
	/* Number of small sk_buffs which must still be provided. */
	unsigned int num_needed_small_buffers;
	/* Number of large sk_buffs which must still be provided. */
	unsigned int num_needed_large_buffers;
	/* A timer for handling egress completions. */
	struct hrtimer egress_timer;
	/* True if "egress_timer" is scheduled. */
	bool egress_timer_scheduled;
	/* Comps for each egress channel. */
	struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
	/* Transmit wake timer for each egress channel. */
	struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
};
/* Info for egress on a particular egress channel. */
struct tile_net_egress {
	/* The "equeue". */
	gxio_mpipe_equeue_t *equeue;
	/* The headers for TSO. */
	unsigned char *headers;
};

/* Info for a specific device. */
struct tile_net_priv {
	/* Our network device. */
	struct net_device *dev;
	/* The primary link. */
	gxio_mpipe_link_t link;
	/* The primary channel, if open, else -1. */
	int channel;
	/* The "loopify" egress link, if needed. */
	gxio_mpipe_link_t loopify_link;
	/* The "loopify" egress channel, if open, else -1. */
	int loopify_channel;
	/* The egress channel (channel or loopify_channel). */
	int echannel;
	/* Device statistics. */
	struct net_device_stats stats;
};

/* Egress info, indexed by "priv->echannel" (lazily created as needed). */
static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS];
/* Devices currently associated with each channel.
 * NOTE: The array entry can become NULL after ifconfig down, but
 * we do not free the underlying net_device structures, so it is
 * safe to use a pointer after reading it from this array.
 */
static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS];

/* A mutex for "tile_net_devs_for_channel". */
static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);

/* The per-cpu info. */
static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);

/* The "context" for all devices. */
static gxio_mpipe_context_t context;

/* Buffer sizes and mpipe enum codes for buffer stacks.
 * See arch/tile/include/gxio/mpipe.h for the set of possible values.
 */
#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128
#define BUFFER_SIZE_SMALL 128
#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664
#define BUFFER_SIZE_LARGE 1664

/* The small/large "buffer stacks". */
static int small_buffer_stack = -1;
static int large_buffer_stack = -1;

/* Amount of memory allocated for each buffer stack. */
static size_t buffer_stack_size;

/* The actual memory allocated for the buffer stacks. */
static void *small_buffer_stack_va;
static void *large_buffer_stack_va;

/* The buckets. */
static int first_bucket = -1;
static int num_buckets = 1;

/* The ingress irq. */
static int ingress_irq = -1;

/* Text value of tile_net.cpus if passed as a module parameter. */
static char *network_cpus_string;

/* The actual cpus in "network_cpus". */
static struct cpumask network_cpus_map;

/* If "loopify=LINK" was specified, this is "LINK". */
static char *loopify_link_name;

/* If "tile_net.custom" was specified, this is non-NULL. */
static char *custom_str;
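
/* NOTE: For example, booting with "tile_net.cpus=2-5 tile_net.loopify=gbe0"
 * would dedicate cpus 2 through 5 to ingress and route "gbe0" traffic
 * through the loop0/loop1 links (an illustrative combination, not a
 * configuration taken from this file).
 */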
/* The "tile_net.cpus" argument specifies the cpus that are dedicated
 * to handle ingress packets.
 *
 * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where
 * m, n, x, y are integer numbers that represent the cpus that can be
 * neither a dedicated cpu nor a dataplane cpu.
 */
static bool network_cpus_init(void)
{
	char buf[1024];
	int rc;

	if (network_cpus_string == NULL)
		return false;

	rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map);
	if (rc != 0) {
		pr_warn("tile_net.cpus=%s: malformed cpu list\n",
			network_cpus_string);
		return false;
	}

	/* Remove dedicated cpus. */
	cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask);

	if (cpumask_empty(&network_cpus_map)) {
		pr_warn("Ignoring empty tile_net.cpus='%s'.\n",
			network_cpus_string);
		return false;
	}

	cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
	pr_info("Linux network CPUs: %s\n", buf);
	return true;
}
module_param_named(cpus, network_cpus_string, charp, 0444);
MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts");

/* The "tile_net.loopify=LINK" argument causes the named device to
 * actually use "loop0" for ingress, and "loop1" for egress.  This
 * allows an app to sit between the actual link and linux, passing
 * (some) packets along to linux, and forwarding (some) packets sent
 * out from linux.
 */
module_param_named(loopify, loopify_link_name, charp, 0444);
MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");

/* The "tile_net.custom" argument causes us to ignore the "conventional"
 * classifier metadata, in particular, the "l2_offset".
 */
module_param_named(custom, custom_str, charp, 0444);
MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
/* Atomically update a statistics field.
 * Note that on TILE-Gx, this operation is fire-and-forget on the
 * issuing core (single-cycle dispatch) and takes only a few cycles
 * longer than a regular store when the request reaches the home cache.
 * No expensive bus management overhead is required.
 */
static void tile_net_stats_add(unsigned long value, unsigned long *field)
{
	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long));
	atomic_long_add(value, (atomic_long_t *)field);
}
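
/* NOTE: The next two functions implement the buffer scheme assumed by
 * the rest of this driver: each "buffer" handed to mPIPE is the data
 * area of an ordinary sk_buff, with a back-pointer to the sk_buff
 * stored in the bytes immediately below skb->data, and skb->data
 * itself aligned to 128 bytes (presumably an mPIPE buffer-alignment
 * requirement; the code does not say).
 */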
/* Allocate and push a buffer. */
static bool tile_net_provide_buffer(bool small)
{
	int stack = small ? small_buffer_stack : large_buffer_stack;
	const unsigned long buffer_alignment = 128;
	struct sk_buff *skb;
	int len;

	len = sizeof(struct sk_buff **) + buffer_alignment;
	len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE);
	skb = dev_alloc_skb(len);
	if (skb == NULL)
		return false;

	/* Make room for a back-pointer to 'skb' and guarantee alignment. */
	skb_reserve(skb, sizeof(struct sk_buff **));
	skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1));

	/* Save a back-pointer to 'skb'. */
	*(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb;

	/* Make sure "skb" and the back-pointer have been flushed. */
	wmb();

	gxio_mpipe_push_buffer(&context, stack,
			       (void *)va_to_tile_io_addr(skb->data));

	return true;
}
/* Convert a raw mpipe buffer to its matching skb pointer. */
static struct sk_buff *mpipe_buf_to_skb(void *va)
{
	/* Acquire the associated "skb". */
	struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
	struct sk_buff *skb = *skb_ptr;

	/* Paranoia. */
	if (skb->data != va) {
		/* Panic here since there's a reasonable chance
		 * that corrupt buffers means generic memory
		 * corruption, with unpredictable system effects.
		 */
		panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p",
		      va, skb, skb->data);
	}

	return skb;
}
/* Pop all buffers from the given stack and free their skbs. */
static void tile_net_pop_all_buffers(int stack)
{
	for (;;) {
		tile_io_addr_t addr =
			(tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack);
		if (addr == 0)
			break;
		dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
	}
}
/* Provide linux buffers to mPIPE. */
static void tile_net_provide_needed_buffers(void)
{
	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);

	while (info->num_needed_small_buffers != 0) {
		if (!tile_net_provide_buffer(true))
			goto oops;
		info->num_needed_small_buffers--;
	}

	while (info->num_needed_large_buffers != 0) {
		if (!tile_net_provide_buffer(false))
			goto oops;
		info->num_needed_large_buffers--;
	}

	return;

oops:
	/* Add a description to the page allocation failure dump. */
	pr_notice("Tile %d still needs some buffers\n", info->my_cpu);
}
/* Decide whether an incoming packet should be dropped. */
static inline bool filter_packet(struct net_device *dev, void *buf)
{
	/* Filter packets received before we're up. */
	if (dev == NULL || !(dev->flags & IFF_UP))
		return true;

	/* Filter out packets that aren't for us. */
	if (!(dev->flags & IFF_PROMISC) &&
	    !is_multicast_ether_addr(buf) &&
	    compare_ether_addr(dev->dev_addr, buf) != 0)
		return true;

	return false;
}
/* Deliver an ingress packet to the kernel as an sk_buff. */
static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
				 gxio_mpipe_idesc_t *idesc, unsigned long len)
{
	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
	struct tile_net_priv *priv = netdev_priv(dev);

	/* Encode the actual packet length. */
	skb_put(skb, len);

	skb->protocol = eth_type_trans(skb, dev);

	/* Acknowledge "good" hardware checksums. */
	if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	netif_receive_skb(skb);

	/* Update stats. */
	tile_net_stats_add(1, &priv->stats.rx_packets);
	tile_net_stats_add(len, &priv->stats.rx_bytes);

	/* Need a new buffer. */
	if (idesc->size == BUFFER_SIZE_SMALL_ENUM)
		info->num_needed_small_buffers++;
	else
		info->num_needed_large_buffers++;
}
/* Handle a packet.  Return true if "processed", false if "filtered". */
static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
{
	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
	struct net_device *dev = tile_net_devs_for_channel[idesc->channel];
	uint8_t l2_offset;
	void *va;
	void *buf;
	unsigned long len;
	bool filter;

	/* Drop packets for which no buffer was available.
	 * NOTE: This happens under heavy load.
	 */
	if (idesc->be) {
		struct tile_net_priv *priv = netdev_priv(dev);
		tile_net_stats_add(1, &priv->stats.rx_dropped);
		gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
		if (net_ratelimit())
			pr_info("Dropping packet (insufficient buffers).\n");
		return false;
	}

	/* Get the "l2_offset", if allowed. */
	l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);

	/* Get the raw buffer VA (includes "headroom"). */
	va = tile_io_addr_to_va((unsigned long)(long)idesc->va);

	/* Get the actual packet start/length. */
	buf = va + l2_offset;
	len = idesc->l2_size - l2_offset;

	/* Point "va" at the raw buffer. */
	va -= NET_IP_ALIGN;

	filter = filter_packet(dev, buf);
	if (filter) {
		gxio_mpipe_iqueue_drop(&info->iqueue, idesc);
	} else {
		struct sk_buff *skb = mpipe_buf_to_skb(va);

		/* Skip headroom, and any custom header. */
		skb_reserve(skb, NET_IP_ALIGN + l2_offset);

		tile_net_receive_skb(dev, skb, idesc, len);
	}

	gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
	return !filter;
}
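
/* NOTE: tile_net_poll() below follows the standard NAPI contract: the
 * return value is the number of packets processed, and returning less
 * than "budget" (after calling napi_complete()) tells the stack that
 * polling is done and interrupt-driven operation may resume.
 */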
/* Handle some packets for the current CPU.
 *
 * This function handles up to TILE_NET_BATCH idescs per call.
 *
 * ISSUE: Since we do not provide new buffers until this function is
 * complete, we must initially provide enough buffers for each network
 * cpu to fill its iqueue and also its batched idescs.
 *
 * ISSUE: The "rotting packet" race condition occurs if a packet
 * arrives after the queue appears to be empty, and before the
 * hypervisor interrupt is re-enabled.
 */
static int tile_net_poll(struct napi_struct *napi, int budget)
{
	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
	unsigned int work = 0;
	gxio_mpipe_idesc_t *idesc;
	int i, n;

	/* Process packets. */
	while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) {
		for (i = 0; i < n; i++) {
			if (i == TILE_NET_BATCH)
				goto done;
			if (tile_net_handle_packet(idesc + i)) {
				if (++work >= budget)
					goto done;
			}
		}
	}

	/* There are no packets left. */
	napi_complete(&info->napi);

	/* Re-enable hypervisor interrupts. */
	gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring);

	/* HACK: Avoid the "rotting packet" problem. */
	if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0)
		napi_schedule(&info->napi);

	/* ISSUE: Handle completions? */

done:
	tile_net_provide_needed_buffers();

	return work;
}
/* Handle an ingress interrupt on the current cpu. */
static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused)
{
	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
	napi_schedule(&info->napi);
	return IRQ_HANDLED;
}
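
/* NOTE: "comp_next" and "comp_last" are free-running counters; their
 * difference is the number of outstanding completions, and each is
 * reduced modulo TILE_NET_MAX_COMPS (a power of two) to index into
 * "comp_queue".  For example, with comp_last == 62 and comp_next == 66,
 * four completions are pending, in slots 62, 63, 0, and 1.
 */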
/* Free some completions.  This must be called with interrupts blocked. */
static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue,
			       struct tile_net_comps *comps,
			       int limit, bool force_update)
{
	int n = 0;

	while (comps->comp_last < comps->comp_next) {
		unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS;
		struct tile_net_comp *comp = &comps->comp_queue[cid];
		if (!gxio_mpipe_equeue_is_complete(equeue, comp->when,
						   force_update || n == 0))
			break;
		dev_kfree_skb_irq(comp->skb);
		comps->comp_last++;
		if (++n == limit)
			break;
	}

	return n;
}
/* Add a completion.  This must be called with interrupts blocked.
 * tile_net_equeue_try_reserve() will have ensured a free completion entry.
 */
static void add_comp(gxio_mpipe_equeue_t *equeue,
		     struct tile_net_comps *comps,
		     uint64_t when, struct sk_buff *skb)
{
	int cid = comps->comp_next % TILE_NET_MAX_COMPS;
	comps->comp_queue[cid].when = when;
	comps->comp_queue[cid].skb = skb;
	comps->comp_next++;
}
static void tile_net_schedule_tx_wake_timer(struct net_device *dev)
{
	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
	struct tile_net_priv *priv = netdev_priv(dev);

	hrtimer_start(&info->tx_wake[priv->echannel].timer,
		      ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
		      HRTIMER_MODE_REL_PINNED);
}
static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t)
{
	struct tile_net_tx_wake *tx_wake =
		container_of(t, struct tile_net_tx_wake, timer);
	netif_wake_subqueue(tx_wake->dev, smp_processor_id());
	return HRTIMER_NORESTART;
}
/* Make sure the egress timer is scheduled. */
static void tile_net_schedule_egress_timer(void)
{
	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);

	if (!info->egress_timer_scheduled) {
		hrtimer_start(&info->egress_timer,
			      ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL),
			      HRTIMER_MODE_REL_PINNED);
		info->egress_timer_scheduled = true;
	}
}
/* The "function" for "info->egress_timer".
 *
 * This timer will reschedule itself as long as there are any pending
 * completions expected for this tile.
 */
static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
{
	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
	unsigned long irqflags;
	bool pending = false;
	int i;

	local_irq_save(irqflags);

	/* The timer is no longer scheduled. */
	info->egress_timer_scheduled = false;

	/* Free all possible comps for this tile. */
	for (i = 0; i < TILE_NET_CHANNELS; i++) {
		struct tile_net_egress *egress = &egress_for_echannel[i];
		struct tile_net_comps *comps = info->comps_for_echannel[i];
		if (comps->comp_last >= comps->comp_next)
			continue;
		tile_net_free_comps(egress->equeue, comps, -1, true);
		pending = pending || (comps->comp_last < comps->comp_next);
	}

	/* Reschedule timer if needed. */
	if (pending)
		tile_net_schedule_egress_timer();

	local_irq_restore(irqflags);

	return HRTIMER_NORESTART;
}
/* Helper function for "tile_net_update()".
 * "dev" (i.e. arg) is the device being brought up or down,
 * or NULL if all devices are now down.
 */
static void tile_net_update_cpu(void *arg)
{
	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
	struct net_device *dev = arg;

	if (!info->has_iqueue)
		return;

	if (dev != NULL) {
		if (!info->napi_added) {
			netif_napi_add(dev, &info->napi,
				       tile_net_poll, TILE_NET_WEIGHT);
			info->napi_added = true;
		}
		if (!info->napi_enabled) {
			napi_enable(&info->napi);
			info->napi_enabled = true;
		}
		enable_percpu_irq(ingress_irq, 0);
	} else {
		disable_percpu_irq(ingress_irq);
		if (info->napi_enabled) {
			napi_disable(&info->napi);
			info->napi_enabled = false;
		}
		/* FIXME: Drain the iqueue. */
	}
}
/* Helper function for tile_net_open() and tile_net_stop().
 * Always called under tile_net_devs_for_channel_mutex.
 */
static int tile_net_update(struct net_device *dev)
{
	static gxio_mpipe_rules_t rules;  /* too big to fit on the stack */
	bool saw_channel = false;
	int channel;
	int rc;
	int cpu;

	gxio_mpipe_rules_init(&rules, &context);

	for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
		if (tile_net_devs_for_channel[channel] == NULL)
			continue;
		if (!saw_channel) {
			saw_channel = true;
			gxio_mpipe_rules_begin(&rules, first_bucket,
					       num_buckets, NULL);
			gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
		}
		gxio_mpipe_rules_add_channel(&rules, channel);
	}

	/* NOTE: This can fail if there is no classifier.
	 * ISSUE: Can anything else cause it to fail?
	 */
	rc = gxio_mpipe_rules_commit(&rules);
	if (rc != 0) {
		netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc);
		return -EIO;
	}

	/* Update all cpus, sequentially (to protect "netif_napi_add()"). */
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, tile_net_update_cpu,
					 (saw_channel ? dev : NULL), 1);

	/* HACK: Allow packets to flow in the simulator. */
	if (saw_channel)
		sim_enable_mpipe_links(0, -1);

	return 0;
}
/* Allocate and initialize mpipe buffer stacks, and register them in
 * the mPIPE TLBs, for both small and large packet sizes.
 * This routine supports tile_net_init_mpipe(), below.
 */
static int init_buffer_stacks(struct net_device *dev, int num_buffers)
{
	pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
	int rc;

	/* Compute stack bytes; we round up to 64KB and then use
	 * alloc_pages() so we get the required 64KB alignment as well.
	 */
	buffer_stack_size =
		ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers),
		      64 * 1024);

	/* Allocate two buffer stack indices. */
	rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0);
	if (rc < 0) {
		netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n",
			   rc);
		return rc;
	}
	small_buffer_stack = rc;
	large_buffer_stack = rc + 1;

	/* Allocate the small memory stack. */
	small_buffer_stack_va =
		alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
	if (small_buffer_stack_va == NULL) {
		netdev_err(dev,
			   "Could not alloc %zd bytes for buffer stacks\n",
			   buffer_stack_size);
		return -ENOMEM;
	}
	rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack,
					  BUFFER_SIZE_SMALL_ENUM,
					  small_buffer_stack_va,
					  buffer_stack_size, 0);
	if (rc != 0) {
		netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc);
		return rc;
	}
	rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack,
					       hash_pte, 0);
	if (rc != 0) {
		netdev_err(dev,
			   "gxio_mpipe_register_buffer_memory failed: %d\n",
			   rc);
		return rc;
	}

	/* Allocate the large buffer stack. */
	large_buffer_stack_va =
		alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
	if (large_buffer_stack_va == NULL) {
		netdev_err(dev,
			   "Could not alloc %zd bytes for buffer stacks\n",
			   buffer_stack_size);
		return -ENOMEM;
	}
	rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack,
					  BUFFER_SIZE_LARGE_ENUM,
					  large_buffer_stack_va,
					  buffer_stack_size, 0);
	if (rc != 0) {
		netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n",
			   rc);
		return rc;
	}
	rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack,
					       hash_pte, 0);
	if (rc != 0) {
		netdev_err(dev,
			   "gxio_mpipe_register_buffer_memory failed: %d\n",
			   rc);
		return rc;
	}

	return 0;
}
/* Allocate per-cpu resources (memory for completions and idescs).
 * This routine supports tile_net_init_mpipe(), below.
 */
static int alloc_percpu_mpipe_resources(struct net_device *dev,
					int cpu, int ring)
{
	struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
	int order, i, rc;
	struct page *page;
	void *addr;

	/* Allocate the "comps". */
	order = get_order(COMPS_SIZE);
	page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
	if (page == NULL) {
		netdev_err(dev, "Failed to alloc %zd bytes comps memory\n",
			   COMPS_SIZE);
		return -ENOMEM;
	}
	addr = pfn_to_kaddr(page_to_pfn(page));
	memset(addr, 0, COMPS_SIZE);
	for (i = 0; i < TILE_NET_CHANNELS; i++)
		info->comps_for_echannel[i] =
			addr + i * sizeof(struct tile_net_comps);

	/* If this is a network cpu, create an iqueue. */
	if (cpu_isset(cpu, network_cpus_map)) {
		order = get_order(NOTIF_RING_SIZE);
		page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
		if (page == NULL) {
			netdev_err(dev,
				   "Failed to alloc %zd bytes iqueue memory\n",
				   NOTIF_RING_SIZE);
			return -ENOMEM;
		}
		addr = pfn_to_kaddr(page_to_pfn(page));
		rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++,
					    addr, NOTIF_RING_SIZE, 0);
		if (rc < 0) {
			netdev_err(dev,
				   "gxio_mpipe_iqueue_init failed: %d\n", rc);
			return rc;
		}
		info->has_iqueue = true;
	}

	return ring;
}
/* Initialize NotifGroup and buckets.
 * This routine supports tile_net_init_mpipe(), below.
 */
static int init_notif_group_and_buckets(struct net_device *dev,
					int ring, int network_cpus_count)
{
	int group, rc;

	/* Allocate one NotifGroup. */
	rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0);
	if (rc < 0) {
		netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n",
			   rc);
		return rc;
	}
	group = rc;

	/* Initialize global num_buckets value. */
	if (network_cpus_count > 4)
		num_buckets = 256;
	else if (network_cpus_count > 1)
		num_buckets = 16;

	/* Allocate some buckets, and set global first_bucket value. */
	rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0);
	if (rc < 0) {
		netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc);
		return rc;
	}
	first_bucket = rc;

	/* Init group and buckets. */
	rc = gxio_mpipe_init_notif_group_and_buckets(
		&context, group, ring, network_cpus_count,
		first_bucket, num_buckets,
		GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
	if (rc != 0) {
		netdev_err(dev,
			   "gxio_mpipe_init_notif_group_and_buckets failed: %d\n",
			   rc);
		return rc;
	}

	return 0;
}
/* Create an irq and register it, then activate the irq and request
 * interrupts on all cores.  Note that "ingress_irq" being initialized
 * is how we know not to call tile_net_init_mpipe() again.
 * This routine supports tile_net_init_mpipe(), below.
 */
static int tile_net_setup_interrupts(struct net_device *dev)
{
	int cpu, rc;

	rc = create_irq();
	if (rc < 0) {
		netdev_err(dev, "create_irq failed: %d\n", rc);
		return rc;
	}
	ingress_irq = rc;
	tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
	rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
			 0, "tile_net", NULL);
	if (rc != 0) {
		netdev_err(dev, "request_irq failed: %d\n", rc);
		destroy_irq(ingress_irq);
		ingress_irq = -1;
		return rc;
	}

	for_each_online_cpu(cpu) {
		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
		if (info->has_iqueue) {
			gxio_mpipe_request_notif_ring_interrupt(
				&context, cpu_x(cpu), cpu_y(cpu),
				1, ingress_irq, info->iqueue.ring);
		}
	}

	return 0;
}
/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
static void tile_net_init_mpipe_fail(void)
{
	int cpu;

	/* Do cleanups that require the mpipe context first. */
	if (small_buffer_stack >= 0)
		tile_net_pop_all_buffers(small_buffer_stack);
	if (large_buffer_stack >= 0)
		tile_net_pop_all_buffers(large_buffer_stack);

	/* Destroy mpipe context so the hardware no longer owns any memory. */
	gxio_mpipe_destroy(&context);

	for_each_online_cpu(cpu) {
		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
		free_pages((unsigned long)(info->comps_for_echannel[0]),
			   get_order(COMPS_SIZE));
		info->comps_for_echannel[0] = NULL;
		free_pages((unsigned long)(info->iqueue.idescs),
			   get_order(NOTIF_RING_SIZE));
		info->iqueue.idescs = NULL;
	}

	if (small_buffer_stack_va)
		free_pages_exact(small_buffer_stack_va, buffer_stack_size);
	if (large_buffer_stack_va)
		free_pages_exact(large_buffer_stack_va, buffer_stack_size);

	small_buffer_stack_va = NULL;
	large_buffer_stack_va = NULL;
	large_buffer_stack = -1;
	small_buffer_stack = -1;
}
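
/* NOTE: The "num_buffers" computation in tile_net_init_mpipe() below
 * follows from the first ISSUE comment in tile_net_poll(): each network
 * cpu may hold up to IQUEUE_ENTRIES + TILE_NET_BATCH buffers before it
 * replenishes any, so that many small and large buffers are provided
 * up front per network cpu.
 */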
/* The first time any tilegx network device is opened, we initialize
 * the global mpipe state.  If this step fails, we fail to open the
 * device, but if it succeeds, we never need to do it again, and since
 * tile_net can't be unloaded, we never undo it.
 *
 * Note that some resources in this path (buffer stack indices,
 * bindings from init_buffer_stack, etc.) are hypervisor resources
 * that are freed implicitly by gxio_mpipe_destroy().
 */
static int tile_net_init_mpipe(struct net_device *dev)
{
	int i, num_buffers, rc;
	int cpu;
	int first_ring, ring;
	int network_cpus_count = cpus_weight(network_cpus_map);

	if (!hash_default) {
		netdev_err(dev, "Networking requires hash_default!\n");
		return -EIO;
	}

	rc = gxio_mpipe_init(&context, 0);
	if (rc != 0) {
		netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc);
		return -EIO;
	}

	/* Set up the buffer stacks. */
	num_buffers =
		network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
	rc = init_buffer_stacks(dev, num_buffers);
	if (rc != 0)
		goto fail;

	/* Provide initial buffers. */
	rc = -ENOMEM;
	for (i = 0; i < num_buffers; i++) {
		if (!tile_net_provide_buffer(true)) {
			netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
			goto fail;
		}
	}
	for (i = 0; i < num_buffers; i++) {
		if (!tile_net_provide_buffer(false)) {
			netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
			goto fail;
		}
	}

	/* Allocate one NotifRing for each network cpu. */
	rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
	if (rc < 0) {
		netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
			   rc);
		goto fail;
	}

	/* Init NotifRings per-cpu. */
	first_ring = rc;
	ring = first_ring;
	for_each_online_cpu(cpu) {
		rc = alloc_percpu_mpipe_resources(dev, cpu, ring);
		if (rc < 0)
			goto fail;
		ring = rc;
	}

	/* Initialize NotifGroup and buckets. */
	rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count);
	if (rc != 0)
		goto fail;

	/* Create and enable interrupts. */
	rc = tile_net_setup_interrupts(dev);
	if (rc != 0)
		goto fail;

	return 0;

fail:
	tile_net_init_mpipe_fail();
	return rc;
}
/* Create persistent egress info for a given egress channel.
 * Note that this may be shared between, say, "gbe0" and "xgbe0".
 * ISSUE: Defer header allocation until TSO is actually needed?
 */
static int tile_net_init_egress(struct net_device *dev, int echannel)
{
	struct page *headers_page, *edescs_page, *equeue_page;
	gxio_mpipe_edesc_t *edescs;
	gxio_mpipe_equeue_t *equeue;
	unsigned char *headers;
	int headers_order, edescs_order, equeue_order;
	size_t edescs_size;
	int edma;
	int rc = -ENOMEM;

	/* Only initialize once. */
	if (egress_for_echannel[echannel].equeue != NULL)
		return 0;

	/* Allocate memory for the "headers". */
	headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES);
	headers_page = alloc_pages(GFP_KERNEL, headers_order);
	if (headers_page == NULL) {
		netdev_warn(dev,
			    "Could not alloc %zd bytes for TSO headers.\n",
			    PAGE_SIZE << headers_order);
		goto fail;
	}
	headers = pfn_to_kaddr(page_to_pfn(headers_page));

	/* Allocate memory for the "edescs". */
	edescs_size = EQUEUE_ENTRIES * sizeof(*edescs);
	edescs_order = get_order(edescs_size);
	edescs_page = alloc_pages(GFP_KERNEL, edescs_order);
	if (edescs_page == NULL) {
		netdev_warn(dev,
			    "Could not alloc %zd bytes for eDMA ring.\n",
			    edescs_size);
		goto fail_headers;
	}
	edescs = pfn_to_kaddr(page_to_pfn(edescs_page));

	/* Allocate memory for the "equeue". */
	equeue_order = get_order(sizeof(*equeue));
	equeue_page = alloc_pages(GFP_KERNEL, equeue_order);
	if (equeue_page == NULL) {
		netdev_warn(dev,
			    "Could not alloc %zd bytes for equeue info.\n",
			    PAGE_SIZE << equeue_order);
		goto fail_edescs;
	}
	equeue = pfn_to_kaddr(page_to_pfn(equeue_page));

	/* Allocate an edma ring.  Note that in practice this can't
	 * fail, which is good, because we will leak an edma ring if so.
	 */
	rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
	if (rc < 0) {
		netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n",
			    rc);
		goto fail_equeue;
	}
	edma = rc;

	/* Initialize the equeue. */
	rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel,
				    edescs, edescs_size, 0);
	if (rc != 0) {
		netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
		goto fail_equeue;
	}

	/* Done. */
	egress_for_echannel[echannel].equeue = equeue;
	egress_for_echannel[echannel].headers = headers;
	return 0;

fail_equeue:
	__free_pages(equeue_page, equeue_order);

fail_edescs:
	__free_pages(edescs_page, edescs_order);

fail_headers:
	__free_pages(headers_page, headers_order);

fail:
	return rc;
}
/* Return channel number for a newly-opened link. */
static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
			      const char *link_name)
{
	int rc = gxio_mpipe_link_open(link, &context, link_name, 0);
	if (rc < 0) {
		netdev_err(dev, "Failed to open '%s'\n", link_name);
		return rc;
	}
	rc = gxio_mpipe_link_channel(link);
	if (rc < 0 || rc >= TILE_NET_CHANNELS) {
		netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
		gxio_mpipe_link_close(link);
		return -EINVAL;
	}
	return rc;
}
/* Help the kernel activate the given network interface. */
static int tile_net_open(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int cpu, rc;

	mutex_lock(&tile_net_devs_for_channel_mutex);

	/* Do one-time initialization the first time any device is opened. */
	if (ingress_irq < 0) {
		rc = tile_net_init_mpipe(dev);
		if (rc != 0)
			goto fail;
	}

	/* Determine if this is the "loopify" device. */
	if (unlikely((loopify_link_name != NULL) &&
		     !strcmp(dev->name, loopify_link_name))) {
		rc = tile_net_link_open(dev, &priv->link, "loop0");
		if (rc < 0)
			goto fail;
		priv->channel = rc;
		rc = tile_net_link_open(dev, &priv->loopify_link, "loop1");
		if (rc < 0)
			goto fail;
		priv->loopify_channel = rc;
		priv->echannel = rc;
	} else {
		rc = tile_net_link_open(dev, &priv->link, dev->name);
		if (rc < 0)
			goto fail;
		priv->channel = rc;
		priv->echannel = rc;
	}

	/* Initialize egress info (if needed).  Once ever, per echannel. */
	rc = tile_net_init_egress(dev, priv->echannel);
	if (rc != 0)
		goto fail;

	tile_net_devs_for_channel[priv->channel] = dev;

	rc = tile_net_update(dev);
	if (rc != 0)
		goto fail;

	mutex_unlock(&tile_net_devs_for_channel_mutex);

	/* Initialize the transmit wake timer for this device for each cpu. */
	for_each_online_cpu(cpu) {
		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
		struct tile_net_tx_wake *tx_wake =
			&info->tx_wake[priv->echannel];

		hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		tx_wake->timer.function = tile_net_handle_tx_wake_timer;
		tx_wake->dev = dev;
	}

	for_each_online_cpu(cpu)
		netif_start_subqueue(dev, cpu);
	netif_carrier_on(dev);
	return 0;

fail:
	if (priv->loopify_channel >= 0) {
		if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
			netdev_warn(dev, "Failed to close loopify link!\n");
		priv->loopify_channel = -1;
	}
	if (priv->channel >= 0) {
		/* Clear the channel table entry (if set) while the
		 * channel index is still valid.
		 */
		tile_net_devs_for_channel[priv->channel] = NULL;
		if (gxio_mpipe_link_close(&priv->link) != 0)
			netdev_warn(dev, "Failed to close link!\n");
		priv->channel = -1;
	}
	priv->echannel = -1;
	mutex_unlock(&tile_net_devs_for_channel_mutex);

	/* Don't return raw gxio error codes to generic Linux. */
	return (rc > -512) ? rc : -EIO;
}
/* Help the kernel deactivate the given network interface. */
static int tile_net_stop(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int cpu;

	for_each_online_cpu(cpu) {
		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
		struct tile_net_tx_wake *tx_wake =
			&info->tx_wake[priv->echannel];

		hrtimer_cancel(&tx_wake->timer);
		netif_stop_subqueue(dev, cpu);
	}

	mutex_lock(&tile_net_devs_for_channel_mutex);
	tile_net_devs_for_channel[priv->channel] = NULL;
	(void)tile_net_update(dev);
	if (priv->loopify_channel >= 0) {
		if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
			netdev_warn(dev, "Failed to close loopify link!\n");
		priv->loopify_channel = -1;
	}
	if (priv->channel >= 0) {
		if (gxio_mpipe_link_close(&priv->link) != 0)
			netdev_warn(dev, "Failed to close link!\n");
		priv->channel = -1;
	}
	priv->echannel = -1;
	mutex_unlock(&tile_net_devs_for_channel_mutex);

	return 0;
}
/* Determine the VA for a fragment. */
static inline void *tile_net_frag_buf(skb_frag_t *f)
{
	unsigned long pfn = page_to_pfn(skb_frag_page(f));
	return pfn_to_kaddr(pfn) + f->page_offset;
}
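
/* NOTE: The reservation below is a two-stage strategy: first make sure
 * a completion entry is available (either because the completion ring
 * has headroom, or by freeing up to 32 finished completions), and only
 * then try to reserve equeue slots, retrying once after draining more
 * completions to give the equeue time to make progress.
 */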
/* Acquire a completion entry and an egress slot, or if we can't,
 * stop the queue and schedule the tx_wake timer.
 */
static s64 tile_net_equeue_try_reserve(struct net_device *dev,
				       struct tile_net_comps *comps,
				       gxio_mpipe_equeue_t *equeue,
				       int num_edescs)
{
	/* Try to acquire a completion entry. */
	if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 ||
	    tile_net_free_comps(equeue, comps, 32, false) != 0) {

		/* Try to acquire an egress slot. */
		s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
		if (slot >= 0)
			return slot;

		/* Freeing some completions gives the equeue time to drain. */
		tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false);

		slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
		if (slot >= 0)
			return slot;
	}

	/* Still nothing; give up and stop the queue for a short while. */
	netif_stop_subqueue(dev, smp_processor_id());
	tile_net_schedule_tx_wake_timer(dev);
	return -1;
}
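
/* A worked example of the edesc counting below (illustrative numbers,
 * not taken from the original text): a TSO skb with gso_size 1400 and
 * 2800 bytes of payload in two page fragments of 1500 and 1300 bytes
 * produces two segments.  Segment one takes 1400 bytes from fragment 0
 * (1 header edesc + 1 payload edesc); segment two takes the remaining
 * 100 bytes of fragment 0 plus all 1300 bytes of fragment 1 (1 header
 * edesc + 2 payload edescs), for 5 edescs in total.
 */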
/* Determine how many edesc's are needed for TSO.
 *
 * Sometimes, if "sendfile()" requires copying, we will be called with
 * "data" containing the header and payload, with "frags" being empty.
 * Sometimes, for example when using NFS over TCP, a single segment can
 * span 3 fragments.  This requires special care.
 */
static int tso_count_edescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int data_len = skb->data_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;    /* id of the current fragment */
	long f_size = -1;  /* size of the current fragment */
	long f_used = -1;  /* bytes used from the current fragment */
	long n;            /* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {

		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {

			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = sh->frags[f_id].size;
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	return num_edescs;
}
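
/* NOTE: The seed values computed below use standard ones-complement
 * checksum arithmetic (in the style of RFC 1624 incremental updates):
 * the fields that change per segment (tot_len, id, seq, and the TCP
 * length implied by skb->len) are backed out of the original checksums
 * once, so each copied header only needs a cheap csum_long() fold
 * rather than a full recomputation.
 */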
/* Prepare modified copies of the skbuff headers.
 * FIXME: add support for IPv6.
 */
static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
				s64 slot)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	struct iphdr *ih;
	struct tcphdr *th;
	unsigned int data_len = skb->data_len;
	unsigned char *data = skb->data;
	unsigned int ih_off, th_off, sh_len, p_len;
	unsigned int isum_seed, tsum_seed, id, seq;
	long f_id = -1;    /* id of the current fragment */
	long f_size = -1;  /* size of the current fragment */
	long f_used = -1;  /* bytes used from the current fragment */
	long n;            /* size of the current piece of payload */
	int segment;

	/* Locate original headers and compute various lengths. */
	ih = ip_hdr(skb);
	th = tcp_hdr(skb);
	ih_off = skb_network_offset(skb);
	th_off = skb_transport_offset(skb);
	sh_len = th_off + tcp_hdrlen(skb);
	p_len = sh->gso_size;

	/* Set up seed values for IP and TCP csum and initialize id and seq. */
	isum_seed = ((0xFFFF - ih->check) +
		     (0xFFFF - ih->tot_len) +
		     (0xFFFF - ih->id));
	tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
	id = ntohs(ih->id);
	seq = ntohl(th->seq);

	/* Prepare all the headers. */
	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned char *buf;
		unsigned int p_used = 0;

		/* Copy to the header memory for this segment. */
		buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
			NET_IP_ALIGN;
		memcpy(buf, data, sh_len);

		/* Update copied ip header. */
		ih = (struct iphdr *)(buf + ih_off);
		ih->tot_len = htons(sh_len + p_len - ih_off);
		ih->id = htons(id);
		ih->check = csum_long(isum_seed + ih->tot_len +
				      ih->id) ^ 0xffff;

		/* Update copied tcp header. */
		th = (struct tcphdr *)(buf + th_off);
		th->seq = htonl(seq);
		th->check = csum_long(tsum_seed + htons(sh_len + p_len));
		if (segment != sh->gso_segs - 1) {
			th->fin = 0;
			th->psh = 0;
		}

		/* Skip past the header. */
		slot++;

		/* Skip past the payload. */
		while (p_used < p_len) {

			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = sh->frags[f_id].size;
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;

			slot++;
		}

		id++;
		seq += p_len;

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* Flush the headers so they are ready for hardware DMA. */
	wmb();
}
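
/* NOTE: In tso_egress() below, each segment's header edesc points into
 * the preallocated "headers" array (one HEADER_BYTES slot per equeue
 * slot, which is why headers are addressed by "slot % EQUEUE_ENTRIES"),
 * while the payload edescs point directly at the skb's fragments; the
 * "bound" bit marks the final descriptor of each segment's packet.
 */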
/* Pass all the data to mpipe for egress. */
static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
		       struct sk_buff *skb, unsigned char *headers, s64 slot)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int data_len = skb->data_len;
	unsigned int p_len = sh->gso_size;
	gxio_mpipe_edesc_t edesc_head = { { 0 } };
	gxio_mpipe_edesc_t edesc_body = { { 0 } };
	long f_id = -1;    /* id of the current fragment */
	long f_size = -1;  /* size of the current fragment */
	long f_used = -1;  /* bytes used from the current fragment */
	long n;            /* size of the current piece of payload */
	unsigned long tx_packets = 0, tx_bytes = 0;
	unsigned int csum_start, sh_len;
	int segment;

	/* Prepare to egress the headers: set up header edesc. */
	csum_start = skb_checksum_start_offset(skb);
	sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	edesc_head.csum = 1;
	edesc_head.csum_start = csum_start;
	edesc_head.csum_dest = csum_start + skb->csum_offset;
	edesc_head.xfer_size = sh_len;

	/* This is only used to specify the TLB. */
	edesc_head.stack_idx = large_buffer_stack;
	edesc_body.stack_idx = large_buffer_stack;

	/* Egress all the edescs. */
	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned char *buf;
		unsigned int p_used = 0;

		/* Egress the header. */
		buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
			NET_IP_ALIGN;
		edesc_head.va = va_to_tile_io_addr(buf);
		gxio_mpipe_equeue_put_at(equeue, edesc_head, slot);
		slot++;

		/* Egress the payload. */
		while (p_used < p_len) {
			void *va;

			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = sh->frags[f_id].size;
				f_used = 0;
			}

			va = tile_net_frag_buf(&sh->frags[f_id]) + f_used;

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;

			/* Egress a piece of the payload. */
			edesc_body.va = va_to_tile_io_addr(va);
			edesc_body.xfer_size = n;
			edesc_body.bound = !(p_used < p_len);
			gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
			slot++;
		}

		tx_packets++;
		tx_bytes += sh_len + p_len;

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* Update stats. */
	tile_net_stats_add(tx_packets, &priv->stats.tx_packets);
	tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes);
}
/* Do "TSO" handling for egress.
 *
 * Normally drivers set NETIF_F_TSO only to support hardware TSO;
 * otherwise the stack uses scatter-gather to implement GSO in software.
 * On our testing, enabling GSO support (via NETIF_F_SG) drops network
 * performance down to around 7.5 Gbps on the 10G interfaces, although
 * also dropping cpu utilization way down, to under 8%.  But
 * implementing "TSO" in the driver brings performance back up to line
 * rate, while dropping cpu usage even further, to less than 4%.  In
 * practice, profiling of GSO shows that skb_segment() is what causes
 * the performance overheads; we benefit in the driver from using
 * preallocated memory to duplicate the TCP/IP headers.
 */
static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
{
	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
	struct tile_net_priv *priv = netdev_priv(dev);
	int channel = priv->echannel;
	struct tile_net_egress *egress = &egress_for_echannel[channel];
	struct tile_net_comps *comps = info->comps_for_echannel[channel];
	gxio_mpipe_equeue_t *equeue = egress->equeue;
	unsigned long irqflags;
	int num_edescs;
	s64 slot;

	/* Determine how many mpipe edesc's are needed. */
	num_edescs = tso_count_edescs(skb);

	local_irq_save(irqflags);

	/* Try to acquire a completion entry and an egress slot. */
	slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
	if (slot < 0) {
		local_irq_restore(irqflags);
		return NETDEV_TX_BUSY;
	}

	/* Set up copies of header data properly. */
	tso_headers_prepare(skb, egress->headers, slot);

	/* Actually pass the data to the network hardware. */
	tso_egress(dev, equeue, skb, egress->headers, slot);

	/* Add a completion record. */
	add_comp(equeue, comps, slot + num_edescs - 1, skb);

	local_irq_restore(irqflags);

	/* Make sure the egress timer is scheduled. */
	tile_net_schedule_egress_timer();

	return NETDEV_TX_OK;
}
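
/* NOTE: The non-TSO transmit path below flattens a packet into at most
 * MAX_FRAGS pieces: one for the linear data (if any) plus one per page
 * fragment, which is exactly why MAX_FRAGS is MAX_SKB_FRAGS + 1 and
 * why "frags" and "edescs" in tile_net_tx() are sized MAX_FRAGS.
 */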
/* Analyze the body and frags for a transmit request. */
static unsigned int tile_net_tx_frags(struct frag *frags,
				      struct sk_buff *skb,
				      void *b_data, unsigned int b_len)
{
	unsigned int i, n = 0;

	struct skb_shared_info *sh = skb_shinfo(skb);

	if (b_len != 0) {
		frags[n].buf = b_data;
		frags[n++].length = b_len;
	}

	for (i = 0; i < sh->nr_frags; i++) {
		skb_frag_t *f = &sh->frags[i];
		frags[n].buf = tile_net_frag_buf(f);
		frags[n++].length = skb_frag_size(f);
	}

	return n;
}
/* Help the kernel transmit a packet. */
static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
	struct tile_net_priv *priv = netdev_priv(dev);
	struct tile_net_egress *egress = &egress_for_echannel[priv->echannel];
	gxio_mpipe_equeue_t *equeue = egress->equeue;
	struct tile_net_comps *comps =
		info->comps_for_echannel[priv->echannel];
	unsigned int len = skb->len;
	unsigned char *data = skb->data;
	unsigned int num_edescs;
	struct frag frags[MAX_FRAGS];
	gxio_mpipe_edesc_t edescs[MAX_FRAGS];
	unsigned long irqflags;
	gxio_mpipe_edesc_t edesc = { { 0 } };
	unsigned int i;
	s64 slot;

	if (skb_is_gso(skb))
		return tile_net_tx_tso(skb, dev);

	num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));

	/* This is only used to specify the TLB. */
	edesc.stack_idx = large_buffer_stack;

	/* Prepare the edescs. */
	for (i = 0; i < num_edescs; i++) {
		edesc.xfer_size = frags[i].length;
		edesc.va = va_to_tile_io_addr(frags[i].buf);
		edescs[i] = edesc;
	}

	/* Mark the final edesc. */
	edescs[num_edescs - 1].bound = 1;

	/* Add checksum info to the initial edesc, if needed. */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned int csum_start = skb_checksum_start_offset(skb);
		edescs[0].csum = 1;
		edescs[0].csum_start = csum_start;
		edescs[0].csum_dest = csum_start + skb->csum_offset;
	}

	local_irq_save(irqflags);

	/* Try to acquire a completion entry and an egress slot. */
	slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
	if (slot < 0) {
		local_irq_restore(irqflags);
		return NETDEV_TX_BUSY;
	}

	for (i = 0; i < num_edescs; i++)
		gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);

	/* Add a completion record. */
	add_comp(equeue, comps, slot - 1, skb);

	/* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */
	tile_net_stats_add(1, &priv->stats.tx_packets);
	tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
			   &priv->stats.tx_bytes);

	local_irq_restore(irqflags);

	/* Make sure the egress timer is scheduled. */
	tile_net_schedule_egress_timer();

	return NETDEV_TX_OK;
}
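
/* NOTE: With one transmit subqueue per cpu (the device is allocated
 * with NR_CPUS queues) and NETIF_F_LLTX set, each core egresses on its
 * own queue without cross-cpu queue locking; serialization against the
 * shared equeue is handled under local_irq_save() in the tx paths above.
 */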
/* Return subqueue id on this core (one per core). */
static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return smp_processor_id();
}
/* Deal with a transmit timeout. */
static void tile_net_tx_timeout(struct net_device *dev)
{
	int cpu;

	for_each_online_cpu(cpu)
		netif_wake_subqueue(dev, cpu);
}
/* Ioctl commands. */
static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return -EOPNOTSUPP;
}
/* Get system network statistics for device. */
static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	return &priv->stats;
}
/* Change the MTU. */
static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > 1500))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
/* Change the Ethernet address of the NIC.
 *
 * The hypervisor driver does not support changing MAC address.  However,
 * the hardware does not do anything with the MAC address, so the address
 * which gets used on outgoing packets, and which is accepted on incoming
 * packets, is completely up to us.
 *
 * Returns 0 on success, negative on failure.
 */
static int tile_net_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void tile_net_netpoll(struct net_device *dev)
{
	disable_percpu_irq(ingress_irq);
	tile_net_handle_ingress_irq(ingress_irq, NULL);
	enable_percpu_irq(ingress_irq, 0);
}
#endif
static const struct net_device_ops tile_net_ops = {
	.ndo_open = tile_net_open,
	.ndo_stop = tile_net_stop,
	.ndo_start_xmit = tile_net_tx,
	.ndo_select_queue = tile_net_select_queue,
	.ndo_do_ioctl = tile_net_ioctl,
	.ndo_get_stats = tile_net_get_stats,
	.ndo_change_mtu = tile_net_change_mtu,
	.ndo_tx_timeout = tile_net_tx_timeout,
	.ndo_set_mac_address = tile_net_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = tile_net_netpoll,
#endif
};
/* The setup function.
 *
 * This uses ether_setup() to assign various fields in dev, including
 * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
 */
static void tile_net_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops = &tile_net_ops;
	dev->watchdog_timeo = TILE_NET_TIMEOUT;
	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_HW_CSUM;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_TSO;
	dev->mtu = 1500;
}
/* Allocate the device structure, register the device, and obtain the
 * MAC address from the hypervisor.
 */
static void tile_net_dev_init(const char *name, const uint8_t *mac)
{
	int ret;
	int i;
	int nz_addr = 0;
	struct net_device *dev;
	struct tile_net_priv *priv;

	/* HACK: Ignore "loop" links. */
	if (strncmp(name, "loop", 4) == 0)
		return;

	/* Allocate the device structure.  Normally, "name" is a
	 * template, instantiated by register_netdev(), but not for us.
	 */
	dev = alloc_netdev_mqs(sizeof(*priv), name, tile_net_setup,
			       NR_CPUS, 1);
	if (!dev) {
		pr_err("alloc_netdev_mqs(%s) failed\n", name);
		return;
	}

	/* Initialize "priv". */
	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(*priv));
	priv->dev = dev;
	priv->channel = -1;
	priv->loopify_channel = -1;
	priv->echannel = -1;

	/* Get the MAC address and set it in the device struct; this must
	 * be done before the device is opened.  If the MAC is all zeroes,
	 * we use a random address, since we're probably on the simulator.
	 */
	for (i = 0; i < 6; i++)
		nz_addr |= mac[i];

	if (nz_addr) {
		memcpy(dev->dev_addr, mac, 6);
		dev->addr_len = 6;
	} else {
		random_ether_addr(dev->dev_addr);
	}

	/* Register the network device. */
	ret = register_netdev(dev);
	if (ret) {
		netdev_err(dev, "register_netdev failed %d\n", ret);
		free_netdev(dev);
		return;
	}
}
/* Per-cpu module initialization. */
static void tile_net_init_module_percpu(void *unused)
{
	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
	int my_cpu = smp_processor_id();

	info->has_iqueue = false;

	info->my_cpu = my_cpu;

	/* Initialize the egress timer. */
	hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	info->egress_timer.function = tile_net_handle_egress_timer;
}
/* Module initialization. */
static int __init tile_net_init_module(void)
{
	int i;
	char name[GXIO_MPIPE_LINK_NAME_LEN];
	uint8_t mac[6];

	pr_info("Tilera Network Driver\n");

	mutex_init(&tile_net_devs_for_channel_mutex);

	/* Initialize each CPU. */
	on_each_cpu(tile_net_init_module_percpu, NULL, 1);

	/* Find out what devices we have, and initialize them. */
	for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++)
		tile_net_dev_init(name, mac);

	if (!network_cpus_init())
		network_cpus_map = *cpu_online_mask;

	return 0;
}

module_init(tile_net_init_module);