/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/kernel.h>	/* printk() */
#include <linux/slab.h>		/* kmalloc() */
#include <linux/errno.h>	/* error codes */
#include <linux/types.h>	/* size_t */
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/netdevice.h>	/* struct device, and other headers */
#include <linux/etherdevice.h>	/* eth_type_trans */
#include <linux/skbuff.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/hugetlb.h>
#include <linux/in6.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <asm/checksum.h>
#include <asm/homecache.h>

#include <hv/drv_xgbe_intf.h>
#include <hv/drv_xgbe_impl.h>
#include <hv/hypervisor.h>
#include <hv/netio_intf.h>

/* For TSO */
#include <linux/ip.h>
#include <linux/tcp.h>


/*
 * First, "tile_net_init_module()" initializes all four "devices" which
 * can be used by Linux.
 *
 * Then, "ifconfig DEVICE up" calls "tile_net_open()", which analyzes
 * the network cpus, then uses "tile_net_open_aux()" to initialize
 * LIPP/LEPP, and then uses "tile_net_open_inner()" to register all
 * the tiles, provide buffers to LIPP, allow ingress to start, and
 * turn on hypervisor interrupt handling (and NAPI) on all tiles.
 *
 * If registration fails due to the link being down, then "retry_work"
 * is used to keep calling "tile_net_open_inner()" until it succeeds.
 *
 * If "ifconfig DEVICE down" is called, it uses "tile_net_stop()" to
 * stop egress, drain the LIPP buffers, unregister all the tiles, stop
 * LIPP/LEPP, and wipe the LEPP queue.
 *
 * We start out with the ingress interrupt enabled on each CPU.  When
 * this interrupt fires, we disable it, and call "napi_schedule()".
 * This will cause "tile_net_poll()" to be called, which will pull
 * packets from the netio queue, filtering them out, or passing them
 * to "netif_receive_skb()".  If our budget is exhausted, we will
 * return, knowing we will be called again later.  Otherwise, we
 * reenable the ingress interrupt, and call "napi_complete()".
 *
 * HACK: Since disabling the ingress interrupt is not reliable, we
 * ignore the interrupt if the global "active" flag is false.
 *
 *
 * NOTE: The use of "native_driver" ensures that EPP exists, and that
 * we are using "LIPP" and "LEPP".
 *
 * NOTE: Failing to free completions for an arbitrarily long time
 * (which is defined to be illegal) does in fact cause bizarre
 * problems.  The "egress_timer" helps prevent this from happening.
 */


/* HACK: Allow use of "jumbo" packets. */
/* This should be 1500 if "jumbo" is not set in LIPP. */
/* This should be at most 10226 (10240 - 14) if "jumbo" is set in LIPP. */
/* ISSUE: This has not been thoroughly tested (except at 1500). */
#define TILE_NET_MTU 1500

/* HACK: Define to support GSO. */
/* ISSUE: This may actually hurt performance of the TCP blaster. */
/* #define TILE_NET_GSO */

/* Define this to collapse "duplicate" acks. */
/* #define IGNORE_DUP_ACKS */

/* HACK: Define this to verify incoming packets. */
/* #define TILE_NET_VERIFY_INGRESS */

/* Use 3000 to enable the Linux Traffic Control (QoS) layer, else 0. */
#define TILE_NET_TX_QUEUE_LEN 0

/* Define to dump packets (prints out the whole packet on tx and rx). */
/* #define TILE_NET_DUMP_PACKETS */

/* Define to enable debug spew (all PDEBUG's are enabled). */
/* #define TILE_NET_DEBUG */


/* Define to activate paranoia checks. */
/* #define TILE_NET_PARANOIA */

/* Default transmit lockup timeout period, in jiffies. */
#define TILE_NET_TIMEOUT (5 * HZ)

/* Default retry interval for bringing up the NetIO interface, in jiffies. */
#define TILE_NET_RETRY_INTERVAL (5 * HZ)

/* Number of ports (xgbe0, xgbe1, gbe0, gbe1). */
#define TILE_NET_DEVS 4



/* Paranoia. */
#if NET_IP_ALIGN != LIPP_PACKET_PADDING
#error "NET_IP_ALIGN must match LIPP_PACKET_PADDING."
#endif


/* Debug print. */
#ifdef TILE_NET_DEBUG
#define PDEBUG(fmt, args...) net_printk(fmt, ## args)
#else
#define PDEBUG(fmt, args...)
#endif


MODULE_AUTHOR("Tilera");
MODULE_LICENSE("GPL");


/*
 * Queue of incoming packets for a specific cpu and device.
 *
 * Includes a pointer to the "system" data, and the actual "user" data.
 */
struct tile_netio_queue {
	netio_queue_impl_t *__system_part;
	netio_queue_user_impl_t __user_part;

};


/*
 * Statistics counters for a specific cpu and device.
 */
struct tile_net_stats_t {
	u32 rx_packets;
	u32 rx_bytes;
	u32 tx_packets;
	u32 tx_bytes;
};


/*
 * Info for a specific cpu and device.
 *
 * ISSUE: There is a "dev" pointer in "napi" as well.
 */
struct tile_net_cpu {
	/* The NAPI struct. */
	struct napi_struct napi;
	/* Packet queue. */
	struct tile_netio_queue queue;
	/* Statistics. */
	struct tile_net_stats_t stats;
	/* True iff NAPI is enabled. */
	bool napi_enabled;
	/* True if this tile has successfully registered with the IPP. */
	bool registered;
	/* True if the link was down last time we tried to register. */
	bool link_down;
	/* True if "egress_timer" is scheduled. */
	bool egress_timer_scheduled;
	/* Number of small sk_buffs which must still be provided. */
	unsigned int num_needed_small_buffers;
	/* Number of large sk_buffs which must still be provided. */
	unsigned int num_needed_large_buffers;
	/* A timer for handling egress completions. */
	struct timer_list egress_timer;
};


/*
 * Info for a specific device.
 */
struct tile_net_priv {
	/* Our network device. */
	struct net_device *dev;
	/* Pages making up the egress queue. */
	struct page *eq_pages;
	/* Address of the actual egress queue. */
	lepp_queue_t *eq;
	/* Protects "eq". */
	spinlock_t eq_lock;
	/* The hypervisor handle for this interface. */
	int hv_devhdl;
	/* The intr bit mask that IDs this device. */
	u32 intr_id;
	/* True iff "tile_net_open_aux()" has succeeded. */
	bool partly_opened;
	/* True iff the device is "active". */
	bool active;
	/* Effective network cpus. */
	struct cpumask network_cpus_map;
	/* Number of network cpus. */
	int network_cpus_count;
	/* Credits per network cpu. */
	int network_cpus_credits;
	/* Network stats. */
	struct net_device_stats stats;
	/* For NetIO bringup retries. */
	struct delayed_work retry_work;
	/* Quick access to per cpu data. */
	struct tile_net_cpu *cpu[NR_CPUS];
};

/* Log2 of the number of small pages needed for the egress queue. */
#define EQ_ORDER  get_order(sizeof(lepp_queue_t))
/* Size of the egress queue's pages. */
#define EQ_SIZE   (1 << (PAGE_SHIFT + EQ_ORDER))

/*
 * The actual devices (xgbe0, xgbe1, gbe0, gbe1).
 */
static struct net_device *tile_net_devs[TILE_NET_DEVS];

/*
 * The "tile_net_cpu" structures for each device.
 */
static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe0);
static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe1);
static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe0);
static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe1);


/*
 * True if "network_cpus" was specified.
 */
static bool network_cpus_used;

/*
 * The actual cpus in "network_cpus".
 */
static struct cpumask network_cpus_map;


#ifdef TILE_NET_DEBUG
/*
 * printk with extra stuff.
 *
 * We print the CPU we're running in brackets.
 */
static void net_printk(char *fmt, ...)
{
	int i;
	int len;
	va_list args;
	static char buf[256];

	len = sprintf(buf, "tile_net[%2.2d]: ", smp_processor_id());
	va_start(args, fmt);
	i = vscnprintf(buf + len, sizeof(buf) - len - 1, fmt, args);
	va_end(args);
	buf[255] = '\0';
	pr_notice("%s", buf);
}
#endif


#ifdef TILE_NET_DUMP_PACKETS
/*
 * Dump a packet.
 */
static void dump_packet(unsigned char *data, unsigned long length, char *s)
{
	int my_cpu = smp_processor_id();

	unsigned long i;
	char buf[128];

	static unsigned int count;

	pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n",
		data, length, s, count++);

	pr_info("\n");

	for (i = 0; i < length; i++) {
		if ((i & 0xf) == 0)
			sprintf(buf, "[%02d] %8.8lx:", my_cpu, i);
		sprintf(buf + strlen(buf), " %2.2x", data[i]);
		if ((i & 0xf) == 0xf || i == length - 1) {
			strcat(buf, "\n");
			pr_info("%s", buf);
		}
	}
}
#endif


/*
 * Provide support for the __netio_fastio1() swint
 * (see <hv/drv_xgbe_intf.h> for how it is used).
 *
 * The fastio swint2 call may clobber all the caller-saved registers.
 * It rarely clobbers memory, but we allow for the possibility in
 * the signature just to be on the safe side.
 *
 * Also, gcc doesn't seem to allow an input operand to be
 * clobbered, so we fake it with dummy outputs.
 *
 * This function can't be static because of the way it is declared
 * in the netio header.
 */
inline int __netio_fastio1(u32 fastio_index, u32 arg0)
{
	long result, clobber_r1, clobber_r10;
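	/*
	 * NOTE: The "Rnn" constraints appear to pin each operand to a
	 * fixed register, e.g. "=R01" binds "clobber_r1" to register r1.
	 */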
	asm volatile("swint2"
		     : "=R00" (result),
		       "=R01" (clobber_r1), "=R10" (clobber_r10)
		     : "R10" (fastio_index), "R01" (arg0)
		     : "memory", "r2", "r3", "r4",
		       "r5", "r6", "r7", "r8", "r9",
		       "r11", "r12", "r13", "r14",
		       "r15", "r16", "r17", "r18", "r19",
		       "r20", "r21", "r22", "r23", "r24",
		       "r25", "r26", "r27", "r28", "r29");
	return result;
}


/*
 * Provide a linux buffer to LIPP.
 */
static void tile_net_provide_linux_buffer(struct tile_net_cpu *info,
					  void *va, bool small)
{
	struct tile_netio_queue *queue = &info->queue;

	/* Convert "va" and "small" to "linux_buffer_t". */
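	/*
	 * NOTE: The low 7 bits of the CPA must be zero (hence the mod
	 * 128 alignment of "va"), and bit 0 becomes the small/large
	 * flag, so the round trip looks like:
	 *
	 *   buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small;
	 *   va     = __va((phys_addr_t)(buffer >> 1) << 7);
	 *
	 * as used by the ingress and drain paths below.
	 */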
	unsigned int buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small;

	__netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer);
}


/*
 * Provide a linux buffer for LIPP.
 *
 * Note that the ACTUAL allocation for each buffer is a "struct sk_buff",
 * plus a chunk of memory that includes not only the requested bytes, but
 * also NET_SKB_PAD bytes of initial padding, and a "struct skb_shared_info".
 *
 * Note that "struct skb_shared_info" is 88 bytes with 64K pages and
 * 268 bytes with 4K pages (since the frags[] array needs 18 entries).
 *
 * Without jumbo packets, the maximum packet size will be 1536 bytes,
 * and we use 2 bytes (NET_IP_ALIGN) of padding.  ISSUE: If we told
 * the hardware to clip at 1518 bytes instead of 1536 bytes, then we
 * could save an entire cache line, but in practice, we don't need it.
 *
 * Since CPAs are 38 bits, and we can only encode the high 31 bits in
 * a "linux_buffer_t", the low 7 bits must be zero, and thus, we must
 * align the actual "va" mod 128.
 *
 * We assume that the underlying "head" will be aligned mod 64.  Note
 * that in practice, we have seen "head" NOT aligned mod 128 even when
 * using 2048 byte allocations, which is surprising.
 *
 * If "head" WAS always aligned mod 128, we could change LIPP to
 * assume that the low SIX bits are zero, and the 7th bit is one, that
 * is, align the actual "va" mod 128 plus 64, which would be "free".
 *
 * For now, the actual "head" pointer points at NET_SKB_PAD bytes of
 * padding, plus 28 or 92 bytes of extra padding, plus the sk_buff
 * pointer, plus the NET_IP_ALIGN padding, plus 126 or 1536 bytes for
 * the actual packet, plus 62 bytes of empty padding, plus some
 * padding and the "struct skb_shared_info".
 *
 * With 64K pages, a large buffer thus needs 32+92+4+2+1536+62+88
 * bytes, or 1816 bytes, which fits comfortably into 2048 bytes.
 *
 * With 64K pages, a small buffer thus needs 32+92+4+2+126+88
 * bytes, or 344 bytes, which means we are wasting 64+ bytes, and
 * could presumably increase the size of small buffers.
 *
 * With 4K pages, a large buffer thus needs 32+92+4+2+1536+62+268
 * bytes, or 1996 bytes, which fits comfortably into 2048 bytes.
 *
 * With 4K pages, a small buffer thus needs 32+92+4+2+126+268
 * bytes, or 524 bytes, which is annoyingly wasteful.
 *
 * Maybe we should increase LIPP_SMALL_PACKET_SIZE to 192?
 *
 * ISSUE: Maybe we should increase "NET_SKB_PAD" to 64?
 */
static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
					   bool small)
{
#if TILE_NET_MTU <= 1536
	/* Without "jumbo", 2 + 1536 should be sufficient. */
	unsigned int large_size = NET_IP_ALIGN + 1536;
#else
	/* ISSUE: This has not been tested. */
	unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100;
#endif

	/* Avoid "false sharing" with last cache line. */
	/* ISSUE: This is already done by "netdev_alloc_skb()". */
	unsigned int len =
		 (((small ? LIPP_SMALL_PACKET_SIZE : large_size) +
		   CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE());

	unsigned int padding = 128 - NET_SKB_PAD;
	unsigned int align;

	struct sk_buff *skb;
	void *va;

	struct sk_buff **skb_ptr;

	/* Request 96 extra bytes for alignment purposes. */
	skb = netdev_alloc_skb(info->napi.dev, len + padding);
	if (skb == NULL)
		return false;

	/* Skip 32 or 96 bytes to align "data" mod 128. */
	align = -(long)skb->data & (128 - 1);
	BUG_ON(align > padding);
	skb_reserve(skb, align);

	/* This address is given to IPP. */
	va = skb->data;

	/* Buffers must not span a huge page. */
	BUG_ON(((((long)va & ~HPAGE_MASK) + len) & HPAGE_MASK) != 0);

#ifdef TILE_NET_PARANOIA
#if CHIP_HAS_CBOX_HOME_MAP()
	if (hash_default) {
		HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va);
		if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
			panic("Non-HFH ingress buffer! VA=%p Mode=%d PTE=%llx",
			      va, hv_pte_get_mode(pte), hv_pte_val(pte));
	}
#endif
#endif

	/* Invalidate the packet buffer. */
	if (!hash_default)
		__inv_buffer(va, len);

	/* Skip two bytes to satisfy LIPP assumptions. */
	/* Note that this aligns IP on a 16 byte boundary. */
	/* ISSUE: Do this when the packet arrives? */
	skb_reserve(skb, NET_IP_ALIGN);

	/* Save a back-pointer to 'skb'. */
	skb_ptr = va - sizeof(*skb_ptr);
	*skb_ptr = skb;

	/* Make sure "skb_ptr" has been flushed. */
	__insn_mf();

	/* Provide the new buffer. */
	tile_net_provide_linux_buffer(info, va, small);

	return true;
}


/*
 * Provide linux buffers for LIPP.
 */
static void tile_net_provide_needed_buffers(struct tile_net_cpu *info)
{
	while (info->num_needed_small_buffers != 0) {
		if (!tile_net_provide_needed_buffer(info, true))
			goto oops;
		info->num_needed_small_buffers--;
	}

	while (info->num_needed_large_buffers != 0) {
		if (!tile_net_provide_needed_buffer(info, false))
			goto oops;
		info->num_needed_large_buffers--;
	}

	return;

oops:

	/* Add a description to the page allocation failure dump. */
	pr_notice("Could not provide a linux buffer to LIPP.\n");
}


/*
 * Grab some LEPP completions, and store them in "comps", of size
 * "comps_size", and return the number of completions which were
 * stored, so the caller can free them.
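 *
 * If fewer than "min_size" completions are available, return zero
 * and do not advance the completion head at all.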
 */
static unsigned int tile_net_lepp_grab_comps(lepp_queue_t *eq,
					     struct sk_buff *comps[],
					     unsigned int comps_size,
					     unsigned int min_size)
{
	unsigned int n = 0;

	unsigned int comp_head = eq->comp_head;
	unsigned int comp_busy = eq->comp_busy;

	while (comp_head != comp_busy && n < comps_size) {
		comps[n++] = eq->comps[comp_head];
		LEPP_QINC(comp_head);
	}

	if (n < min_size)
		return 0;

	eq->comp_head = comp_head;

	return n;
}


/*
 * Free some comps, and return true iff there are still some pending.
 */
static bool tile_net_lepp_free_comps(struct net_device *dev, bool all)
{
	struct tile_net_priv *priv = netdev_priv(dev);

	lepp_queue_t *eq = priv->eq;

	struct sk_buff *olds[64];
	unsigned int wanted = 64;
	unsigned int i, n;
	bool pending;

	spin_lock(&priv->eq_lock);

	if (all)
		eq->comp_busy = eq->comp_tail;

	n = tile_net_lepp_grab_comps(eq, olds, wanted, 0);

	pending = (eq->comp_head != eq->comp_tail);

	spin_unlock(&priv->eq_lock);

	for (i = 0; i < n; i++)
		kfree_skb(olds[i]);

	return pending;
}


/*
 * Make sure the egress timer is scheduled.
 *
 * Note that we use "schedule if not scheduled" logic instead of the more
 * obvious "reschedule" logic, because "reschedule" is fairly expensive.
 */
static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
{
	if (!info->egress_timer_scheduled) {
		mod_timer_pinned(&info->egress_timer, jiffies + 1);
		info->egress_timer_scheduled = true;
	}
}


/*
 * The "function" for "info->egress_timer".
 *
 * This timer will reschedule itself as long as there are any pending
 * completions expected (on behalf of any tile).
 *
 * ISSUE: Realistically, will the timer ever stop scheduling itself?
 *
 * ISSUE: This timer is almost never actually needed, so just use a global
 * timer that can run on any tile.
 *
 * ISSUE: Maybe instead track number of expected completions, and free
 * only that many, resetting to zero if "pending" is ever false.
 */
static void tile_net_handle_egress_timer(unsigned long arg)
{
	struct tile_net_cpu *info = (struct tile_net_cpu *)arg;
	struct net_device *dev = info->napi.dev;

	/* The timer is no longer scheduled. */
	info->egress_timer_scheduled = false;

	/* Free comps, and reschedule timer if more are pending. */
	if (tile_net_lepp_free_comps(dev, false))
		tile_net_schedule_egress_timer(info);
}


#ifdef IGNORE_DUP_ACKS

/*
 * Help detect "duplicate" ACKs.  These are sequential packets (for a
 * given flow) which are exactly 66 bytes long, sharing everything but
 * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32,
 * Tstamps=10@0x38.  The ID's are +1, the Hsum's are -1, the Ack's are
 * +N, and the Tstamps are usually identical.
 *
 * NOTE: Apparently truly duplicate acks (with identical "ack" values),
 * should not be collapsed, as they are used for some kind of flow control.
 */
static bool is_dup_ack(char *s1, char *s2, unsigned int len)
{
	int i;

	unsigned long long ignorable = 0;

	/* Identification. */
	ignorable |= (1ULL << 0x12);
	ignorable |= (1ULL << 0x13);

	/* Header checksum. */
	ignorable |= (1ULL << 0x18);
	ignorable |= (1ULL << 0x19);

	/* ACK. */
	ignorable |= (1ULL << 0x2a);
	ignorable |= (1ULL << 0x2b);
	ignorable |= (1ULL << 0x2c);
	ignorable |= (1ULL << 0x2d);

	/* WinSize. */
	ignorable |= (1ULL << 0x30);
	ignorable |= (1ULL << 0x31);

	/* Checksum. */
	ignorable |= (1ULL << 0x32);
	ignorable |= (1ULL << 0x33);

	for (i = 0; i < len; i++, ignorable >>= 1) {

		if ((ignorable & 1) || (s1[i] == s2[i]))
			continue;

#ifdef TILE_NET_DEBUG
		/* HACK: Mention non-timestamp diffs. */
		if (i < 0x38 && i != 0x2f &&
		    net_ratelimit())
			pr_info("Diff at 0x%x\n", i);
#endif

		return false;
	}

#ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS
	/* HACK: Do not suppress truly duplicate ACKs. */
	/* ISSUE: Is this actually necessary or helpful? */
	if (s1[0x2a] == s2[0x2a] &&
	    s1[0x2b] == s2[0x2b] &&
	    s1[0x2c] == s2[0x2c] &&
	    s1[0x2d] == s2[0x2d]) {
		return false;
	}
#endif

	return true;
}

#endif


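/*
 * Drop one packet from the NetIO queue: reclaim and free the "skb"
 * that was provided with its buffer, and advance the read pointer.
 */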
static void tile_net_discard_aux(struct tile_net_cpu *info, int index)
{
	struct tile_netio_queue *queue = &info->queue;
	netio_queue_impl_t *qsp = queue->__system_part;
	netio_queue_user_impl_t *qup = &queue->__user_part;

	int index2_aux = index + sizeof(netio_pkt_t);
	int index2 =
		((index2_aux ==
		  qsp->__packet_receive_queue.__last_packet_plus_one) ?
		 0 : index2_aux);

	netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);

	/* Extract the "linux_buffer_t". */
	unsigned int buffer = pkt->__packet.word;

	/* Convert "linux_buffer_t" to "va". */
	void *va = __va((phys_addr_t)(buffer >> 1) << 7);

	/* Acquire the associated "skb". */
	struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
	struct sk_buff *skb = *skb_ptr;

	kfree_skb(skb);

	/* Consume this packet. */
	qup->__packet_receive_read = index2;
}


/*
 * Like "tile_net_poll()", but just discard packets.
 */
static void tile_net_discard_packets(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];
	struct tile_netio_queue *queue = &info->queue;
	netio_queue_impl_t *qsp = queue->__system_part;
	netio_queue_user_impl_t *qup = &queue->__user_part;

	while (qup->__packet_receive_read !=
	       qsp->__packet_receive_queue.__packet_write) {
		int index = qup->__packet_receive_read;
		tile_net_discard_aux(info, index);
	}
}


/*
 * Handle the next packet.  Return true if "processed", false if "filtered".
 */
static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
{
	struct net_device *dev = info->napi.dev;

	struct tile_netio_queue *queue = &info->queue;
	netio_queue_impl_t *qsp = queue->__system_part;
	netio_queue_user_impl_t *qup = &queue->__user_part;
	struct tile_net_stats_t *stats = &info->stats;

	int filter;

	int index2_aux = index + sizeof(netio_pkt_t);
	int index2 =
		((index2_aux ==
		  qsp->__packet_receive_queue.__last_packet_plus_one) ?
		 0 : index2_aux);

	netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);

	netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);

	/* Extract the packet size.  FIXME: Shouldn't the second line */
	/* get subtracted?  Mostly moot, since it should be "zero". */
	unsigned long len =
		(NETIO_PKT_CUSTOM_LENGTH(pkt) +
		 NET_IP_ALIGN - NETIO_PACKET_PADDING);

	/* Extract the "linux_buffer_t". */
	unsigned int buffer = pkt->__packet.word;

	/* Extract "small" (vs "large"). */
	bool small = ((buffer & 1) != 0);

	/* Convert "linux_buffer_t" to "va". */
	void *va = __va((phys_addr_t)(buffer >> 1) << 7);

	/* Extract the packet data pointer. */
	/* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
	unsigned char *buf = va + NET_IP_ALIGN;

	/* Invalidate the packet buffer. */
	if (!hash_default)
		__inv_buffer(buf, len);

	/* ISSUE: Is this needed? */
	dev->last_rx = jiffies;

#ifdef TILE_NET_DUMP_PACKETS
	dump_packet(buf, len, "rx");
#endif /* TILE_NET_DUMP_PACKETS */

#ifdef TILE_NET_VERIFY_INGRESS
	if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) &&
	    NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) {
		/* Bug 6624: Includes UDP packets with a "zero" checksum. */
		pr_warning("Bad L4 checksum on %ld byte packet.\n", len);
	}
	if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) &&
	    NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) {
		dump_packet(buf, len, "rx");
		panic("Bad L3 checksum.");
	}
	switch (NETIO_PKT_STATUS_M(metadata, pkt)) {
	case NETIO_PKT_STATUS_OVERSIZE:
		if (len >= 64) {
			dump_packet(buf, len, "rx");
			panic("Unexpected OVERSIZE.");
		}
		break;
	case NETIO_PKT_STATUS_BAD:
		pr_warning("Unexpected BAD %ld byte packet.\n", len);
	}
#endif

	filter = 0;

	/* ISSUE: Filter TCP packets with "bad" checksums? */

	if (!(dev->flags & IFF_UP)) {
		/* Filter packets received before we're up. */
		filter = 1;
	} else if (NETIO_PKT_STATUS_M(metadata, pkt) == NETIO_PKT_STATUS_BAD) {
		/* Filter "truncated" packets. */
		filter = 1;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* FIXME: Implement HW multicast filter. */
		if (!is_multicast_ether_addr(buf)) {
			/* Filter packets not for our address. */
			const u8 *mine = dev->dev_addr;
			filter = compare_ether_addr(mine, buf);
		}
	}

	if (filter) {

		/* ISSUE: Update "drop" statistics? */

		tile_net_provide_linux_buffer(info, va, small);

	} else {

		/* Acquire the associated "skb". */
		struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
		struct sk_buff *skb = *skb_ptr;

		/* Paranoia. */
		if (skb->data != buf)
			panic("Corrupt linux buffer from LIPP! "
			      "VA=%p, skb=%p, skb->data=%p\n",
			      va, skb, skb->data);

		/* Encode the actual packet length. */
		skb_put(skb, len);

		/* NOTE: This call also sets "skb->dev = dev". */
		skb->protocol = eth_type_trans(skb, dev);

		/* Avoid recomputing "good" TCP/UDP checksums. */
		if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		netif_receive_skb(skb);

		stats->rx_packets++;
		stats->rx_bytes += len;

		if (small)
			info->num_needed_small_buffers++;
		else
			info->num_needed_large_buffers++;
	}

	/* Return four credits after every fourth packet. */
	if (--qup->__receive_credit_remaining == 0) {
		u32 interval = qup->__receive_credit_interval;
		qup->__receive_credit_remaining = interval;
		__netio_fastio_return_credits(qup->__fastio_index, interval);
	}

	/* Consume this packet. */
	qup->__packet_receive_read = index2;

	return !filter;
}


/*
 * Handle some packets for the given device on the current CPU.
 *
 * If "tile_net_stop()" is called on some other tile while this
 * function is running, we will return, hopefully before that
 * other tile asks us to call "napi_disable()".
 *
 * The "rotting packet" race condition occurs if a packet arrives
 * during the extremely narrow window between the queue appearing to
 * be empty, and the ingress interrupt being re-enabled.  This happens
 * a LOT under heavy network load.
 */
static int tile_net_poll(struct napi_struct *napi, int budget)
{
	struct net_device *dev = napi->dev;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];
	struct tile_netio_queue *queue = &info->queue;
	netio_queue_impl_t *qsp = queue->__system_part;
	netio_queue_user_impl_t *qup = &queue->__user_part;

	unsigned int work = 0;

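	/* Process packets until the queue is empty or the budget is spent. */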
	while (priv->active) {
		int index = qup->__packet_receive_read;
		if (index == qsp->__packet_receive_queue.__packet_write)
			break;

		if (tile_net_poll_aux(info, index)) {
			if (++work >= budget)
				goto done;
		}
	}

	napi_complete(&info->napi);

	if (!priv->active)
		goto done;

	/* Re-enable the ingress interrupt. */
	enable_percpu_irq(priv->intr_id, 0);

	/* HACK: Avoid the "rotting packet" problem (see above). */
	if (qup->__packet_receive_read !=
	    qsp->__packet_receive_queue.__packet_write) {
		/* ISSUE: Sometimes this returns zero, presumably */
		/* because an interrupt was handled for this tile. */
		(void)napi_reschedule(&info->napi);
	}

done:

	if (priv->active)
		tile_net_provide_needed_buffers(info);

	return work;
}


/*
 * Handle an ingress interrupt for the given device on the current cpu.
 *
 * ISSUE: Sometimes this gets called after "disable_percpu_irq()" has
 * been called!  This is probably due to "pending hypervisor downcalls".
 *
 * ISSUE: Is there any race condition between the "napi_schedule()" here
 * and the "napi_complete()" call above?
 */
static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	/* Disable the ingress interrupt. */
	disable_percpu_irq(priv->intr_id);

	/* Ignore unwanted interrupts. */
	if (!priv->active)
		return IRQ_HANDLED;

	/* ISSUE: Sometimes "info->napi_enabled" is false here. */

	napi_schedule(&info->napi);

	return IRQ_HANDLED;
}


/*
 * One time initialization per interface.
 */
static int tile_net_open_aux(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);

	int ret;
	int dummy;
	unsigned int epp_lotar;

	/*
	 * Find out where EPP memory should be homed.
	 */
	ret = hv_dev_pread(priv->hv_devhdl, 0,
			   (HV_VirtAddr)&epp_lotar, sizeof(epp_lotar),
			   NETIO_EPP_SHM_OFF);
	if (ret < 0) {
		pr_err("could not read epp_shm_queue lotar.\n");
		return -EIO;
	}

	/*
	 * Home the page on the EPP.
	 */
	{
		int epp_home = hv_lotar_to_cpu(epp_lotar);
		homecache_change_page_home(priv->eq_pages, EQ_ORDER, epp_home);
	}

	/*
	 * Register the EPP shared memory queue.
	 */
	{
		netio_ipp_address_t ea = {
			.va = 0,
			.pa = __pa(priv->eq),
			.pte = hv_pte(0),
			.size = EQ_SIZE,
		};
		ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar);
		ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3);
		ret = hv_dev_pwrite(priv->hv_devhdl, 0,
				    (HV_VirtAddr)&ea,
				    sizeof(ea),
				    NETIO_EPP_SHM_OFF);
		if (ret < 0)
			return -EIO;
	}

	/*
	 * Start LIPP/LEPP.
	 */
	if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
			  sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) {
		pr_warning("Failed to start LIPP/LEPP.\n");
		return -EIO;
	}

	return 0;
}


/*
 * Register with hypervisor on the current CPU.
 *
 * Strangely, this function does important things even if it "fails",
 * which is especially common if the link is not up yet.  Hopefully
 * these things are all "harmless" if done twice!
 */
static void tile_net_register(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info;

	struct tile_netio_queue *queue;

	/* Only network cpus can receive packets. */
	int queue_id =
		cpumask_test_cpu(my_cpu, &priv->network_cpus_map) ? 0 : 255;

	netio_input_config_t config = {
		.flags = 0,
		.num_receive_packets = priv->network_cpus_credits,
		.queue_id = queue_id
	};

	int ret = 0;
	netio_queue_impl_t *queuep;

	PDEBUG("tile_net_register(queue_id %d)\n", queue_id);

	if (!strcmp(dev->name, "xgbe0"))
		info = &__get_cpu_var(hv_xgbe0);
	else if (!strcmp(dev->name, "xgbe1"))
		info = &__get_cpu_var(hv_xgbe1);
	else if (!strcmp(dev->name, "gbe0"))
		info = &__get_cpu_var(hv_gbe0);
	else if (!strcmp(dev->name, "gbe1"))
		info = &__get_cpu_var(hv_gbe1);
	else
		BUG();

	/* Initialize the egress timer. */
	init_timer(&info->egress_timer);
	info->egress_timer.data = (long)info;
	info->egress_timer.function = tile_net_handle_egress_timer;

	priv->cpu[my_cpu] = info;

	/*
	 * Register ourselves with LIPP.  This does a lot of stuff,
	 * including invoking the LIPP registration code.
	 */
	ret = hv_dev_pwrite(priv->hv_devhdl, 0,
			    (HV_VirtAddr)&config,
			    sizeof(netio_input_config_t),
			    NETIO_IPP_INPUT_REGISTER_OFF);
	PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
	       ret);
	if (ret < 0) {
		if (ret != NETIO_LINK_DOWN) {
			printk(KERN_DEBUG "hv_dev_pwrite "
			       "NETIO_IPP_INPUT_REGISTER_OFF failure %d\n",
			       ret);
		}
		info->link_down = (ret == NETIO_LINK_DOWN);
		return;
	}

	/*
	 * Get the pointer to our queue's system part.
	 */

	ret = hv_dev_pread(priv->hv_devhdl, 0,
			   (HV_VirtAddr)&queuep,
			   sizeof(netio_queue_impl_t *),
			   NETIO_IPP_INPUT_REGISTER_OFF);
	PDEBUG("hv_dev_pread(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
	       ret);
	PDEBUG("queuep %p\n", queuep);
	if (ret <= 0) {
		/* ISSUE: Shouldn't this be a fatal error? */
		pr_err("hv_dev_pread NETIO_IPP_INPUT_REGISTER_OFF failure\n");
		return;
	}

	queue = &info->queue;

	queue->__system_part = queuep;

	memset(&queue->__user_part, 0, sizeof(netio_queue_user_impl_t));

	/* This is traditionally "config.num_receive_packets / 2". */
	queue->__user_part.__receive_credit_interval = 4;
	queue->__user_part.__receive_credit_remaining =
		queue->__user_part.__receive_credit_interval;

	/*
	 * Get a fastio index from the hypervisor.
	 * ISSUE: Shouldn't this check the result?
	 */
	ret = hv_dev_pread(priv->hv_devhdl, 0,
			   (HV_VirtAddr)&queue->__user_part.__fastio_index,
			   sizeof(queue->__user_part.__fastio_index),
			   NETIO_IPP_GET_FASTIO_OFF);
	PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret);

	/* Now we are registered. */
	info->registered = true;
}


/*
 * Deregister with hypervisor on the current CPU.
 *
 * This simply discards all our credits, so no more packets will be
 * delivered to this tile.  There may still be packets in our queue.
 *
 * Also, disable the ingress interrupt.
 */
static void tile_net_deregister(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	/* Disable the ingress interrupt. */
	disable_percpu_irq(priv->intr_id);

	/* Do nothing else if not registered. */
	if (info == NULL || !info->registered)
		return;

	{
		struct tile_netio_queue *queue = &info->queue;
		netio_queue_user_impl_t *qup = &queue->__user_part;

		/* Discard all our credits. */
		__netio_fastio_return_credits(qup->__fastio_index, -1);
	}
}


/*
 * Unregister with hypervisor on the current CPU.
 *
 * Also, disable the ingress interrupt.
 */
static void tile_net_unregister(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	int ret;
	int dummy = 0;

	/* Disable the ingress interrupt. */
	disable_percpu_irq(priv->intr_id);

	/* Do nothing else if not registered. */
	if (info == NULL || !info->registered)
		return;

	/* Unregister ourselves with LIPP/LEPP. */
	ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
			    sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF);
	if (ret < 0)
		panic("Failed to unregister with LIPP/LEPP!\n");

	/* Discard all packets still in our NetIO queue. */
	tile_net_discard_packets(dev);

	/* Reset state. */
	info->num_needed_small_buffers = 0;
	info->num_needed_large_buffers = 0;

	/* Cancel egress timer. */
	del_timer(&info->egress_timer);
	info->egress_timer_scheduled = false;
}


/*
 * Helper function for "tile_net_stop()".
 *
 * Also used to handle registration failure in "tile_net_open_inner()",
 * when the various extra steps in "tile_net_stop()" are not necessary.
 */
static void tile_net_stop_aux(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int i;

	int dummy = 0;

	/*
	 * Unregister all tiles, so LIPP will stop delivering packets.
	 * Also, delete all the "napi" objects (sequentially, to protect
	 * "dev->napi_list").
	 */
	on_each_cpu(tile_net_unregister, (void *)dev, 1);
	for_each_online_cpu(i) {
		struct tile_net_cpu *info = priv->cpu[i];
		if (info != NULL && info->registered) {
			netif_napi_del(&info->napi);
			info->registered = false;
		}
	}

	/* Stop LIPP/LEPP. */
	if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
			  sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0)
		panic("Failed to stop LIPP/LEPP!\n");

	priv->partly_opened = false;
}


/*
 * Disable NAPI for the given device on the current cpu.
 */
static void tile_net_stop_disable(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	/* Disable NAPI if needed. */
	if (info != NULL && info->napi_enabled) {
		napi_disable(&info->napi);
		info->napi_enabled = false;
	}
}


/*
 * Enable NAPI and the ingress interrupt for the given device
 * on the current cpu.
 *
 * ISSUE: Only do this for "network cpus"?
 */
static void tile_net_open_enable(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	/* Enable NAPI. */
	napi_enable(&info->napi);
	info->napi_enabled = true;

	/* Enable the ingress interrupt. */
	enable_percpu_irq(priv->intr_id, 0);
}


/*
 * tile_net_open_inner does most of the work of bringing up the interface.
 * It's called from tile_net_open(), and also from tile_net_open_retry().
 * The return value is 0 if the interface was brought up, < 0 if
 * tile_net_open() should return the return value as an error, and > 0 if
 * tile_net_open() should return success and schedule a work item to
 * periodically retry the bringup.
 */
static int tile_net_open_inner(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info;
	struct tile_netio_queue *queue;
	int result = 0;
	int i;
	int dummy = 0;

	/*
	 * First try to register just on the local CPU, and handle any
	 * semi-expected "link down" failure specially.  Note that we
	 * do NOT call "tile_net_stop_aux()", unlike below.
	 */
	tile_net_register(dev);
	info = priv->cpu[my_cpu];
	if (!info->registered) {
		if (info->link_down)
			return 1;
		return -EAGAIN;
	}

	/*
	 * Now register everywhere else.  If any registration fails,
	 * even for "link down" (which might not be possible), we
	 * clean up using "tile_net_stop_aux()".  Also, add all the
	 * "napi" objects (sequentially, to protect "dev->napi_list").
	 * ISSUE: Only use "netif_napi_add()" for "network cpus"?
	 */
	smp_call_function(tile_net_register, (void *)dev, 1);
	for_each_online_cpu(i) {
		struct tile_net_cpu *info = priv->cpu[i];
		if (info->registered)
			netif_napi_add(dev, &info->napi, tile_net_poll, 64);
		else
			result = -EAGAIN;
	}
	if (result != 0) {
		tile_net_stop_aux(dev);
		return result;
	}

	queue = &info->queue;

	if (priv->intr_id == 0) {
		unsigned int irq;

		/*
		 * Acquire the irq allocated by the hypervisor.  Every
		 * queue gets the same irq.  The "__intr_id" field is
		 * "1 << irq", so we use "__ffs()" to extract "irq".
		 */
		priv->intr_id = queue->__system_part->__intr_id;
		BUG_ON(priv->intr_id == 0);
		irq = __ffs(priv->intr_id);

		/*
		 * Register the ingress interrupt handler for this
		 * device, permanently.
		 *
		 * We used to call "free_irq()" in "tile_net_stop()",
		 * and then re-register the handler here every time,
		 * but that caused DNP errors in "handle_IRQ_event()"
		 * because "desc->action" was NULL.  See bug 9143.
		 */
		tile_irq_activate(irq, TILE_IRQ_PERCPU);
		BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt,
				   0, dev->name, (void *)dev) != 0);
	}

	{
		/* Allocate initial buffers. */

		int max_buffers =
			priv->network_cpus_count * priv->network_cpus_credits;

		info->num_needed_small_buffers =
			min(LIPP_SMALL_BUFFERS, max_buffers);

		info->num_needed_large_buffers =
			min(LIPP_LARGE_BUFFERS, max_buffers);

		tile_net_provide_needed_buffers(info);

		if (info->num_needed_small_buffers != 0 ||
		    info->num_needed_large_buffers != 0)
			panic("Insufficient memory for buffer stack!");
	}

	/* We are about to be active. */
	priv->active = true;

	/* Make sure "active" is visible to all tiles. */
	mb();

	/* On each tile, enable NAPI and the ingress interrupt. */
	on_each_cpu(tile_net_open_enable, (void *)dev, 1);

	/* Start LIPP/LEPP and activate "ingress" at the shim. */
	if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
			  sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0)
		panic("Failed to activate the LIPP Shim!\n");

	/* Start our transmit queue. */
	netif_start_queue(dev);

	return 0;
}


/*
 * Called periodically to retry bringing up the NetIO interface,
 * if it doesn't come up cleanly during tile_net_open().
 */
static void tile_net_open_retry(struct work_struct *w)
{
	struct delayed_work *dw =
		container_of(w, struct delayed_work, work);

	struct tile_net_priv *priv =
		container_of(dw, struct tile_net_priv, retry_work);

	/*
	 * Try to bring the NetIO interface up.  If it fails, reschedule
	 * ourselves to try again later; otherwise, tell Linux we now have
	 * a working link.  ISSUE: What if the return value is negative?
	 */
	if (tile_net_open_inner(priv->dev) != 0)
		schedule_delayed_work(&priv->retry_work,
				      TILE_NET_RETRY_INTERVAL);
	else
		netif_carrier_on(priv->dev);
}
1445 | ||
1446 | ||
1447 | /* | |
1448 | * Called when a network interface is made active. | |
1449 | * | |
1450 | * Returns 0 on success, negative value on failure. | |
1451 | * | |
1452 | * The open entry point is called when a network interface is made | |
1453 | * active by the system (IFF_UP). At this point all resources needed | |
1454 | * for transmit and receive operations are allocated, the interrupt | |
d91c6412 CM |
1455 | * handler is registered with the OS (if needed), the watchdog timer |
1456 | * is started, and the stack is notified that the interface is ready. | |
e5a06939 CM |
1457 | * |
1458 | * If the actual link is not available yet, then we tell Linux that | |
1459 | * we have no carrier, and we keep checking until the link comes up. | |
1460 | */ | |
1461 | static int tile_net_open(struct net_device *dev) | |
1462 | { | |
1463 | int ret = 0; | |
1464 | struct tile_net_priv *priv = netdev_priv(dev); | |
1465 | ||
1466 | /* | |
1467 | * We rely on priv->partly_opened to tell us if this is the | |
1468 | * first time this interface is being brought up. If it is | |
1469 | * set, the IPP was already initialized and should not be | |
1470 | * initialized again. | |
1471 | */ | |
1472 | if (!priv->partly_opened) { | |
1473 | ||
1474 | int count; | |
1475 | int credits; | |
1476 | ||
1477 | /* Initialize LIPP/LEPP, and start the Shim. */ | |
1478 | ret = tile_net_open_aux(dev); | |
1479 | if (ret < 0) { | |
1480 | pr_err("tile_net_open_aux failed: %d\n", ret); | |
1481 | return ret; | |
1482 | } | |
1483 | ||
1484 | /* Analyze the network cpus. */ | |
1485 | ||
1486 | if (network_cpus_used) | |
1487 | cpumask_copy(&priv->network_cpus_map, | |
1488 | &network_cpus_map); | |
1489 | else | |
1490 | cpumask_copy(&priv->network_cpus_map, cpu_online_mask); | |
1491 | ||
1492 | ||
1493 | count = cpumask_weight(&priv->network_cpus_map); | |
1494 | ||
1495 | /* Limit credits to available buffers, and apply min. */ | |
1496 | credits = max(16, (LIPP_LARGE_BUFFERS / count) & ~1); | |
1497 | ||
1498 | /* Apply "GBE" max limit. */ | |
1499 | /* ISSUE: Use higher limit for XGBE? */ | |
1500 | credits = min(NETIO_MAX_RECEIVE_PKTS, credits); | |
1501 | ||
1502 | priv->network_cpus_count = count; | |
1503 | priv->network_cpus_credits = credits; | |
1504 | ||
1505 | #ifdef TILE_NET_DEBUG | |
1506 | pr_info("Using %d network cpus, with %d credits each\n", | |
1507 | priv->network_cpus_count, priv->network_cpus_credits); | |
1508 | #endif | |
1509 | ||
3db1cd5c | 1510 | priv->partly_opened = true; |
d91c6412 CM |
1511 | |
1512 | } else { | |
1513 | /* FIXME: Is this possible? */ | |
1514 | /* printk("Already partly opened.\n"); */ | |
e5a06939 CM |
1515 | } |
1516 | ||
1517 | /* | |
1518 | * Attempt to bring up the link. | |
1519 | */ | |
1520 | ret = tile_net_open_inner(dev); | |
1521 | if (ret <= 0) { | |
1522 | if (ret == 0) | |
1523 | netif_carrier_on(dev); | |
1524 | return ret; | |
1525 | } | |
1526 | ||
1527 | /* | |
1528 | * We were unable to bring up the NetIO interface, but we want to | |
1529 | * try again in a little bit. Tell Linux that we have no carrier | |
1530 | * so it doesn't try to use the interface before the link comes up, | |
1531 | * and schedule ourselves to try again later. | |
1532 | */ | |
1533 | netif_carrier_off(dev); | |
d91c6412 | 1534 | schedule_delayed_work(&priv->retry_work, TILE_NET_RETRY_INTERVAL); |
e5a06939 CM |
1535 | |
1536 | return 0; | |
1537 | } | |
1538 | ||
1539 | ||
d91c6412 | 1540 | static int tile_net_drain_lipp_buffers(struct tile_net_priv *priv) |
e5a06939 | 1541 | { |
d91c6412 | 1542 | int n = 0; |
e5a06939 | 1543 | |
d91c6412 | 1544 | /* Drain all the LIPP buffers. */ |
e5a06939 CM |
1545 | while (true) { |
1546 | int buffer; | |
1547 | ||
1548 | /* NOTE: This should never fail. */ | |
1549 | if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer, | |
1550 | sizeof(buffer), NETIO_IPP_DRAIN_OFF) < 0) | |
1551 | break; | |
1552 | ||
1553 | /* Stop when done. */ | |
1554 | if (buffer == 0) | |
1555 | break; | |
1556 | ||
1557 | { | |
1558 | /* Convert "linux_buffer_t" to "va". */ | |
1559 | void *va = __va((phys_addr_t)(buffer >> 1) << 7); | |
1560 | ||
1561 | /* Acquire the associated "skb". */ | |
1562 | struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); | |
1563 | struct sk_buff *skb = *skb_ptr; | |
1564 | ||
1565 | kfree_skb(skb); | |
1566 | } | |
d91c6412 CM |
1567 | |
1568 | n++; | |
e5a06939 CM |
1569 | } |
1570 | ||
d91c6412 CM |
1571 | return n; |
1572 | } | |
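/*
 * Editor's note: a minimal sketch of the handle decode used just
 * above, assuming that a "linux_buffer_t" packs the buffer's physical
 * address in 128-byte units with a flag in the low bit.  The name
 * "tile_net_buffer_to_va" is hypothetical; the authoritative encoding
 * lives in the hypervisor headers.
 */
static inline void *tile_net_buffer_to_va(unsigned int buffer)
{
	/* Drop the low flag bit, then scale by the 128-byte grain. */
	return __va((phys_addr_t)(buffer >> 1) << 7);
}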
e5a06939 CM |
1573 | |
1574 | ||
d91c6412 CM |
1575 | /* |
1576 | * Disables a network interface. | |
1577 | * | |
1578 | * Returns 0; this is not allowed to fail. | |
1579 | * | |
1580 | * The close entry point is called when an interface is deactivated | |
1581 | * by the OS. The hardware is still under the driver's control, but | |
1582 | * needs to be disabled. A global MAC reset is issued to stop the | |
1583 | * hardware, and all transmit and receive resources are freed. | |
1584 | * | |
1585 | * ISSUE: How closely does "netif_running(dev)" mirror "priv->active"? | |
1586 | * | |
1587 | * Before we are called by "__dev_close()", "netif_running()" will | |
1588 | * have been cleared, so no NEW calls to "tile_net_poll()" will be | |
1589 | * made by "netpoll_poll_dev()". | |
1590 | * | |
1591 | * Often, this can cause some tiles to still have packets in their | |
1592 | * queues, so we must call "tile_net_discard_packets()" later. | |
1593 | * | |
1594 | * Note that some other tile may still be INSIDE "tile_net_poll()", | |
1595 | * and in fact, many will be, if there is heavy network load. | |
1596 | * | |
1597 | * Calling "on_each_cpu(tile_net_stop_disable, (void *)dev, 1)" when | |
1598 | * any tile still has NAPI scheduled will induce a horrible crash | |
1599 | * when "msleep()" is called. This includes tiles which are inside | |
1600 | * "tile_net_poll()" but have not yet called "napi_complete()". | |
1601 | * | |
1602 | * So, we must first try to wait long enough for other tiles to finish | |
1603 | * with any current "tile_net_poll()" call, and, hopefully, to clear | |
1604 | * the "scheduled" flag. ISSUE: It is unclear what happens to tiles | |
1605 | * which have called "napi_schedule()" but which had not yet tried to | |
1606 | * call "tile_net_poll()", or which exhausted their budget inside | |
1607 | * "tile_net_poll()" just before this function was called. | |
1608 | */ | |
1609 | static int tile_net_stop(struct net_device *dev) | |
1610 | { | |
1611 | struct tile_net_priv *priv = netdev_priv(dev); | |
1612 | ||
1613 | PDEBUG("tile_net_stop()\n"); | |
e5a06939 | 1614 | |
d91c6412 CM |
1615 | /* Start discarding packets. */ |
1616 | priv->active = false; | |
1617 | ||
1618 | /* Make sure "active" is visible to all tiles. */ | |
1619 | mb(); | |
e5a06939 CM |
1620 | |
1621 | /* | |
d91c6412 CM |
1622 | * On each tile, make sure no NEW packets get delivered, and |
1623 | * disable the ingress interrupt. | |
1624 | * | |
1625 | * Note that the ingress interrupt can fire AFTER this, | |
1626 | * presumably due to packets which were recently delivered, | |
1627 | * but it will have no effect. | |
e5a06939 | 1628 | */ |
d91c6412 | 1629 | on_each_cpu(tile_net_deregister, (void *)dev, 1); |
e5a06939 | 1630 | |
d91c6412 CM |
1631 | /* Optimistically drain LIPP buffers. */ |
1632 | (void)tile_net_drain_lipp_buffers(priv); | |
e5a06939 | 1633 | |
d91c6412 CM |
1634 | /* ISSUE: Only needed if not yet fully open. */ |
1635 | cancel_delayed_work_sync(&priv->retry_work); | |
e5a06939 | 1636 | |
d91c6412 CM |
1637 | /* Can't transmit any more. */ |
1638 | netif_stop_queue(dev); | |
e5a06939 | 1639 | |
d91c6412 CM |
1640 | /* Disable NAPI on each tile. */ |
1641 | on_each_cpu(tile_net_stop_disable, (void *)dev, 1); | |
1642 | ||
1643 | /* | |
1644 | * Drain any remaining LIPP buffers. NOTE: This warning | |
1645 | * has never been observed, but in theory it could happen. | |
1646 | */ | |
1647 | if (tile_net_drain_lipp_buffers(priv) != 0) | |
1648 | pr_warn("Had to drain some extra LIPP buffers!\n");
e5a06939 | 1649 | |
d91c6412 CM |
1650 | /* Stop LIPP/LEPP. */ |
1651 | tile_net_stop_aux(dev); | |
1652 | ||
1653 | /* | |
1654 | * ISSUE: In practice, there appear to be no pending completions | |
1655 | * by the time we get here, but just in case, we free all of | |
1656 | * them anyway. | |
1657 | */ | |
1658 | while (tile_net_lepp_free_comps(dev, true)) | |
1659 | /* loop */; | |
e5a06939 | 1660 | |
d07bd86d | 1661 | /* Wipe the EPP queue, and wait till the stores hit the EPP. */ |
d91c6412 | 1662 | memset(priv->eq, 0, sizeof(lepp_queue_t)); |
d07bd86d | 1663 | mb(); |
e5a06939 CM |
1664 | |
1665 | return 0; | |
1666 | } | |
1667 | ||
1668 | ||
1669 | /* | |
1670 | * Prepare the "frags" info for the resulting LEPP command. | |
1671 | * | |
1672 | * If needed, flush the memory used by the frags. | |
1673 | */ | |
1674 | static unsigned int tile_net_tx_frags(lepp_frag_t *frags, | |
1675 | struct sk_buff *skb, | |
1676 | void *b_data, unsigned int b_len) | |
1677 | { | |
1678 | unsigned int i, n = 0; | |
1679 | ||
1680 | struct skb_shared_info *sh = skb_shinfo(skb); | |
1681 | ||
1682 | phys_addr_t cpa; | |
1683 | ||
1684 | if (b_len != 0) { | |
1685 | ||
1686 | if (!hash_default) | |
63b7ca6b | 1687 | finv_buffer_remote(b_data, b_len, 0); |
e5a06939 CM |
1688 | |
1689 | cpa = __pa(b_data); | |
1690 | frags[n].cpa_lo = cpa; | |
1691 | frags[n].cpa_hi = cpa >> 32; | |
1692 | frags[n].length = b_len; | |
1693 | frags[n].hash_for_home = hash_default; | |
1694 | n++; | |
1695 | } | |
1696 | ||
1697 | for (i = 0; i < sh->nr_frags; i++) { | |
1698 | ||
1699 | skb_frag_t *f = &sh->frags[i]; | |
781a5e92 | 1700 | unsigned long pfn = page_to_pfn(skb_frag_page(f)); |
e5a06939 CM |
1701 | |
1702 | /* FIXME: Compute "hash_for_home" properly. */ | |
1703 | /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */ | |
1704 | int hash_for_home = hash_default; | |
1705 | ||
1706 | /* FIXME: Hmmm. */ | |
1707 | if (!hash_default) { | |
1708 | void *va = pfn_to_kaddr(pfn) + f->page_offset; | |
781a5e92 | 1709 | BUG_ON(PageHighMem(skb_frag_page(f))); |
63b7ca6b | 1710 | finv_buffer_remote(va, f->size, 0); |
e5a06939 CM |
1711 | } |
1712 | ||
1713 | cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset; | |
1714 | frags[n].cpa_lo = cpa; | |
1715 | frags[n].cpa_hi = cpa >> 32; | |
9e903e08 | 1716 | frags[n].length = skb_frag_size(f); |
e5a06939 CM |
1717 | frags[n].hash_for_home = hash_for_home; |
1718 | n++; | |
1719 | } | |
1720 | ||
1721 | return n; | |
1722 | } | |
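/*
 * Editor's note: a hypothetical sketch of "lepp_frag_t", inferred
 * purely from the fields assigned above; the real definition comes
 * from <hv/drv_xgbe_intf.h> and may differ in layout and widths.
 *
 *	typedef struct {
 *		u32 cpa_lo;        // low 32 bits of client physical address
 *		u32 cpa_hi;        // high 32 bits of client physical address
 *		u16 length;        // fragment length, in bytes
 *		u8 hash_for_home;  // nonzero if hash-for-home cached
 *	} lepp_frag_t;
 */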
1723 | ||
1724 | ||
1725 | /* | |
1726 | * This function takes "skb", consisting of a header template and a | |
1727 | * payload, and hands it to LEPP, to emit as one or more segments, | |
1728 | * each consisting of a possibly modified header, plus a piece of the | |
1729 | * payload, via a process known as "TCP segmentation offload". | |
1730 | * | |
1731 | * Usually, "data" will contain the header template, of size "sh_len", | |
1732 | * and "sh->frags" will contain "skb->data_len" bytes of payload, and | |
1733 | * there will be "sh->gso_segs" segments. | |
1734 | * | |
1735 | * Sometimes, if "sendfile()" requires copying, we will be called with | |
1736 | * "data" containing the header and payload, with "frags" being empty. | |
1737 | * | |
1738 | * In theory, "sh->nr_frags" could be 3, but in practice, it seems | |
1739 | * that this will never actually happen. | |
1740 | * | |
1741 | * See "emulate_large_send_offload()" for some reference code, which | |
1742 | * does not handle checksumming. | |
1743 | * | |
1744 | * ISSUE: How do we make sure that high memory DMA does not migrate? | |
1745 | */ | |
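/*
 * Editor's note: a worked example of the sizes computed below (all
 * numbers illustrative).  For a TSO skb with a 14-byte Ethernet
 * header, a 20-byte IP header, and a 20-byte TCP header, "sh_len" is
 * 54.  If skb_headlen(skb) is also 54, then "b_len" is 0 and the
 * entire payload sits in "sh->frags"; with "d_len" = 4000 payload
 * bytes and "p_len" = sh->gso_size = 1000, LEPP emits 4 segments.
 */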
1746 | static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) | |
1747 | { | |
1748 | struct tile_net_priv *priv = netdev_priv(dev); | |
1749 | int my_cpu = smp_processor_id(); | |
1750 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | |
1751 | struct tile_net_stats_t *stats = &info->stats; | |
1752 | ||
1753 | struct skb_shared_info *sh = skb_shinfo(skb); | |
1754 | ||
1755 | unsigned char *data = skb->data; | |
1756 | ||
1757 | /* The ip header follows the ethernet header. */ | |
1758 | struct iphdr *ih = ip_hdr(skb); | |
1759 | unsigned int ih_len = ih->ihl * 4; | |
1760 | ||
1761 | /* Note that "nh == ih", by definition. */ | |
1762 | unsigned char *nh = skb_network_header(skb); | |
1763 | unsigned int eh_len = nh - data; | |
1764 | ||
1765 | /* The tcp header follows the ip header. */ | |
1766 | struct tcphdr *th = (struct tcphdr *)(nh + ih_len); | |
1767 | unsigned int th_len = th->doff * 4; | |
1768 | ||
1769 | /* The total number of header bytes. */ | |
1770 | /* NOTE: This may be less than skb_headlen(skb). */ | |
1771 | unsigned int sh_len = eh_len + ih_len + th_len; | |
1772 | ||
1773 | /* The number of payload bytes at "skb->data + sh_len". */ | |
1774 | /* This is non-zero for sendfile() without HIGHDMA. */ | |
1775 | unsigned int b_len = skb_headlen(skb) - sh_len; | |
1776 | ||
1777 | /* The total number of payload bytes. */ | |
1778 | unsigned int d_len = b_len + skb->data_len; | |
1779 | ||
1780 | /* The maximum payload size. */ | |
1781 | unsigned int p_len = sh->gso_size; | |
1782 | ||
1783 | /* The total number of segments. */ | |
1784 | unsigned int num_segs = sh->gso_segs; | |
1785 | ||
1786 | /* The temporary copy of the command. */ | |
1787 | u32 cmd_body[(LEPP_MAX_CMD_SIZE + 3) / 4]; | |
1788 | lepp_tso_cmd_t *cmd = (lepp_tso_cmd_t *)cmd_body; | |
1789 | ||
1790 | /* Analyze the "frags". */ | |
1791 | unsigned int num_frags = | |
1792 | tile_net_tx_frags(cmd->frags, skb, data + sh_len, b_len); | |
1793 | ||
1794 | /* The size of the command, including frags and header. */ | |
1795 | size_t cmd_size = LEPP_TSO_CMD_SIZE(num_frags, sh_len); | |
1796 | ||
1797 | /* The command header. */ | |
1798 | lepp_tso_cmd_t cmd_init = { | |
1799 | .tso = true, | |
1800 | .header_size = sh_len, | |
1801 | .ip_offset = eh_len, | |
1802 | .tcp_offset = eh_len + ih_len, | |
1803 | .payload_size = p_len, | |
1804 | .num_frags = num_frags, | |
1805 | }; | |
1806 | ||
1807 | unsigned long irqflags; | |
1808 | ||
d91c6412 | 1809 | lepp_queue_t *eq = priv->eq; |
e5a06939 | 1810 | |
d91c6412 CM |
1811 | struct sk_buff *olds[8]; |
1812 | unsigned int wanted = 8; | |
e5a06939 CM |
1813 | unsigned int i, nolds = 0; |
1814 | ||
1815 | unsigned int cmd_head, cmd_tail, cmd_next; | |
1816 | unsigned int comp_tail; | |
1817 | ||
e5a06939 CM |
1818 | |
1819 | /* Paranoia. */ | |
1820 | BUG_ON(skb->protocol != htons(ETH_P_IP)); | |
1821 | BUG_ON(ih->protocol != IPPROTO_TCP); | |
1822 | BUG_ON(skb->ip_summed != CHECKSUM_PARTIAL); | |
1823 | BUG_ON(num_frags > LEPP_MAX_FRAGS); | |
1824 | /*--BUG_ON(num_segs != (d_len + (p_len - 1)) / p_len); */ | |
1825 | BUG_ON(num_segs <= 1); | |
1826 | ||
1827 | ||
1828 | /* Finish preparing the command. */ | |
1829 | ||
1830 | /* Copy the command header. */ | |
1831 | *cmd = cmd_init; | |
1832 | ||
1833 | /* Copy the "header". */ | |
1834 | memcpy(&cmd->frags[num_frags], data, sh_len); | |
1835 | ||
1836 | ||
1837 | /* Prefetch and wait, to minimize time spent holding the spinlock. */ | |
1838 | prefetch_L1(&eq->comp_tail); | |
1839 | prefetch_L1(&eq->cmd_tail); | |
1840 | mb(); | |
1841 | ||
1842 | ||
1843 | /* Enqueue the command. */ | |
1844 | ||
d91c6412 | 1845 | spin_lock_irqsave(&priv->eq_lock, irqflags); |
e5a06939 CM |
1846 | |
1847 | /* | |
1848 | * Handle completions if needed to make room. | |
1849 | * HACK: Spin until there is sufficient room. | |
1850 | */ | |
d91c6412 CM |
1851 | if (lepp_num_free_comp_slots(eq) == 0) { |
1852 | nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0); | |
1853 | if (nolds == 0) { | |
1854 | busy: | |
1855 | spin_unlock_irqrestore(&priv->eq_lock, irqflags); | |
1856 | return NETDEV_TX_BUSY; | |
1857 | } | |
e5a06939 CM |
1858 | } |
1859 | ||
1860 | cmd_head = eq->cmd_head; | |
1861 | cmd_tail = eq->cmd_tail; | |
1862 | ||
e5a06939 CM |
1863 | /* Prepare to advance, detecting full queue. */ |
1864 | cmd_next = cmd_tail + cmd_size; | |
1865 | if (cmd_tail < cmd_head && cmd_next >= cmd_head) | |
d91c6412 | 1866 | goto busy; |
e5a06939 CM |
1867 | if (cmd_next > LEPP_CMD_LIMIT) { |
1868 | cmd_next = 0; | |
1869 | if (cmd_next == cmd_head) | |
d91c6412 | 1870 | goto busy; |
e5a06939 CM |
1871 | } |
1872 | ||
1873 | /* Copy the command. */ | |
1874 | memcpy(&eq->cmds[cmd_tail], cmd, cmd_size); | |
1875 | ||
1876 | /* Advance. */ | |
1877 | cmd_tail = cmd_next; | |
1878 | ||
1879 | /* Record "skb" for eventual freeing. */ | |
1880 | comp_tail = eq->comp_tail; | |
1881 | eq->comps[comp_tail] = skb; | |
1882 | LEPP_QINC(comp_tail); | |
1883 | eq->comp_tail = comp_tail; | |
1884 | ||
1885 | /* Flush before allowing LEPP to handle the command. */ | |
d91c6412 | 1886 | /* ISSUE: Is this the optimal location for the flush? */ |
e5a06939 CM |
1887 | __insn_mf(); |
1888 | ||
1889 | eq->cmd_tail = cmd_tail; | |
1890 | ||
d91c6412 CM |
1891 | /* NOTE: Using "4" here is more efficient than "0" or "2", */ |
1892 | /* and, strangely, more efficient than pre-checking the number */ | |
1893 | /* of available completions, and comparing it to 4. */ | |
e5a06939 | 1894 | if (nolds == 0) |
d91c6412 CM |
1895 | nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4); |
1896 | ||
1897 | spin_unlock_irqrestore(&priv->eq_lock, irqflags); | |
e5a06939 CM |
1898 | |
1899 | /* Handle completions. */ | |
1900 | for (i = 0; i < nolds; i++) | |
1901 | kfree_skb(olds[i]); | |
1902 | ||
1903 | /* Update stats. */ | |
1904 | stats->tx_packets += num_segs; | |
1905 | stats->tx_bytes += (num_segs * sh_len) + d_len; | |
1906 | ||
1907 | /* Make sure the egress timer is scheduled. */ | |
1908 | tile_net_schedule_egress_timer(info); | |
1909 | ||
1910 | return NETDEV_TX_OK; | |
1911 | } | |
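/*
 * Editor's note: the queue-full checks above are easy to misread.
 * This stand-alone restatement of the same logic is illustrative
 * only and is not part of the driver: appending "size" bytes at
 * "tail" collides with "head" either directly, or after wrapping
 * back to zero.
 */
static inline bool lepp_cmd_ring_full(unsigned int head, unsigned int tail,
				      unsigned int size)
{
	unsigned int next = tail + size;

	/* The new command would overwrite outstanding commands. */
	if (tail < head && next >= head)
		return true;

	/* The new command would wrap, and the consumer is still at zero. */
	if (next > LEPP_CMD_LIMIT && head == 0)
		return true;

	return false;
}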
1912 | ||
1913 | ||
1914 | /* | |
1915 | * Transmit a packet (called by the kernel via "hard_start_xmit" hook). | |
1916 | */ | |
1917 | static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) | |
1918 | { | |
1919 | struct tile_net_priv *priv = netdev_priv(dev); | |
1920 | int my_cpu = smp_processor_id(); | |
1921 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | |
1922 | struct tile_net_stats_t *stats = &info->stats; | |
1923 | ||
1924 | unsigned long irqflags; | |
1925 | ||
1926 | struct skb_shared_info *sh = skb_shinfo(skb); | |
1927 | ||
1928 | unsigned int len = skb->len; | |
1929 | unsigned char *data = skb->data; | |
1930 | ||
96339d6c | 1931 | unsigned int csum_start = skb_checksum_start_offset(skb); |
e5a06939 CM |
1932 | |
1933 | lepp_frag_t frags[LEPP_MAX_FRAGS]; | |
1934 | ||
1935 | unsigned int num_frags; | |
1936 | ||
d91c6412 | 1937 | lepp_queue_t *eq = priv->eq; |
e5a06939 | 1938 | |
d91c6412 CM |
1939 | struct sk_buff *olds[8]; |
1940 | unsigned int wanted = 8; | |
e5a06939 CM |
1941 | unsigned int i, nolds = 0; |
1942 | ||
1943 | unsigned int cmd_size = sizeof(lepp_cmd_t); | |
1944 | ||
1945 | unsigned int cmd_head, cmd_tail, cmd_next; | |
1946 | unsigned int comp_tail; | |
1947 | ||
1948 | lepp_cmd_t cmds[LEPP_MAX_FRAGS]; | |
1949 | ||
e5a06939 CM |
1950 | |
1951 | /* | |
1952 | * This is paranoia, since we think that if the link doesn't come | |
1953 | * up, telling Linux we have no carrier will keep it from trying | |
1954 | * to transmit. If it transmits anyway, we can't execute this routine, | |
1955 | * since data structures we depend on aren't set up yet. | |
1956 | */ | |
1957 | if (!info->registered) | |
1958 | return NETDEV_TX_BUSY; | |
1959 | ||
1960 | ||
1961 | /* Save the timestamp. */ | |
1962 | dev->trans_start = jiffies; | |
1963 | ||
1964 | ||
1965 | #ifdef TILE_NET_PARANOIA | |
1966 | #if CHIP_HAS_CBOX_HOME_MAP() | |
1967 | if (hash_default) { | |
1968 | HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data); | |
1969 | if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3) | |
d91c6412 CM |
1970 | panic("Non-HFH egress buffer! VA=%p Mode=%d PTE=%llx", |
1971 | data, hv_pte_get_mode(pte), hv_pte_val(pte)); | |
e5a06939 CM |
1972 | } |
1973 | #endif | |
1974 | #endif | |
1975 | ||
1976 | ||
1977 | #ifdef TILE_NET_DUMP_PACKETS | |
1978 | /* ISSUE: Does not dump the "frags". */ | |
1979 | dump_packet(data, skb_headlen(skb), "tx"); | |
1980 | #endif /* TILE_NET_DUMP_PACKETS */ | |
1981 | ||
1982 | ||
1983 | if (sh->gso_size != 0) | |
1984 | return tile_net_tx_tso(skb, dev); | |
1985 | ||
1986 | ||
1987 | /* Prepare the commands. */ | |
1988 | ||
1989 | num_frags = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); | |
1990 | ||
1991 | for (i = 0; i < num_frags; i++) { | |
1992 | ||
1993 | bool final = (i == num_frags - 1); | |
1994 | ||
1995 | lepp_cmd_t cmd = { | |
1996 | .cpa_lo = frags[i].cpa_lo, | |
1997 | .cpa_hi = frags[i].cpa_hi, | |
1998 | .length = frags[i].length, | |
1999 | .hash_for_home = frags[i].hash_for_home, | |
2000 | .send_completion = final, | |
2001 | .end_of_packet = final | |
2002 | }; | |
2003 | ||
2004 | if (i == 0 && skb->ip_summed == CHECKSUM_PARTIAL) { | |
2005 | cmd.compute_checksum = 1; | |
2006 | cmd.checksum_data.bits.start_byte = csum_start; | |
2007 | cmd.checksum_data.bits.count = len - csum_start; | |
2008 | cmd.checksum_data.bits.destination_byte = | |
2009 | csum_start + skb->csum_offset; | |
2010 | } | |
2011 | ||
2012 | cmds[i] = cmd; | |
2013 | } | |
2014 | ||
2015 | ||
2016 | /* Prefetch and wait, to minimize time spent holding the spinlock. */ | |
2017 | prefetch_L1(&eq->comp_tail); | |
2018 | prefetch_L1(&eq->cmd_tail); | |
2019 | mb(); | |
2020 | ||
2021 | ||
2022 | /* Enqueue the commands. */ | |
2023 | ||
d91c6412 | 2024 | spin_lock_irqsave(&priv->eq_lock, irqflags); |
e5a06939 CM |
2025 | |
2026 | /* | |
2027 | * Handle completions if needed to make room. | |
2028 | * HACK: Spin until there is sufficient room. | |
2029 | */ | |
d91c6412 CM |
2030 | if (lepp_num_free_comp_slots(eq) == 0) { |
2031 | nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0); | |
2032 | if (nolds == 0) { | |
2033 | busy: | |
2034 | spin_unlock_irqrestore(&priv->eq_lock, irqflags); | |
2035 | return NETDEV_TX_BUSY; | |
2036 | } | |
e5a06939 CM |
2037 | } |
2038 | ||
2039 | cmd_head = eq->cmd_head; | |
2040 | cmd_tail = eq->cmd_tail; | |
2041 | ||
e5a06939 CM |
2042 | /* Copy the commands, or fail. */ |
2043 | for (i = 0; i < num_frags; i++) { | |
2044 | ||
2045 | /* Prepare to advance, detecting full queue. */ | |
2046 | cmd_next = cmd_tail + cmd_size; | |
2047 | if (cmd_tail < cmd_head && cmd_next >= cmd_head) | |
d91c6412 | 2048 | goto busy; |
e5a06939 CM |
2049 | if (cmd_next > LEPP_CMD_LIMIT) { |
2050 | cmd_next = 0; | |
2051 | if (cmd_next == cmd_head) | |
d91c6412 | 2052 | goto busy; |
e5a06939 CM |
2053 | } |
2054 | ||
2055 | /* Copy the command. */ | |
2056 | *(lepp_cmd_t *)&eq->cmds[cmd_tail] = cmds[i]; | |
2057 | ||
2058 | /* Advance. */ | |
2059 | cmd_tail = cmd_next; | |
2060 | } | |
2061 | ||
2062 | /* Record "skb" for eventual freeing. */ | |
2063 | comp_tail = eq->comp_tail; | |
2064 | eq->comps[comp_tail] = skb; | |
2065 | LEPP_QINC(comp_tail); | |
2066 | eq->comp_tail = comp_tail; | |
2067 | ||
2068 | /* Flush before allowing LEPP to handle the command. */ | |
d91c6412 | 2069 | /* ISSUE: Is this the optimal location for the flush? */ |
e5a06939 CM |
2070 | __insn_mf(); |
2071 | ||
2072 | eq->cmd_tail = cmd_tail; | |
2073 | ||
d91c6412 CM |
2074 | /* NOTE: Using "4" here is more efficient than "0" or "2", */ |
2075 | /* and, strangely, more efficient than pre-checking the number */ | |
2076 | /* of available completions, and comparing it to 4. */ | |
e5a06939 | 2077 | if (nolds == 0) |
d91c6412 CM |
2078 | nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4); |
2079 | ||
2080 | spin_unlock_irqrestore(&priv->eq_lock, irqflags); | |
e5a06939 CM |
2081 | |
2082 | /* Handle completions. */ | |
2083 | for (i = 0; i < nolds; i++) | |
2084 | kfree_skb(olds[i]); | |
2085 | ||
2086 | /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */ | |
2087 | stats->tx_packets++; | |
2088 | stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN); | |
2089 | ||
2090 | /* Make sure the egress timer is scheduled. */ | |
2091 | tile_net_schedule_egress_timer(info); | |
2092 | ||
2093 | return NETDEV_TX_OK; | |
2094 | } | |
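/*
 * Editor's note: a worked example of the checksum fields above, for a
 * plain TCP/IPv4 packet with no VLAN tag and no IP options (numbers
 * illustrative).  "csum_start" is 14 + 20 = 34, the start of the TCP
 * header; "skb->csum_offset" is 16, the offset of the checksum field
 * within the TCP header, so "destination_byte" is 34 + 16 = 50; and
 * "count" covers everything from byte 34 through the end of the packet.
 */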
2095 | ||
2096 | ||
2097 | /* | |
2098 | * Deal with a transmit timeout. | |
2099 | */ | |
2100 | static void tile_net_tx_timeout(struct net_device *dev) | |
2101 | { | |
2102 | PDEBUG("tile_net_tx_timeout()\n"); | |
2103 | PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies, | |
2104 | jiffies - dev->trans_start); | |
2105 | ||
2106 | /* XXX: ISSUE: This doesn't seem useful for us. */ | |
2107 | netif_wake_queue(dev); | |
2108 | } | |
2109 | ||
2110 | ||
2111 | /* | |
2112 | * Ioctl commands. | |
2113 | */ | |
2114 | static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
2115 | { | |
2116 | return -EOPNOTSUPP; | |
2117 | } | |
2118 | ||
2119 | ||
2120 | /* | |
2121 | * Get System Network Statistics. | |
2122 | * | |
2123 | * Returns the address of the device statistics structure. | |
2124 | */ | |
2125 | static struct net_device_stats *tile_net_get_stats(struct net_device *dev) | |
2126 | { | |
2127 | struct tile_net_priv *priv = netdev_priv(dev); | |
2128 | u32 rx_packets = 0; | |
2129 | u32 tx_packets = 0; | |
2130 | u32 rx_bytes = 0; | |
2131 | u32 tx_bytes = 0; | |
2132 | int i; | |
2133 | ||
2134 | for_each_online_cpu(i) { | |
2135 | if (priv->cpu[i]) { | |
2136 | rx_packets += priv->cpu[i]->stats.rx_packets; | |
2137 | rx_bytes += priv->cpu[i]->stats.rx_bytes; | |
2138 | tx_packets += priv->cpu[i]->stats.tx_packets; | |
2139 | tx_bytes += priv->cpu[i]->stats.tx_bytes; | |
2140 | } | |
2141 | } | |
2142 | ||
2143 | priv->stats.rx_packets = rx_packets; | |
2144 | priv->stats.rx_bytes = rx_bytes; | |
2145 | priv->stats.tx_packets = tx_packets; | |
2146 | priv->stats.tx_bytes = tx_bytes; | |
2147 | ||
2148 | return &priv->stats; | |
2149 | } | |
2150 | ||
2151 | ||
2152 | /* | |
2153 | * Change the "mtu". | |
2154 | * | |
2155 | * The "change_mtu" method is usually not needed. | |
2156 | * If it is needed, it must validate the range and then update "dev->mtu". | |
2157 | */ | |
2158 | static int tile_net_change_mtu(struct net_device *dev, int new_mtu) | |
2159 | { | |
2160 | PDEBUG("tile_net_change_mtu()\n"); | |
2161 | ||
2162 | /* Check ranges: 68 is the minimum IPv4 link MTU, 1500 the Ethernet maximum. */ | |
2163 | if ((new_mtu < 68) || (new_mtu > 1500)) | |
2164 | return -EINVAL; | |
2165 | ||
2166 | /* Accept the value. */ | |
2167 | dev->mtu = new_mtu; | |
2168 | ||
2169 | return 0; | |
2170 | } | |
2171 | ||
2172 | ||
2173 | /* | |
2174 | * Change the Ethernet Address of the NIC. | |
2175 | * | |
2176 | * The hypervisor driver does not support changing MAC address. However, | |
2177 | * the IPP does not do anything with the MAC address, so the address which | |
2178 | * gets used on outgoing packets, and which is accepted on incoming packets, | |
2179 | * is completely up to the NetIO program or kernel driver which is actually | |
2180 | * handling them. | |
2181 | * | |
2182 | * Returns 0 on success, negative on failure. | |
2183 | */ | |
2184 | static int tile_net_set_mac_address(struct net_device *dev, void *p) | |
2185 | { | |
2186 | struct sockaddr *addr = p; | |
2187 | ||
2188 | if (!is_valid_ether_addr(addr->sa_data)) | |
2189 | return -EINVAL; | |
2190 | ||
2191 | /* ISSUE: Note that "dev_addr" is now a pointer. */ | |
2192 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | |
7ce5d222 | 2193 | dev->addr_assign_type &= ~NET_ADDR_RANDOM; |
e5a06939 CM |
2194 | |
2195 | return 0; | |
2196 | } | |
2197 | ||
2198 | ||
2199 | /* | |
2200 | * Obtain the MAC address from the hypervisor. | |
2201 | * This must be done before opening the device. | |
2202 | */ | |
2203 | static int tile_net_get_mac(struct net_device *dev) | |
2204 | { | |
2205 | struct tile_net_priv *priv = netdev_priv(dev); | |
2206 | ||
2207 | char hv_dev_name[32]; | |
2208 | int len; | |
2209 | ||
2210 | __netio_getset_offset_t offset = { .word = NETIO_IPP_PARAM_OFF }; | |
2211 | ||
2212 | int ret; | |
2213 | ||
2214 | /* For example, "xgbe0". */ | |
2215 | strcpy(hv_dev_name, dev->name); | |
2216 | len = strlen(hv_dev_name); | |
2217 | ||
2218 | /* For example, "xgbe/0". */ | |
2219 | hv_dev_name[len] = hv_dev_name[len - 1]; | |
2220 | hv_dev_name[len - 1] = '/'; | |
2221 | len++; | |
2222 | ||
2223 | /* For example, "xgbe/0/native_hash". */ | |
2224 | strcpy(hv_dev_name + len, hash_default ? "/native_hash" : "/native"); | |
2225 | ||
2226 | /* Get the hypervisor handle for this device. */ | |
2227 | priv->hv_devhdl = hv_dev_open((HV_VirtAddr)hv_dev_name, 0); | |
2228 | PDEBUG("hv_dev_open(%s) returned %d %p\n", | |
2229 | hv_dev_name, priv->hv_devhdl, &priv->hv_devhdl); | |
2230 | if (priv->hv_devhdl < 0) { | |
2231 | if (priv->hv_devhdl == HV_ENODEV) | |
2232 | printk(KERN_DEBUG "Ignoring unconfigured device %s\n", | |
2233 | hv_dev_name); | |
2234 | else | |
2235 | printk(KERN_DEBUG "hv_dev_open(%s) returned %d\n", | |
2236 | hv_dev_name, priv->hv_devhdl); | |
2237 | return -1; | |
2238 | } | |
2239 | ||
2240 | /* | |
2241 | * Read the hardware address from the hypervisor. | |
2242 | * ISSUE: Note that "dev_addr" is now a pointer. | |
2243 | */ | |
2244 | offset.bits.class = NETIO_PARAM; | |
2245 | offset.bits.addr = NETIO_PARAM_MAC; | |
2246 | ret = hv_dev_pread(priv->hv_devhdl, 0, | |
2247 | (HV_VirtAddr)dev->dev_addr, dev->addr_len, | |
2248 | offset.word); | |
2249 | PDEBUG("hv_dev_pread(NETIO_PARAM_MAC) returned %d\n", ret); | |
2250 | if (ret <= 0) { | |
2251 | printk(KERN_DEBUG "hv_dev_pread(NETIO_PARAM_MAC) %s failed\n", | |
2252 | dev->name); | |
2253 | /* | |
2254 | * Since the device is configured by the hypervisor but we | |
2255 | * can't get its MAC address, we are most likely running | |
2256 | * the simulator, so let's generate a random MAC address. | |
2257 | */ | |
7ce5d222 | 2258 | eth_hw_addr_random(dev); |
e5a06939 CM |
2259 | } |
2260 | ||
2261 | return 0; | |
2262 | } | |
2263 | ||
e5686ad8 | 2264 | static const struct net_device_ops tile_net_ops = { |
e5a06939 CM |
2265 | .ndo_open = tile_net_open, |
2266 | .ndo_stop = tile_net_stop, | |
2267 | .ndo_start_xmit = tile_net_tx, | |
2268 | .ndo_do_ioctl = tile_net_ioctl, | |
2269 | .ndo_get_stats = tile_net_get_stats, | |
2270 | .ndo_change_mtu = tile_net_change_mtu, | |
2271 | .ndo_tx_timeout = tile_net_tx_timeout, | |
2272 | .ndo_set_mac_address = tile_net_set_mac_address | |
2273 | }; | |
2274 | ||
2275 | ||
2276 | /* | |
2277 | * The setup function. | |
2278 | * | |
2279 | * This uses ether_setup() to assign various fields in dev, including | |
2280 | * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields. | |
2281 | */ | |
2282 | static void tile_net_setup(struct net_device *dev) | |
2283 | { | |
2284 | PDEBUG("tile_net_setup()\n"); | |
2285 | ||
2286 | ether_setup(dev); | |
2287 | ||
2288 | dev->netdev_ops = &tile_net_ops; | |
2289 | ||
2290 | dev->watchdog_timeo = TILE_NET_TIMEOUT; | |
2291 | ||
2292 | /* We want lockless xmit. */ | |
2293 | dev->features |= NETIF_F_LLTX; | |
2294 | ||
2295 | /* We support hardware tx checksums. */ | |
2296 | dev->features |= NETIF_F_HW_CSUM; | |
2297 | ||
2298 | /* We support scatter/gather. */ | |
2299 | dev->features |= NETIF_F_SG; | |
2300 | ||
2301 | /* We support TSO. */ | |
2302 | dev->features |= NETIF_F_TSO; | |
2303 | ||
2304 | #ifdef TILE_NET_GSO | |
2305 | /* We support GSO. */ | |
2306 | dev->features |= NETIF_F_GSO; | |
2307 | #endif | |
2308 | ||
2309 | if (hash_default) | |
2310 | dev->features |= NETIF_F_HIGHDMA; | |
2311 | ||
2312 | /* ISSUE: We should support NETIF_F_UFO. */ | |
2313 | ||
2314 | dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN; | |
2315 | ||
2316 | dev->mtu = TILE_NET_MTU; | |
2317 | } | |
2318 | ||
2319 | ||
2320 | /* | |
2321 | * Allocate the device structure, register the device, and obtain the | |
2322 | * MAC address from the hypervisor. | |
2323 | */ | |
2324 | static struct net_device *tile_net_dev_init(const char *name) | |
2325 | { | |
2326 | int ret; | |
2327 | struct net_device *dev; | |
2328 | struct tile_net_priv *priv; | |
e5a06939 CM |
2329 | |
2330 | /* | |
2331 | * Allocate the device structure. This allocates "priv", calls | |
2332 | * tile_net_setup(), and saves "name". Normally, "name" is a | |
2333 | * template, instantiated by register_netdev(), but not for us. | |
2334 | */ | |
2335 | dev = alloc_netdev(sizeof(*priv), name, tile_net_setup); | |
2336 | if (!dev) { | |
2337 | pr_err("alloc_netdev(%s) failed\n", name); | |
2338 | return NULL; | |
2339 | } | |
2340 | ||
2341 | priv = netdev_priv(dev); | |
2342 | ||
2343 | /* Initialize "priv". */ | |
2344 | ||
2345 | memset(priv, 0, sizeof(*priv)); | |
2346 | ||
2347 | /* Save "dev" for "tile_net_open_retry()". */ | |
2348 | priv->dev = dev; | |
2349 | ||
2350 | INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry); | |
2351 | ||
d91c6412 | 2352 | spin_lock_init(&priv->eq_lock); |
e5a06939 | 2353 | |
d91c6412 CM |
2354 | /* Allocate "eq". */ |
2355 | priv->eq_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, EQ_ORDER); | |
2356 | if (!priv->eq_pages) { | |
e5a06939 CM |
2357 | free_netdev(dev); |
2358 | return NULL; | |
2359 | } | |
d91c6412 | 2360 | priv->eq = page_address(priv->eq_pages); |
e5a06939 CM |
2361 | |
2362 | /* Register the network device. */ | |
2363 | ret = register_netdev(dev); | |
2364 | if (ret) { | |
2365 | pr_err("register_netdev %s failed %d\n", dev->name, ret); | |
d91c6412 | 2366 | __free_pages(priv->eq_pages, EQ_ORDER); |
e5a06939 CM |
2367 | free_netdev(dev); |
2368 | return NULL; | |
2369 | } | |
2370 | ||
2371 | /* Get the MAC address. */ | |
2372 | ret = tile_net_get_mac(dev); | |
2373 | if (ret < 0) { | |
2374 | unregister_netdev(dev); | |
d91c6412 | 2375 | __free_pages(priv->eq_pages, EQ_ORDER); |
e5a06939 CM |
2376 | free_netdev(dev); |
2377 | return NULL; | |
2378 | } | |
2379 | ||
2380 | return dev; | |
2381 | } | |
2382 | ||
2383 | ||
2384 | /* | |
2385 | * Module cleanup. | |
d91c6412 CM |
2386 | * |
2387 | * FIXME: If compiled as a module, this module cannot be "unloaded", | |
2388 | * because the "ingress interrupt handler" is registered permanently. | |
e5a06939 CM |
2389 | */ |
2390 | static void tile_net_cleanup(void) | |
2391 | { | |
2392 | int i; | |
2393 | ||
2394 | for (i = 0; i < TILE_NET_DEVS; i++) { | |
2395 | if (tile_net_devs[i]) { | |
2396 | struct net_device *dev = tile_net_devs[i]; | |
2397 | struct tile_net_priv *priv = netdev_priv(dev); | |
2398 | unregister_netdev(dev); | |
d07bd86d | 2399 | finv_buffer_remote(priv->eq, EQ_SIZE, 0); |
d91c6412 | 2400 | __free_pages(priv->eq_pages, EQ_ORDER); |
e5a06939 CM |
2401 | free_netdev(dev); |
2402 | } | |
2403 | } | |
2404 | } | |
2405 | ||
2406 | ||
2407 | /* | |
2408 | * Module initialization. | |
2409 | */ | |
2410 | static int tile_net_init_module(void) | |
2411 | { | |
2412 | pr_info("Tilera IPP Net Driver\n"); | |
2413 | ||
2414 | tile_net_devs[0] = tile_net_dev_init("xgbe0"); | |
2415 | tile_net_devs[1] = tile_net_dev_init("xgbe1"); | |
2416 | tile_net_devs[2] = tile_net_dev_init("gbe0"); | |
2417 | tile_net_devs[3] = tile_net_dev_init("gbe1"); | |
2418 | ||
2419 | return 0; | |
2420 | } | |
2421 | ||
2422 | ||
d91c6412 CM |
2423 | module_init(tile_net_init_module); |
2424 | module_exit(tile_net_cleanup); | |
2425 | ||
2426 | ||
e5a06939 | 2427 | #ifndef MODULE |
d91c6412 | 2428 | |
e5a06939 CM |
2429 | /* |
2430 | * The "network_cpus" boot argument specifies the cpus that are dedicated | |
2431 | * to handling ingress packets. | |
2432 | * | |
2433 | * The parameter should be in the form "network_cpus=m-n[,x-y]", where | |
2434 | * m, n, x, y are cpu numbers; the listed ranges name the cpus to be | |
2435 | * dedicated to handling ingress packets. | |
2436 | */ | |
2437 | static int __init network_cpus_setup(char *str) | |
2438 | { | |
2439 | int rc = cpulist_parse_crop(str, &network_cpus_map); | |
2440 | if (rc != 0) { | |
2441 | pr_warning("network_cpus=%s: malformed cpu list\n", | |
2442 | str); | |
2443 | } else { | |
2444 | ||
2445 | /* Restrict to the possible cpus (this drops any dedicated cpus). */ | |
2446 | cpumask_and(&network_cpus_map, &network_cpus_map, | |
2447 | cpu_possible_mask); | |
2448 | ||
2449 | ||
2450 | if (cpumask_empty(&network_cpus_map)) { | |
2451 | pr_warning("Ignoring network_cpus='%s'.\n", | |
2452 | str); | |
2453 | } else { | |
2454 | char buf[1024]; | |
2455 | cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map); | |
2456 | pr_info("Linux network CPUs: %s\n", buf); | |
2457 | network_cpus_used = true; | |
2458 | } | |
2459 | } | |
2460 | ||
2461 | return 0; | |
2462 | } | |
2463 | __setup("network_cpus=", network_cpus_setup); | |
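/*
 * Editor's note: an illustrative use of the boot argument (the cpu
 * numbers are arbitrary).  Appending
 *
 *	network_cpus=1-3,8
 *
 * to the kernel command line dedicates cpus 1, 2, 3, and 8 to
 * ingress packet processing.
 */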
e5a06939 | 2464 | |
d91c6412 | 2465 | #endif |