/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/kernel.h>	/* printk() */
#include <linux/slab.h>		/* kmalloc() */
#include <linux/errno.h>	/* error codes */
#include <linux/types.h>	/* size_t */
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/netdevice.h>	/* struct device, and other headers */
#include <linux/etherdevice.h>	/* eth_type_trans */
#include <linux/skbuff.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/hugetlb.h>
#include <linux/in6.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/u64_stats_sync.h>
#include <asm/checksum.h>
#include <asm/homecache.h>

#include <hv/drv_xgbe_intf.h>
#include <hv/drv_xgbe_impl.h>
#include <hv/hypervisor.h>
#include <hv/netio_intf.h>

/* For TSO */
#include <linux/ip.h>
#include <linux/tcp.h>


/*
 * First, "tile_net_init_module()" initializes all four "devices" which
 * can be used by Linux.
 *
 * Then, "ifconfig DEVICE up" calls "tile_net_open()", which analyzes
 * the network cpus, then uses "tile_net_open_aux()" to initialize
 * LIPP/LEPP, and then uses "tile_net_open_inner()" to register all
 * the tiles, provide buffers to LIPP, allow ingress to start, and
 * turn on hypervisor interrupt handling (and NAPI) on all tiles.
 *
 * If registration fails due to the link being down, then "retry_work"
 * is used to keep calling "tile_net_open_inner()" until it succeeds.
 *
 * If "ifconfig DEVICE down" is called, it uses "tile_net_stop()" to
 * stop egress, drain the LIPP buffers, unregister all the tiles, stop
 * LIPP/LEPP, and wipe the LEPP queue.
 *
 * We start out with the ingress interrupt enabled on each CPU.  When
 * this interrupt fires, we disable it, and call "napi_schedule()".
 * This will cause "tile_net_poll()" to be called, which will pull
 * packets from the netio queue, filtering them out, or passing them
 * to "netif_receive_skb()".  If our budget is exhausted, we will
 * return, knowing we will be called again later.  Otherwise, we
 * reenable the ingress interrupt, and call "napi_complete()".
 *
 * HACK: Since disabling the ingress interrupt is not reliable, we
 * ignore the interrupt if the global "active" flag is false.
 *
 *
 * NOTE: The use of "native_driver" ensures that EPP exists, and that
 * we are using "LIPP" and "LEPP".
 *
 * NOTE: Failing to free completions for an arbitrarily long time
 * (which is defined to be illegal) does in fact cause bizarre
 * problems.  The "egress_timer" helps prevent this from happening.
 */


/* HACK: Allow use of "jumbo" packets. */
/* This should be 1500 if "jumbo" is not set in LIPP. */
/* This should be at most 10226 (10240 - 14) if "jumbo" is set in LIPP. */
/* ISSUE: This has not been thoroughly tested (except at 1500). */
#define TILE_NET_MTU 1500

/* HACK: Define this to verify incoming packets. */
/* #define TILE_NET_VERIFY_INGRESS */

/* Use 3000 to enable the Linux Traffic Control (QoS) layer, else 0. */
#define TILE_NET_TX_QUEUE_LEN 0

/* Define to dump packets (prints out the whole packet on tx and rx). */
/* #define TILE_NET_DUMP_PACKETS */

/* Define to enable debug spew (all PDEBUG's are enabled). */
/* #define TILE_NET_DEBUG */


/* Define to activate paranoia checks. */
/* #define TILE_NET_PARANOIA */

/* Default transmit lockup timeout period, in jiffies. */
#define TILE_NET_TIMEOUT (5 * HZ)

/* Default retry interval for bringing up the NetIO interface, in jiffies. */
#define TILE_NET_RETRY_INTERVAL (5 * HZ)

/* Number of ports (xgbe0, xgbe1, gbe0, gbe1). */
#define TILE_NET_DEVS 4



/* Paranoia. */
#if NET_IP_ALIGN != LIPP_PACKET_PADDING
#error "NET_IP_ALIGN must match LIPP_PACKET_PADDING."
#endif


/* Debug print. */
#ifdef TILE_NET_DEBUG
#define PDEBUG(fmt, args...) net_printk(fmt, ## args)
#else
#define PDEBUG(fmt, args...)
#endif


MODULE_AUTHOR("Tilera");
MODULE_LICENSE("GPL");


/*
 * Queue of incoming packets for a specific cpu and device.
 *
 * Includes a pointer to the "system" data, and the actual "user" data.
 */
struct tile_netio_queue {
	netio_queue_impl_t *__system_part;
	netio_queue_user_impl_t __user_part;
};


/*
 * Statistics counters for a specific cpu and device.
 */
struct tile_net_stats_t {
	struct u64_stats_sync syncp;
	u64 rx_packets;		/* total packets received */
	u64 tx_packets;		/* total packets transmitted */
	u64 rx_bytes;		/* total bytes received */
	u64 tx_bytes;		/* total bytes transmitted */
	u64 rx_errors;		/* packets truncated or marked bad by hw */
	u64 rx_dropped;		/* packets not for us or intf not up */
};


/*
 * Info for a specific cpu and device.
 *
 * ISSUE: There is a "dev" pointer in "napi" as well.
 */
struct tile_net_cpu {
	/* The NAPI struct. */
	struct napi_struct napi;
	/* Packet queue. */
	struct tile_netio_queue queue;
	/* Statistics. */
	struct tile_net_stats_t stats;
	/* True iff NAPI is enabled. */
	bool napi_enabled;
	/* True if this tile has successfully registered with the IPP. */
	bool registered;
	/* True if the link was down last time we tried to register. */
	bool link_down;
	/* True if "egress_timer" is scheduled. */
	bool egress_timer_scheduled;
	/* Number of small sk_buffs which must still be provided. */
	unsigned int num_needed_small_buffers;
	/* Number of large sk_buffs which must still be provided. */
	unsigned int num_needed_large_buffers;
	/* A timer for handling egress completions. */
	struct timer_list egress_timer;
};


/*
 * Info for a specific device.
 */
struct tile_net_priv {
	/* Our network device. */
	struct net_device *dev;
	/* Pages making up the egress queue. */
	struct page *eq_pages;
	/* Address of the actual egress queue. */
	lepp_queue_t *eq;
	/* Protects "eq". */
	spinlock_t eq_lock;
	/* The hypervisor handle for this interface. */
	int hv_devhdl;
	/* The intr bit mask that IDs this device. */
	u32 intr_id;
	/* True iff "tile_net_open_aux()" has succeeded. */
	bool partly_opened;
	/* True iff the device is "active". */
	bool active;
	/* Effective network cpus. */
	struct cpumask network_cpus_map;
	/* Number of network cpus. */
	int network_cpus_count;
	/* Credits per network cpu. */
	int network_cpus_credits;
	/* For NetIO bringup retries. */
	struct delayed_work retry_work;
	/* Quick access to per cpu data. */
	struct tile_net_cpu *cpu[NR_CPUS];
};

/* Log2 of the number of small pages needed for the egress queue. */
#define EQ_ORDER get_order(sizeof(lepp_queue_t))
/* Size of the egress queue's pages. */
#define EQ_SIZE (1 << (PAGE_SHIFT + EQ_ORDER))
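
/*
 * Worked example (illustrative): get_order() returns 0 for any size
 * up to one page, so with 64KB pages a lepp_queue_t of up to 64KB
 * gives EQ_ORDER == 0 and EQ_SIZE == 64KB, while with 4KB pages a
 * queue of, say, 8KB would give EQ_ORDER == 1 and EQ_SIZE == 8KB.
 * The actual sizeof(lepp_queue_t) is fixed by the LEPP definitions
 * in <hv/drv_xgbe_impl.h>.
 */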

/*
 * The actual devices (xgbe0, xgbe1, gbe0, gbe1).
 */
static struct net_device *tile_net_devs[TILE_NET_DEVS];

/*
 * The "tile_net_cpu" structures for each device.
 */
static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe0);
static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe1);
static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe0);
static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe1);


/*
 * True if "network_cpus" was specified.
 */
static bool network_cpus_used;

/*
 * The actual cpus in "network_cpus".
 */
static struct cpumask network_cpus_map;

#ifdef TILE_NET_DEBUG
/*
 * printk with extra stuff.
 *
 * We print the CPU we're running in brackets.
 */
static void net_printk(char *fmt, ...)
{
	int i;
	int len;
	va_list args;
	static char buf[256];

	len = sprintf(buf, "tile_net[%2.2d]: ", smp_processor_id());
	va_start(args, fmt);
	i = vscnprintf(buf + len, sizeof(buf) - len - 1, fmt, args);
	va_end(args);
	buf[255] = '\0';
	pr_notice("%s", buf);
}
#endif


#ifdef TILE_NET_DUMP_PACKETS
/*
 * Dump a packet.
 */
static void dump_packet(unsigned char *data, unsigned long length, char *s)
{
	int my_cpu = smp_processor_id();

	unsigned long i;
	char buf[128];

	static unsigned int count;

	pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n",
		data, length, s, count++);

	pr_info("\n");

	for (i = 0; i < length; i++) {
		if ((i & 0xf) == 0)
			sprintf(buf, "[%02d] %8.8lx:", my_cpu, i);
		sprintf(buf + strlen(buf), " %2.2x", data[i]);
		if ((i & 0xf) == 0xf || i == length - 1) {
			strcat(buf, "\n");
			pr_info("%s", buf);
		}
	}
}
#endif


/*
 * Provide support for the __netio_fastio1() swint
 * (see <hv/drv_xgbe_intf.h> for how it is used).
 *
 * The fastio swint2 call may clobber all the caller-saved registers.
 * It rarely clobbers memory, but we allow for the possibility in
 * the signature just to be on the safe side.
 *
 * Also, gcc doesn't seem to allow an input operand to be
 * clobbered, so we fake it with dummy outputs.
 *
 * This function can't be static because of the way it is declared
 * in the netio header.
 */
inline int __netio_fastio1(u32 fastio_index, u32 arg0)
{
	long result, clobber_r1, clobber_r10;
	asm volatile("swint2"
		     : "=R00" (result),
		       "=R01" (clobber_r1), "=R10" (clobber_r10)
		     : "R10" (fastio_index), "R01" (arg0)
		     : "memory", "r2", "r3", "r4",
		       "r5", "r6", "r7", "r8", "r9",
		       "r11", "r12", "r13", "r14",
		       "r15", "r16", "r17", "r18", "r19",
		       "r20", "r21", "r22", "r23", "r24",
		       "r25", "r26", "r27", "r28", "r29");
	return result;
}
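
/*
 * As the constraints above encode it: the fastio opcode index is
 * passed in r10, the argument in r1, and the swint2 result comes
 * back in r0.  Since r1 and r10 are themselves clobbered by the
 * call, they are also listed as (dummy) outputs.
 */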

static void tile_net_return_credit(struct tile_net_cpu *info)
{
	struct tile_netio_queue *queue = &info->queue;
	netio_queue_user_impl_t *qup = &queue->__user_part;

	/* Return four credits after every fourth packet. */
	if (--qup->__receive_credit_remaining == 0) {
		u32 interval = qup->__receive_credit_interval;
		qup->__receive_credit_remaining = interval;
		__netio_fastio_return_credits(qup->__fastio_index, interval);
	}
}
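
/*
 * "Four" here reflects the fact that tile_net_register() initializes
 * "__receive_credit_interval" to 4; the code itself simply returns
 * "interval" credits after every "interval" packets.
 */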

/*
 * Provide a linux buffer to LIPP.
 */
static void tile_net_provide_linux_buffer(struct tile_net_cpu *info,
					  void *va, bool small)
{
	struct tile_netio_queue *queue = &info->queue;

	/* Convert "va" and "small" to "linux_buffer_t". */
	unsigned int buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small;

	__netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer);
}
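
/*
 * Encoding example (from the conversion above): the buffer's physical
 * address is a multiple of 128, so its low 7 bits are zero;
 * "(__pa(va) >> 7) << 1" stores the remaining bits in bits 31..1, and
 * bit 0 flags a "small" buffer.  The ingress path inverts this with
 * "__va((phys_addr_t)(buffer >> 1) << 7)".
 */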

/*
 * Provide a linux buffer for LIPP.
 *
 * Note that the ACTUAL allocation for each buffer is a "struct sk_buff",
 * plus a chunk of memory that includes not only the requested bytes, but
 * also NET_SKB_PAD bytes of initial padding, and a "struct skb_shared_info".
 *
 * Note that "struct skb_shared_info" is 88 bytes with 64K pages and
 * 268 bytes with 4K pages (since the frags[] array needs 18 entries).
 *
 * Without jumbo packets, the maximum packet size will be 1536 bytes,
 * and we use 2 bytes (NET_IP_ALIGN) of padding.  ISSUE: If we told
 * the hardware to clip at 1518 bytes instead of 1536 bytes, then we
 * could save an entire cache line, but in practice, we don't need it.
 *
 * Since CPAs are 38 bits, and we can only encode the high 31 bits in
 * a "linux_buffer_t", the low 7 bits must be zero, and thus, we must
 * align the actual "va" mod 128.
 *
 * We assume that the underlying "head" will be aligned mod 64.  Note
 * that in practice, we have seen "head" NOT aligned mod 128 even when
 * using 2048 byte allocations, which is surprising.
 *
 * If "head" WAS always aligned mod 128, we could change LIPP to
 * assume that the low SIX bits are zero, and the 7th bit is one, that
 * is, align the actual "va" mod 128 plus 64, which would be "free".
 *
 * For now, the actual "head" pointer points at NET_SKB_PAD bytes of
 * padding, plus 28 or 92 bytes of extra padding, plus the sk_buff
 * pointer, plus the NET_IP_ALIGN padding, plus 126 or 1536 bytes for
 * the actual packet, plus 62 bytes of empty padding, plus some
 * padding and the "struct skb_shared_info".
 *
 * With 64K pages, a large buffer thus needs 32+92+4+2+1536+62+88
 * bytes, or 1816 bytes, which fits comfortably into 2048 bytes.
 *
 * With 64K pages, a small buffer thus needs 32+92+4+2+126+88
 * bytes, or 344 bytes, which means we are wasting 64+ bytes, and
 * could presumably increase the size of small buffers.
 *
 * With 4K pages, a large buffer thus needs 32+92+4+2+1536+62+268
 * bytes, or 1996 bytes, which fits comfortably into 2048 bytes.
 *
 * With 4K pages, a small buffer thus needs 32+92+4+2+126+268
 * bytes, or 524 bytes, which is annoyingly wasteful.
 *
 * Maybe we should increase LIPP_SMALL_PACKET_SIZE to 192?
 *
 * ISSUE: Maybe we should increase "NET_SKB_PAD" to 64?
 */
static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
					   bool small)
{
#if TILE_NET_MTU <= 1536
	/* Without "jumbo", 2 + 1536 should be sufficient. */
	unsigned int large_size = NET_IP_ALIGN + 1536;
#else
	/* ISSUE: This has not been tested. */
	unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100;
#endif

	/* Avoid "false sharing" with last cache line. */
	/* ISSUE: This is already done by "netdev_alloc_skb()". */
	unsigned int len =
		 (((small ? LIPP_SMALL_PACKET_SIZE : large_size) +
		   CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE());

	unsigned int padding = 128 - NET_SKB_PAD;
	unsigned int align;

	struct sk_buff *skb;
	void *va;

	struct sk_buff **skb_ptr;

	/* Request 96 extra bytes for alignment purposes. */
	skb = netdev_alloc_skb(info->napi.dev, len + padding);
	if (skb == NULL)
		return false;

	/* Skip 32 or 96 bytes to align "data" mod 128. */
	align = -(long)skb->data & (128 - 1);
	BUG_ON(align > padding);
	skb_reserve(skb, align);

	/* This address is given to IPP. */
	va = skb->data;

	/* Buffers must not span a huge page. */
	BUG_ON(((((long)va & ~HPAGE_MASK) + len) & HPAGE_MASK) != 0);

#ifdef TILE_NET_PARANOIA
#if CHIP_HAS_CBOX_HOME_MAP()
	if (hash_default) {
		HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va);
		if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
			panic("Non-HFH ingress buffer! VA=%p Mode=%d PTE=%llx",
			      va, hv_pte_get_mode(pte), hv_pte_val(pte));
	}
#endif
#endif

	/* Invalidate the packet buffer. */
	if (!hash_default)
		__inv_buffer(va, len);

	/* Skip two bytes to satisfy LIPP assumptions. */
	/* Note that this aligns IP on a 16 byte boundary. */
	/* ISSUE: Do this when the packet arrives? */
	skb_reserve(skb, NET_IP_ALIGN);

	/* Save a back-pointer to 'skb'. */
	skb_ptr = va - sizeof(*skb_ptr);
	*skb_ptr = skb;

	/* Make sure "skb_ptr" has been flushed. */
	__insn_mf();

	/* Provide the new buffer. */
	tile_net_provide_linux_buffer(info, va, small);

	return true;
}
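
/*
 * Sizing example (illustrative, assuming a 64-byte L2 line and the
 * NET_SKB_PAD of 32 implied by the comments above): a large buffer
 * needs 2 + 1536 = 1538 bytes, which rounds up to len == 1600, and
 * padding == 128 - 32 == 96, matching the "32 or 96 bytes" skip used
 * to align "data" mod 128.
 */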

/*
 * Provide linux buffers for LIPP.
 */
static void tile_net_provide_needed_buffers(struct tile_net_cpu *info)
{
	while (info->num_needed_small_buffers != 0) {
		if (!tile_net_provide_needed_buffer(info, true))
			goto oops;
		info->num_needed_small_buffers--;
	}

	while (info->num_needed_large_buffers != 0) {
		if (!tile_net_provide_needed_buffer(info, false))
			goto oops;
		info->num_needed_large_buffers--;
	}

	return;

oops:

	/* Add a description to the page allocation failure dump. */
	pr_notice("Could not provide a linux buffer to LIPP.\n");
}


/*
 * Grab some LEPP completions, and store them in "comps", of size
 * "comps_size", and return the number of completions which were
 * stored, so the caller can free them.
 */
static unsigned int tile_net_lepp_grab_comps(lepp_queue_t *eq,
					     struct sk_buff *comps[],
					     unsigned int comps_size,
					     unsigned int min_size)
{
	unsigned int n = 0;

	unsigned int comp_head = eq->comp_head;
	unsigned int comp_busy = eq->comp_busy;

	while (comp_head != comp_busy && n < comps_size) {
		comps[n++] = eq->comps[comp_head];
		LEPP_QINC(comp_head);
	}

	if (n < min_size)
		return 0;

	eq->comp_head = comp_head;

	return n;
}
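
/*
 * Note that completions are consumed from "comp_head" up to (but not
 * including) "comp_busy", and "eq->comp_head" is only advanced once
 * at least "min_size" completions have been gathered, so a caller
 * passing a nonzero "min_size" can leave small batches to accumulate.
 */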

/*
 * Free some comps, and return true iff there are still some pending.
 */
static bool tile_net_lepp_free_comps(struct net_device *dev, bool all)
{
	struct tile_net_priv *priv = netdev_priv(dev);

	lepp_queue_t *eq = priv->eq;

	struct sk_buff *olds[64];
	unsigned int wanted = 64;
	unsigned int i, n;
	bool pending;

	spin_lock(&priv->eq_lock);

	if (all)
		eq->comp_busy = eq->comp_tail;

	n = tile_net_lepp_grab_comps(eq, olds, wanted, 0);

	pending = (eq->comp_head != eq->comp_tail);

	spin_unlock(&priv->eq_lock);

	for (i = 0; i < n; i++)
		kfree_skb(olds[i]);

	return pending;
}


/*
 * Make sure the egress timer is scheduled.
 *
 * Note that we use "schedule if not scheduled" logic instead of the more
 * obvious "reschedule" logic, because "reschedule" is fairly expensive.
 */
static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
{
	if (!info->egress_timer_scheduled) {
		mod_timer(&info->egress_timer, jiffies + 1);
		info->egress_timer_scheduled = true;
	}
}


/*
 * The "function" for "info->egress_timer".
 *
 * This timer will reschedule itself as long as there are any pending
 * completions expected (on behalf of any tile).
 *
 * ISSUE: Realistically, will the timer ever stop scheduling itself?
 *
 * ISSUE: This timer is almost never actually needed, so just use a global
 * timer that can run on any tile.
 *
 * ISSUE: Maybe instead track number of expected completions, and free
 * only that many, resetting to zero if "pending" is ever false.
 */
static void tile_net_handle_egress_timer(unsigned long arg)
{
	struct tile_net_cpu *info = (struct tile_net_cpu *)arg;
	struct net_device *dev = info->napi.dev;

	/* The timer is no longer scheduled. */
	info->egress_timer_scheduled = false;

	/* Free comps, and reschedule timer if more are pending. */
	if (tile_net_lepp_free_comps(dev, false))
		tile_net_schedule_egress_timer(info);
}


static void tile_net_discard_aux(struct tile_net_cpu *info, int index)
{
	struct tile_netio_queue *queue = &info->queue;
	netio_queue_impl_t *qsp = queue->__system_part;
	netio_queue_user_impl_t *qup = &queue->__user_part;

	int index2_aux = index + sizeof(netio_pkt_t);
	int index2 =
		((index2_aux ==
		  qsp->__packet_receive_queue.__last_packet_plus_one) ?
		 0 : index2_aux);

	netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);

	/* Extract the "linux_buffer_t". */
	unsigned int buffer = pkt->__packet.word;

	/* Convert "linux_buffer_t" to "va". */
	void *va = __va((phys_addr_t)(buffer >> 1) << 7);

	/* Acquire the associated "skb". */
	struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
	struct sk_buff *skb = *skb_ptr;

	kfree_skb(skb);

	/* Consume this packet. */
	qup->__packet_receive_read = index2;
}


/*
 * Like "tile_net_poll()", but just discard packets.
 */
static void tile_net_discard_packets(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];
	struct tile_netio_queue *queue = &info->queue;
	netio_queue_impl_t *qsp = queue->__system_part;
	netio_queue_user_impl_t *qup = &queue->__user_part;

	while (qup->__packet_receive_read !=
	       qsp->__packet_receive_queue.__packet_write) {
		int index = qup->__packet_receive_read;
		tile_net_discard_aux(info, index);
	}
}


/*
 * Handle the next packet.  Return true if "processed", false if "filtered".
 */
static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
{
	struct net_device *dev = info->napi.dev;

	struct tile_netio_queue *queue = &info->queue;
	netio_queue_impl_t *qsp = queue->__system_part;
	netio_queue_user_impl_t *qup = &queue->__user_part;
	struct tile_net_stats_t *stats = &info->stats;

	int filter;

	int index2_aux = index + sizeof(netio_pkt_t);
	int index2 =
		((index2_aux ==
		  qsp->__packet_receive_queue.__last_packet_plus_one) ?
		 0 : index2_aux);
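
	/*
	 * Note that "index2" is the byte offset of the next packet,
	 * wrapping to zero at "__last_packet_plus_one", and that the
	 * ring of "netio_pkt_t"s lives immediately after the
	 * "netio_queue_impl_t" header, hence "&qsp[1] + index" below.
	 */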

	netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);

	netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);
	netio_pkt_status_t pkt_status = NETIO_PKT_STATUS_M(metadata, pkt);

	/* Extract the packet size.  FIXME: Shouldn't the second line */
	/* get subtracted?  Mostly moot, since it should be "zero". */
	unsigned long len =
		(NETIO_PKT_CUSTOM_LENGTH(pkt) +
		 NET_IP_ALIGN - NETIO_PACKET_PADDING);

	/* Extract the "linux_buffer_t". */
	unsigned int buffer = pkt->__packet.word;

	/* Extract "small" (vs "large"). */
	bool small = ((buffer & 1) != 0);

	/* Convert "linux_buffer_t" to "va". */
	void *va = __va((phys_addr_t)(buffer >> 1) << 7);

	/* Extract the packet data pointer. */
	/* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
	unsigned char *buf = va + NET_IP_ALIGN;

	/* Invalidate the packet buffer. */
	if (!hash_default)
		__inv_buffer(buf, len);

#ifdef TILE_NET_DUMP_PACKETS
	dump_packet(buf, len, "rx");
#endif /* TILE_NET_DUMP_PACKETS */

#ifdef TILE_NET_VERIFY_INGRESS
	if (pkt_status == NETIO_PKT_STATUS_OVERSIZE && len >= 64) {
		dump_packet(buf, len, "rx");
		panic("Unexpected OVERSIZE.");
	}
#endif

	filter = 0;

	if (pkt_status == NETIO_PKT_STATUS_BAD) {
		/* Handle CRC error and hardware truncation. */
		filter = 2;
	} else if (!(dev->flags & IFF_UP)) {
		/* Filter packets received before we're up. */
		filter = 1;
	} else if (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(metadata, pkt) &&
		   pkt_status == NETIO_PKT_STATUS_UNDERSIZE) {
		/* Filter "truncated" packets. */
		filter = 2;
	} else if (!(dev->flags & IFF_PROMISC)) {
		if (!is_multicast_ether_addr(buf)) {
			/* Filter packets not for our address. */
			const u8 *mine = dev->dev_addr;
			filter = !ether_addr_equal(mine, buf);
		}
	}

	u64_stats_update_begin(&stats->syncp);

	if (filter != 0) {

		if (filter == 1)
			stats->rx_dropped++;
		else
			stats->rx_errors++;

		tile_net_provide_linux_buffer(info, va, small);

	} else {

		/* Acquire the associated "skb". */
		struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
		struct sk_buff *skb = *skb_ptr;

		/* Paranoia. */
		if (skb->data != buf)
			panic("Corrupt linux buffer from LIPP! "
			      "VA=%p, skb=%p, skb->data=%p\n",
			      va, skb, skb->data);

		/* Encode the actual packet length. */
		skb_put(skb, len);

		/* NOTE: This call also sets "skb->dev = dev". */
		skb->protocol = eth_type_trans(skb, dev);

		/* Avoid recomputing "good" TCP/UDP checksums. */
		if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		netif_receive_skb(skb);

		stats->rx_packets++;
		stats->rx_bytes += len;
	}

	u64_stats_update_end(&stats->syncp);

	/* ISSUE: It would be nice to defer this until the packet has */
	/* actually been processed. */
	tile_net_return_credit(info);

	/* Consume this packet. */
	qup->__packet_receive_read = index2;

	return !filter;
}


/*
 * Handle some packets for the given device on the current CPU.
 *
 * If "tile_net_stop()" is called on some other tile while this
 * function is running, we will return, hopefully before that
 * other tile asks us to call "napi_disable()".
 *
 * The "rotting packet" race condition occurs if a packet arrives
 * during the extremely narrow window between the queue appearing to
 * be empty, and the ingress interrupt being re-enabled.  This happens
 * a LOT under heavy network load.
 */
static int tile_net_poll(struct napi_struct *napi, int budget)
{
	struct net_device *dev = napi->dev;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];
	struct tile_netio_queue *queue = &info->queue;
	netio_queue_impl_t *qsp = queue->__system_part;
	netio_queue_user_impl_t *qup = &queue->__user_part;

	unsigned int work = 0;

	if (budget <= 0)
		goto done;

	while (priv->active) {
		int index = qup->__packet_receive_read;
		if (index == qsp->__packet_receive_queue.__packet_write)
			break;

		if (tile_net_poll_aux(info, index)) {
			if (++work >= budget)
				goto done;
		}
	}

	napi_complete(&info->napi);

	if (!priv->active)
		goto done;

	/* Re-enable the ingress interrupt. */
	enable_percpu_irq(priv->intr_id, 0);

	/* HACK: Avoid the "rotting packet" problem (see above). */
	if (qup->__packet_receive_read !=
	    qsp->__packet_receive_queue.__packet_write) {
		/* ISSUE: Sometimes this returns zero, presumably */
		/* because an interrupt was handled for this tile. */
		(void)napi_reschedule(&info->napi);
	}

done:

	if (priv->active)
		tile_net_provide_needed_buffers(info);

	return work;
}


/*
 * Handle an ingress interrupt for the given device on the current cpu.
 *
 * ISSUE: Sometimes this gets called after "disable_percpu_irq()" has
 * been called!  This is probably due to "pending hypervisor downcalls".
 *
 * ISSUE: Is there any race condition between the "napi_schedule()" here
 * and the "napi_complete()" call above?
 */
static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	/* Disable the ingress interrupt. */
	disable_percpu_irq(priv->intr_id);

	/* Ignore unwanted interrupts. */
	if (!priv->active)
		return IRQ_HANDLED;

	/* ISSUE: Sometimes "info->napi_enabled" is false here. */

	napi_schedule(&info->napi);

	return IRQ_HANDLED;
}


/*
 * One time initialization per interface.
 */
static int tile_net_open_aux(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);

	int ret;
	int dummy;
	unsigned int epp_lotar;

	/*
	 * Find out where EPP memory should be homed.
	 */
	ret = hv_dev_pread(priv->hv_devhdl, 0,
			   (HV_VirtAddr)&epp_lotar, sizeof(epp_lotar),
			   NETIO_EPP_SHM_OFF);
	if (ret < 0) {
		pr_err("could not read epp_shm_queue lotar.\n");
		return -EIO;
	}

	/*
	 * Home the page on the EPP.
	 */
	{
		int epp_home = hv_lotar_to_cpu(epp_lotar);
		homecache_change_page_home(priv->eq_pages, EQ_ORDER, epp_home);
	}

	/*
	 * Register the EPP shared memory queue.
	 */
	{
		netio_ipp_address_t ea = {
			.va = 0,
			.pa = __pa(priv->eq),
			.pte = hv_pte(0),
			.size = EQ_SIZE,
		};
		ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar);
		ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3);
		ret = hv_dev_pwrite(priv->hv_devhdl, 0,
				    (HV_VirtAddr)&ea,
				    sizeof(ea),
				    NETIO_EPP_SHM_OFF);
		if (ret < 0)
			return -EIO;
	}

	/*
	 * Start LIPP/LEPP.
	 */
	if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
			  sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) {
		pr_warn("Failed to start LIPP/LEPP\n");
		return -EIO;
	}

	return 0;
}


/*
 * Register with hypervisor on the current CPU.
 *
 * Strangely, this function does important things even if it "fails",
 * which is especially common if the link is not up yet.  Hopefully
 * these things are all "harmless" if done twice!
 */
static void tile_net_register(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info;

	struct tile_netio_queue *queue;

	/* Only network cpus can receive packets. */
	int queue_id =
		cpumask_test_cpu(my_cpu, &priv->network_cpus_map) ? 0 : 255;

	netio_input_config_t config = {
		.flags = 0,
		.num_receive_packets = priv->network_cpus_credits,
		.queue_id = queue_id
	};

	int ret = 0;
	netio_queue_impl_t *queuep;

	PDEBUG("tile_net_register(queue_id %d)\n", queue_id);

	if (!strcmp(dev->name, "xgbe0"))
		info = this_cpu_ptr(&hv_xgbe0);
	else if (!strcmp(dev->name, "xgbe1"))
		info = this_cpu_ptr(&hv_xgbe1);
	else if (!strcmp(dev->name, "gbe0"))
		info = this_cpu_ptr(&hv_gbe0);
	else if (!strcmp(dev->name, "gbe1"))
		info = this_cpu_ptr(&hv_gbe1);
	else
		BUG();

	/* Initialize the egress timer. */
	init_timer_pinned(&info->egress_timer);
	info->egress_timer.data = (long)info;
	info->egress_timer.function = tile_net_handle_egress_timer;

	u64_stats_init(&info->stats.syncp);

	priv->cpu[my_cpu] = info;

	/*
	 * Register ourselves with LIPP.  This does a lot of stuff,
	 * including invoking the LIPP registration code.
	 */
	ret = hv_dev_pwrite(priv->hv_devhdl, 0,
			    (HV_VirtAddr)&config,
			    sizeof(netio_input_config_t),
			    NETIO_IPP_INPUT_REGISTER_OFF);
	PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
	       ret);
	if (ret < 0) {
		if (ret != NETIO_LINK_DOWN) {
			printk(KERN_DEBUG "hv_dev_pwrite "
			       "NETIO_IPP_INPUT_REGISTER_OFF failure %d\n",
			       ret);
		}
		info->link_down = (ret == NETIO_LINK_DOWN);
		return;
	}

	/*
	 * Get the pointer to our queue's system part.
	 */

	ret = hv_dev_pread(priv->hv_devhdl, 0,
			   (HV_VirtAddr)&queuep,
			   sizeof(netio_queue_impl_t *),
			   NETIO_IPP_INPUT_REGISTER_OFF);
	PDEBUG("hv_dev_pread(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
	       ret);
	PDEBUG("queuep %p\n", queuep);
	if (ret <= 0) {
		/* ISSUE: Shouldn't this be a fatal error? */
		pr_err("hv_dev_pread NETIO_IPP_INPUT_REGISTER_OFF failure\n");
		return;
	}

	queue = &info->queue;

	queue->__system_part = queuep;

	memset(&queue->__user_part, 0, sizeof(netio_queue_user_impl_t));

	/* This is traditionally "config.num_receive_packets / 2". */
	queue->__user_part.__receive_credit_interval = 4;
	queue->__user_part.__receive_credit_remaining =
		queue->__user_part.__receive_credit_interval;

	/*
	 * Get a fastio index from the hypervisor.
	 * ISSUE: Shouldn't this check the result?
	 */
	ret = hv_dev_pread(priv->hv_devhdl, 0,
			   (HV_VirtAddr)&queue->__user_part.__fastio_index,
			   sizeof(queue->__user_part.__fastio_index),
			   NETIO_IPP_GET_FASTIO_OFF);
	PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret);

	/* Now we are registered. */
	info->registered = true;
}


/*
 * Deregister with hypervisor on the current CPU.
 *
 * This simply discards all our credits, so no more packets will be
 * delivered to this tile.  There may still be packets in our queue.
 *
 * Also, disable the ingress interrupt.
 */
static void tile_net_deregister(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	/* Disable the ingress interrupt. */
	disable_percpu_irq(priv->intr_id);

	/* Do nothing else if not registered. */
	if (info == NULL || !info->registered)
		return;

	{
		struct tile_netio_queue *queue = &info->queue;
		netio_queue_user_impl_t *qup = &queue->__user_part;

		/* Discard all our credits. */
		__netio_fastio_return_credits(qup->__fastio_index, -1);
	}
}


/*
 * Unregister with hypervisor on the current CPU.
 *
 * Also, disable the ingress interrupt.
 */
static void tile_net_unregister(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	int ret;
	int dummy = 0;

	/* Disable the ingress interrupt. */
	disable_percpu_irq(priv->intr_id);

	/* Do nothing else if not registered. */
	if (info == NULL || !info->registered)
		return;

	/* Unregister ourselves with LIPP/LEPP. */
	ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
			    sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF);
	if (ret < 0)
		panic("Failed to unregister with LIPP/LEPP!\n");

	/* Discard all packets still in our NetIO queue. */
	tile_net_discard_packets(dev);

	/* Reset state. */
	info->num_needed_small_buffers = 0;
	info->num_needed_large_buffers = 0;

	/* Cancel egress timer. */
	del_timer(&info->egress_timer);
	info->egress_timer_scheduled = false;
}


/*
 * Helper function for "tile_net_stop()".
 *
 * Also used to handle registration failure in "tile_net_open_inner()",
 * when the various extra steps in "tile_net_stop()" are not necessary.
 */
static void tile_net_stop_aux(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int i;

	int dummy = 0;

	/*
	 * Unregister all tiles, so LIPP will stop delivering packets.
	 * Also, delete all the "napi" objects (sequentially, to protect
	 * "dev->napi_list").
	 */
	on_each_cpu(tile_net_unregister, (void *)dev, 1);
	for_each_online_cpu(i) {
		struct tile_net_cpu *info = priv->cpu[i];
		if (info != NULL && info->registered) {
			netif_napi_del(&info->napi);
			info->registered = false;
		}
	}

	/* Stop LIPP/LEPP. */
	if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
			  sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0)
		panic("Failed to stop LIPP/LEPP!\n");

	priv->partly_opened = false;
}


/*
 * Disable NAPI for the given device on the current cpu.
 */
static void tile_net_stop_disable(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	/* Disable NAPI if needed. */
	if (info != NULL && info->napi_enabled) {
		napi_disable(&info->napi);
		info->napi_enabled = false;
	}
}


/*
 * Enable NAPI and the ingress interrupt for the given device
 * on the current cpu.
 *
 * ISSUE: Only do this for "network cpus"?
 */
static void tile_net_open_enable(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	/* Enable NAPI. */
	napi_enable(&info->napi);
	info->napi_enabled = true;

	/* Enable the ingress interrupt. */
	enable_percpu_irq(priv->intr_id, 0);
}


/*
 * tile_net_open_inner does most of the work of bringing up the interface.
 * It's called from tile_net_open(), and also from tile_net_open_retry().
 * The return value is 0 if the interface was brought up, < 0 if
 * tile_net_open() should return the return value as an error, and > 0 if
 * tile_net_open() should return success and schedule a work item to
 * periodically retry the bringup.
 */
static int tile_net_open_inner(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info;
	struct tile_netio_queue *queue;
	int result = 0;
	int i;
	int dummy = 0;

	/*
	 * First try to register just on the local CPU, and handle any
	 * semi-expected "link down" failure specially.  Note that we
	 * do NOT call "tile_net_stop_aux()", unlike below.
	 */
	tile_net_register(dev);
	info = priv->cpu[my_cpu];
	if (!info->registered) {
		if (info->link_down)
			return 1;
		return -EAGAIN;
	}

	/*
	 * Now register everywhere else.  If any registration fails,
	 * even for "link down" (which might not be possible), we
	 * clean up using "tile_net_stop_aux()".  Also, add all the
	 * "napi" objects (sequentially, to protect "dev->napi_list").
	 * ISSUE: Only use "netif_napi_add()" for "network cpus"?
	 */
	smp_call_function(tile_net_register, (void *)dev, 1);
	for_each_online_cpu(i) {
		struct tile_net_cpu *info = priv->cpu[i];
		if (info->registered)
			netif_napi_add(dev, &info->napi, tile_net_poll, 64);
		else
			result = -EAGAIN;
	}
	if (result != 0) {
		tile_net_stop_aux(dev);
		return result;
	}

	queue = &info->queue;

	if (priv->intr_id == 0) {
		unsigned int irq;

		/*
		 * Acquire the irq allocated by the hypervisor.  Every
		 * queue gets the same irq.  The "__intr_id" field is
		 * "1 << irq", so we use "__ffs()" to extract "irq".
		 */
		priv->intr_id = queue->__system_part->__intr_id;
		BUG_ON(priv->intr_id == 0);
		irq = __ffs(priv->intr_id);

		/*
		 * Register the ingress interrupt handler for this
		 * device, permanently.
		 *
		 * We used to call "free_irq()" in "tile_net_stop()",
		 * and then re-register the handler here every time,
		 * but that caused DNP errors in "handle_IRQ_event()"
		 * because "desc->action" was NULL.  See bug 9143.
		 */
		tile_irq_activate(irq, TILE_IRQ_PERCPU);
		BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt,
				   0, dev->name, (void *)dev) != 0);
	}

	{
		/* Allocate initial buffers. */

		int max_buffers =
			priv->network_cpus_count * priv->network_cpus_credits;

		info->num_needed_small_buffers =
			min(LIPP_SMALL_BUFFERS, max_buffers);

		info->num_needed_large_buffers =
			min(LIPP_LARGE_BUFFERS, max_buffers);

		tile_net_provide_needed_buffers(info);

		if (info->num_needed_small_buffers != 0 ||
		    info->num_needed_large_buffers != 0)
			panic("Insufficient memory for buffer stack!");
	}

	/* We are about to be active. */
	priv->active = true;

	/* Make sure "active" is visible to all tiles. */
	mb();

	/* On each tile, enable NAPI and the ingress interrupt. */
	on_each_cpu(tile_net_open_enable, (void *)dev, 1);

	/* Start LIPP/LEPP and activate "ingress" at the shim. */
	if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
			  sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0)
		panic("Failed to activate the LIPP Shim!\n");

	/* Start our transmit queue. */
	netif_start_queue(dev);

	return 0;
}


/*
 * Called periodically to retry bringing up the NetIO interface,
 * if it doesn't come up cleanly during tile_net_open().
 */
static void tile_net_open_retry(struct work_struct *w)
{
	struct delayed_work *dw = to_delayed_work(w);

	struct tile_net_priv *priv =
		container_of(dw, struct tile_net_priv, retry_work);

	/*
	 * Try to bring the NetIO interface up.  If it fails, reschedule
	 * ourselves to try again later; otherwise, tell Linux we now have
	 * a working link.  ISSUE: What if the return value is negative?
	 */
	if (tile_net_open_inner(priv->dev) != 0)
		schedule_delayed_work(&priv->retry_work,
				      TILE_NET_RETRY_INTERVAL);
	else
		netif_carrier_on(priv->dev);
}


/*
 * Called when a network interface is made active.
 *
 * Returns 0 on success, negative value on failure.
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS (if needed), the watchdog timer
 * is started, and the stack is notified that the interface is ready.
 *
 * If the actual link is not available yet, then we tell Linux that
 * we have no carrier, and we keep checking until the link comes up.
 */
static int tile_net_open(struct net_device *dev)
{
	int ret = 0;
	struct tile_net_priv *priv = netdev_priv(dev);

	/*
	 * We rely on priv->partly_opened to tell us if this is the
	 * first time this interface is being brought up.  If it is
	 * set, the IPP was already initialized and should not be
	 * initialized again.
	 */
	if (!priv->partly_opened) {

		int count;
		int credits;

		/* Initialize LIPP/LEPP, and start the Shim. */
		ret = tile_net_open_aux(dev);
		if (ret < 0) {
			pr_err("tile_net_open_aux failed: %d\n", ret);
			return ret;
		}

		/* Analyze the network cpus. */

		if (network_cpus_used)
			cpumask_copy(&priv->network_cpus_map,
				     &network_cpus_map);
		else
			cpumask_copy(&priv->network_cpus_map, cpu_online_mask);


		count = cpumask_weight(&priv->network_cpus_map);

		/* Limit credits to available buffers, and apply min. */
		credits = max(16, (LIPP_LARGE_BUFFERS / count) & ~1);

		/* Apply "GBE" max limit. */
		/* ISSUE: Use higher limit for XGBE? */
		credits = min(NETIO_MAX_RECEIVE_PKTS, credits);
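
		/*
		 * Example (illustrative, with made-up numbers): if
		 * LIPP_LARGE_BUFFERS were 1024 and 56 cpus were in the
		 * map, 1024 / 56 = 18 (already even) credits per cpu,
		 * clamped below by 16 and above by
		 * NETIO_MAX_RECEIVE_PKTS.
		 */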

		priv->network_cpus_count = count;
		priv->network_cpus_credits = credits;

#ifdef TILE_NET_DEBUG
		pr_info("Using %d network cpus, with %d credits each\n",
			priv->network_cpus_count, priv->network_cpus_credits);
#endif

		priv->partly_opened = true;

	} else {
		/* FIXME: Is this possible? */
		/* printk("Already partly opened.\n"); */
	}

	/*
	 * Attempt to bring up the link.
	 */
	ret = tile_net_open_inner(dev);
	if (ret <= 0) {
		if (ret == 0)
			netif_carrier_on(dev);
		return ret;
	}

	/*
	 * We were unable to bring up the NetIO interface, but we want to
	 * try again in a little bit.  Tell Linux that we have no carrier
	 * so it doesn't try to use the interface before the link comes up
	 * and then remember to try again later.
	 */
	netif_carrier_off(dev);
	schedule_delayed_work(&priv->retry_work, TILE_NET_RETRY_INTERVAL);

	return 0;
}
1461 | ||
1462 | ||
d91c6412 | 1463 | static int tile_net_drain_lipp_buffers(struct tile_net_priv *priv) |
e5a06939 | 1464 | { |
d91c6412 | 1465 | int n = 0; |
e5a06939 | 1466 | |
d91c6412 | 1467 | /* Drain all the LIPP buffers. */ |
e5a06939 | 1468 | while (true) { |
92795672 | 1469 | unsigned int buffer; |
e5a06939 CM |
1470 | |
1471 | /* NOTE: This should never fail. */ | |
1472 | if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer, | |
1473 | sizeof(buffer), NETIO_IPP_DRAIN_OFF) < 0) | |
1474 | break; | |
1475 | ||
1476 | /* Stop when done. */ | |
1477 | if (buffer == 0) | |
1478 | break; | |
1479 | ||
1480 | { | |
1481 | /* Convert "linux_buffer_t" to "va". */ | |
1482 | void *va = __va((phys_addr_t)(buffer >> 1) << 7); | |
1483 | ||
1484 | /* Acquire the associated "skb". */ | |
1485 | struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); | |
1486 | struct sk_buff *skb = *skb_ptr; | |
1487 | ||
1488 | kfree_skb(skb); | |
1489 | } | |
d91c6412 CM |
1490 | |
1491 | n++; | |
e5a06939 CM |
1492 | } |
1493 | ||
d91c6412 CM |
1494 | return n; |
1495 | } | |
e5a06939 CM |
1496 | |
1497 | ||
d91c6412 CM |
1498 | /* |
1499 | * Disables a network interface. | |
1500 | * | |
1501 | * Returns 0, this is not allowed to fail. | |
1502 | * | |
1503 | * The close entry point is called when an interface is de-activated | |
1504 | * by the OS. The hardware is still under the drivers control, but | |
1505 | * needs to be disabled. A global MAC reset is issued to stop the | |
1506 | * hardware, and all transmit and receive resources are freed. | |
1507 | * | |
1508 | * ISSUE: How closely does "netif_running(dev)" mirror "priv->active"? | |
1509 | * | |
1510 | * Before we are called by "__dev_close()", "netif_running()" will | |
1511 | * have been cleared, so no NEW calls to "tile_net_poll()" will be | |
1512 | * made by "netpoll_poll_dev()". | |
1513 | * | |
1514 | * Often, this can cause some tiles to still have packets in their | |
1515 | * queues, so we must call "tile_net_discard_packets()" later. | |
1516 | * | |
1517 | * Note that some other tile may still be INSIDE "tile_net_poll()", | |
1518 | * and in fact, many will be, if there is heavy network load. | |
1519 | * | |
1520 | * Calling "on_each_cpu(tile_net_stop_disable, (void *)dev, 1)" when | |
1521 | * any tile still has "napi_schedule()" pending will induce a horrible crash | |
1522 | * when "msleep()" is called. This includes tiles which are inside | |
1523 | * "tile_net_poll()" which have not yet called "napi_complete()". | |
1524 | * | |
1525 | * So, we must first try to wait long enough for other tiles to finish | |
1526 | * with any current "tile_net_poll()" call, and, hopefully, to clear | |
1527 | * the "scheduled" flag. ISSUE: It is unclear what happens to tiles | |
1528 | * which have called "napi_schedule()" but have not yet tried to | |
1529 | * call "tile_net_poll()", or which exhausted their budget inside | |
1530 | * "tile_net_poll()" just before this function was called. | |
1531 | */ | |
1532 | static int tile_net_stop(struct net_device *dev) | |
1533 | { | |
1534 | struct tile_net_priv *priv = netdev_priv(dev); | |
1535 | ||
1536 | PDEBUG("tile_net_stop()\n"); | |
e5a06939 | 1537 | |
d91c6412 CM |
1538 | /* Start discarding packets. */ |
1539 | priv->active = false; | |
1540 | ||
1541 | /* Make sure "active" is visible to all tiles. */ | |
1542 | mb(); | |
e5a06939 CM |
1543 | |
1544 | /* | |
d91c6412 CM |
1545 | * On each tile, make sure no NEW packets get delivered, and |
1546 | * disable the ingress interrupt. | |
1547 | * | |
1548 | * Note that the ingress interrupt can fire AFTER this, | |
1549 | * presumably due to packets which were recently delivered, | |
1550 | * but it will have no effect. | |
e5a06939 | 1551 | */ |
d91c6412 | 1552 | on_each_cpu(tile_net_deregister, (void *)dev, 1); |
e5a06939 | 1553 | |
d91c6412 CM |
1554 | /* Optimistically drain LIPP buffers. */ |
1555 | (void)tile_net_drain_lipp_buffers(priv); | |
e5a06939 | 1556 | |
d91c6412 CM |
1557 | /* ISSUE: Only needed if not yet fully open. */ |
1558 | cancel_delayed_work_sync(&priv->retry_work); | |
e5a06939 | 1559 | |
d91c6412 CM |
1560 | /* Can't transmit any more. */ |
1561 | netif_stop_queue(dev); | |
e5a06939 | 1562 | |
d91c6412 CM |
1563 | /* Disable NAPI on each tile. */ |
1564 | on_each_cpu(tile_net_stop_disable, (void *)dev, 1); | |
1565 | ||
1566 | /* | |
1567 | * Drain any remaining LIPP buffers. NOTE: This warning | |
1568 | * has never been observed, but in theory it could happen. | |
1569 | */ | |
1570 | if (tile_net_drain_lipp_buffers(priv) != 0) | |
1571 | pr_warn("Had to drain some extra LIPP buffers!\n"); | |
e5a06939 | 1572 | |
d91c6412 CM |
1573 | /* Stop LIPP/LEPP. */ |
1574 | tile_net_stop_aux(dev); | |
1575 | ||
1576 | /* | |
1577 | * ISSUE: In practice, there appear to be no pending completions | |
1578 | * by the time we get here, but we free (all of) them anyway, | |
1579 | * just in case. | |
1580 | */ | |
1581 | while (tile_net_lepp_free_comps(dev, true)) | |
1582 | /* loop */; | |
e5a06939 | 1583 | |
d07bd86d | 1584 | /* Wipe the EPP queue, and wait till the stores hit the EPP. */ |
d91c6412 | 1585 | memset(priv->eq, 0, sizeof(lepp_queue_t)); |
d07bd86d | 1586 | mb(); |
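	/* (Presumably so that a later "open" cannot see stale */
	/* commands or completions.) */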
e5a06939 CM |
1587 | |
1588 | return 0; | |
1589 | } | |
1590 | ||
1591 | ||
1592 | /* | |
1593 | * Prepare the "frags" info for the resulting LEPP command. | |
1594 | * | |
1595 | * If needed, flush the memory used by the frags. | |
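 *
 * NOTE: LEPP reads the payload via physical addresses, so any
 * buffer not homed on the remote cache ("hash_for_home") must
 * first be flushed and invalidated via finv_buffer_remote().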
1596 | */ | |
1597 | static unsigned int tile_net_tx_frags(lepp_frag_t *frags, | |
1598 | struct sk_buff *skb, | |
1599 | void *b_data, unsigned int b_len) | |
1600 | { | |
1601 | unsigned int i, n = 0; | |
1602 | ||
1603 | struct skb_shared_info *sh = skb_shinfo(skb); | |
1604 | ||
1605 | phys_addr_t cpa; | |
1606 | ||
1607 | if (b_len != 0) { | |
1608 | ||
1609 | if (!hash_default) | |
63b7ca6b | 1610 | finv_buffer_remote(b_data, b_len, 0); |
e5a06939 CM |
1611 | |
1612 | cpa = __pa(b_data); | |
1613 | frags[n].cpa_lo = cpa; | |
1614 | frags[n].cpa_hi = cpa >> 32; | |
1615 | frags[n].length = b_len; | |
1616 | frags[n].hash_for_home = hash_default; | |
1617 | n++; | |
1618 | } | |
1619 | ||
1620 | for (i = 0; i < sh->nr_frags; i++) { | |
1621 | ||
1622 | skb_frag_t *f = &sh->frags[i]; | |
781a5e92 | 1623 | unsigned long pfn = page_to_pfn(skb_frag_page(f)); |
e5a06939 CM |
1624 | |
1625 | /* FIXME: Compute "hash_for_home" properly. */ | |
1626 | /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */ | |
1627 | int hash_for_home = hash_default; | |
1628 | ||
1629 | /* FIXME: Hmmm. */ | |
1630 | if (!hash_default) { | |
1631 | void *va = pfn_to_kaddr(pfn) + f->page_offset; | |
781a5e92 | 1632 | BUG_ON(PageHighMem(skb_frag_page(f))); |
92795672 | 1633 | finv_buffer_remote(va, skb_frag_size(f), 0); |
e5a06939 CM |
1634 | } |
1635 | ||
1636 | cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset; | |
1637 | frags[n].cpa_lo = cpa; | |
1638 | frags[n].cpa_hi = cpa >> 32; | |
9e903e08 | 1639 | frags[n].length = skb_frag_size(f); |
e5a06939 CM |
1640 | frags[n].hash_for_home = hash_for_home; |
1641 | n++; | |
1642 | } | |
1643 | ||
1644 | return n; | |
1645 | } | |
1646 | ||
1647 | ||
1648 | /* | |
1649 | * This function takes "skb", consisting of a header template and a | |
1650 | * payload, and hands it to LEPP, to emit as one or more segments, | |
1651 | * each consisting of a possibly modified header, plus a piece of the | |
1652 | * payload, via a process known as "tcp segmentation offload". | |
1653 | * | |
1654 | * Usually, "data" will contain the header template, of size "sh_len", | |
1655 | * and "sh->frags" will contain "skb->data_len" bytes of payload, and | |
1656 | * there will be "sh->gso_segs" segments. | |
1657 | * | |
1658 | * Sometimes, if "sendfile()" requires copying, we will be called with | |
1659 | * "data" containing the header and payload, with "frags" being empty. | |
1660 | * | |
92795672 CM |
1661 | * Sometimes, for example when using NFS over TCP, a single segment can |
1662 | * span 3 fragments, which must be handled carefully in LEPP. | |
e5a06939 CM |
1663 | * |
1664 | * See "emulate_large_send_offload()" for some reference code, which | |
1665 | * does not handle checksumming. | |
1666 | * | |
1667 | * ISSUE: How do we make sure that high memory DMA does not migrate? | |
1668 | */ | |
1669 | static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) | |
1670 | { | |
1671 | struct tile_net_priv *priv = netdev_priv(dev); | |
1672 | int my_cpu = smp_processor_id(); | |
1673 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | |
1674 | struct tile_net_stats_t *stats = &info->stats; | |
1675 | ||
1676 | struct skb_shared_info *sh = skb_shinfo(skb); | |
1677 | ||
1678 | unsigned char *data = skb->data; | |
1679 | ||
1680 | /* The ip header follows the ethernet header. */ | |
1681 | struct iphdr *ih = ip_hdr(skb); | |
1682 | unsigned int ih_len = ih->ihl * 4; | |
1683 | ||
1684 | /* Note that "nh == ih", by definition. */ | |
1685 | unsigned char *nh = skb_network_header(skb); | |
1686 | unsigned int eh_len = nh - data; | |
1687 | ||
1688 | /* The tcp header follows the ip header. */ | |
1689 | struct tcphdr *th = (struct tcphdr *)(nh + ih_len); | |
1690 | unsigned int th_len = th->doff * 4; | |
1691 | ||
1692 | /* The total number of header bytes. */ | |
1693 | /* NOTE: This may be less than skb_headlen(skb). */ | |
1694 | unsigned int sh_len = eh_len + ih_len + th_len; | |
1695 | ||
1696 | /* The number of payload bytes at "skb->data + sh_len". */ | |
1697 | /* This is non-zero for sendfile() without HIGHDMA. */ | |
1698 | unsigned int b_len = skb_headlen(skb) - sh_len; | |
1699 | ||
1700 | /* The total number of payload bytes. */ | |
1701 | unsigned int d_len = b_len + skb->data_len; | |
1702 | ||
1703 | /* The maximum payload size. */ | |
1704 | unsigned int p_len = sh->gso_size; | |
1705 | ||
1706 | /* The total number of segments. */ | |
1707 | unsigned int num_segs = sh->gso_segs; | |
1708 | ||
1709 | /* The temporary copy of the command. */ | |
1710 | u32 cmd_body[(LEPP_MAX_CMD_SIZE + 3) / 4]; | |
1711 | lepp_tso_cmd_t *cmd = (lepp_tso_cmd_t *)cmd_body; | |
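	/* (The command is assembled on the stack first, and copied */
	/* into the shared queue only while holding "eq_lock".) */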
1712 | ||
1713 | /* Analyze the "frags". */ | |
1714 | unsigned int num_frags = | |
1715 | tile_net_tx_frags(cmd->frags, skb, data + sh_len, b_len); | |
1716 | ||
1717 | /* The size of the command, including frags and header. */ | |
1718 | size_t cmd_size = LEPP_TSO_CMD_SIZE(num_frags, sh_len); | |
1719 | ||
1720 | /* The command header. */ | |
1721 | lepp_tso_cmd_t cmd_init = { | |
1722 | .tso = true, | |
1723 | .header_size = sh_len, | |
1724 | .ip_offset = eh_len, | |
1725 | .tcp_offset = eh_len + ih_len, | |
1726 | .payload_size = p_len, | |
1727 | .num_frags = num_frags, | |
1728 | }; | |
1729 | ||
1730 | unsigned long irqflags; | |
1731 | ||
d91c6412 | 1732 | lepp_queue_t *eq = priv->eq; |
e5a06939 | 1733 | |
d91c6412 CM |
1734 | struct sk_buff *olds[8]; |
1735 | unsigned int wanted = 8; | |
e5a06939 CM |
1736 | unsigned int i, nolds = 0; |
1737 | ||
1738 | unsigned int cmd_head, cmd_tail, cmd_next; | |
1739 | unsigned int comp_tail; | |
1740 | ||
e5a06939 CM |
1741 | |
1742 | /* Paranoia. */ | |
1743 | BUG_ON(skb->protocol != htons(ETH_P_IP)); | |
1744 | BUG_ON(ih->protocol != IPPROTO_TCP); | |
1745 | BUG_ON(skb->ip_summed != CHECKSUM_PARTIAL); | |
1746 | BUG_ON(num_frags > LEPP_MAX_FRAGS); | |
1747 | /*--BUG_ON(num_segs != (d_len + (p_len - 1)) / p_len); */ | |
1748 | BUG_ON(num_segs <= 1); | |
1749 | ||
1750 | ||
1751 | /* Finish preparing the command. */ | |
1752 | ||
1753 | /* Copy the command header. */ | |
1754 | *cmd = cmd_init; | |
1755 | ||
1756 | /* Copy the "header". */ | |
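	/* (The header template is stored immediately after the frag */
	/* array; LEPP_TSO_CMD_SIZE() accounts for both.) */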
1757 | memcpy(&cmd->frags[num_frags], data, sh_len); | |
1758 | ||
1759 | ||
1760 | /* Prefetch and wait, to minimize time spent holding the spinlock. */ | |
1761 | prefetch_L1(&eq->comp_tail); | |
1762 | prefetch_L1(&eq->cmd_tail); | |
1763 | mb(); | |
1764 | ||
1765 | ||
1766 | /* Enqueue the command. */ | |
1767 | ||
d91c6412 | 1768 | spin_lock_irqsave(&priv->eq_lock, irqflags); |
e5a06939 | 1769 | |
92795672 CM |
1770 | /* Handle completions if needed to make room. */ |
1771 | /* NOTE: Return NETDEV_TX_BUSY if there is still no room. */ | |
d91c6412 CM |
1772 | if (lepp_num_free_comp_slots(eq) == 0) { |
1773 | nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0); | |
1774 | if (nolds == 0) { | |
1775 | busy: | |
1776 | spin_unlock_irqrestore(&priv->eq_lock, irqflags); | |
1777 | return NETDEV_TX_BUSY; | |
1778 | } | |
e5a06939 CM |
1779 | } |
1780 | ||
1781 | cmd_head = eq->cmd_head; | |
1782 | cmd_tail = eq->cmd_tail; | |
1783 | ||
e5a06939 | 1784 | /* Prepare to advance, detecting full queue. */ |
92795672 | 1785 | /* NOTE: Return NETDEV_TX_BUSY if the queue is full. */ |
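	/* (Commands are variable-sized; the ring is full if the new */
	/* tail would pass the head, or if wrapping to zero would */
	/* land on the head.) */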
e5a06939 CM |
1786 | cmd_next = cmd_tail + cmd_size; |
1787 | if (cmd_tail < cmd_head && cmd_next >= cmd_head) | |
d91c6412 | 1788 | goto busy; |
e5a06939 CM |
1789 | if (cmd_next > LEPP_CMD_LIMIT) { |
1790 | cmd_next = 0; | |
1791 | if (cmd_next == cmd_head) | |
d91c6412 | 1792 | goto busy; |
e5a06939 CM |
1793 | } |
1794 | ||
1795 | /* Copy the command. */ | |
1796 | memcpy(&eq->cmds[cmd_tail], cmd, cmd_size); | |
1797 | ||
1798 | /* Advance. */ | |
1799 | cmd_tail = cmd_next; | |
1800 | ||
1801 | /* Record "skb" for eventual freeing. */ | |
1802 | comp_tail = eq->comp_tail; | |
1803 | eq->comps[comp_tail] = skb; | |
1804 | LEPP_QINC(comp_tail); | |
1805 | eq->comp_tail = comp_tail; | |
1806 | ||
1807 | /* Flush before allowing LEPP to handle the command. */ | |
d91c6412 | 1808 | /* ISSUE: Is this the optimal location for the flush? */ |
e5a06939 CM |
1809 | __insn_mf(); |
1810 | ||
1811 | eq->cmd_tail = cmd_tail; | |
1812 | ||
d91c6412 CM |
1813 | /* NOTE: Using "4" here is more efficient than "0" or "2", */ |
1814 | /* and, strangely, more efficient than pre-checking the number */ | |
1815 | /* of available completions, and comparing it to 4. */ | |
e5a06939 | 1816 | if (nolds == 0) |
d91c6412 CM |
1817 | nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4); |
1818 | ||
1819 | spin_unlock_irqrestore(&priv->eq_lock, irqflags); | |
e5a06939 CM |
1820 | |
1821 | /* Handle completions. */ | |
1822 | for (i = 0; i < nolds; i++) | |
66d1bee1 | 1823 | dev_consume_skb_any(olds[i]); |
e5a06939 CM |
1824 | |
1825 | /* Update stats. */ | |
d68e2d3b | 1826 | u64_stats_update_begin(&stats->syncp); |
e5a06939 CM |
1827 | stats->tx_packets += num_segs; |
1828 | stats->tx_bytes += (num_segs * sh_len) + d_len; | |
d68e2d3b | 1829 | u64_stats_update_end(&stats->syncp); |
e5a06939 CM |
1830 | |
1831 | /* Make sure the egress timer is scheduled. */ | |
1832 | tile_net_schedule_egress_timer(info); | |
1833 | ||
1834 | return NETDEV_TX_OK; | |
1835 | } | |
1836 | ||
1837 | ||
1838 | /* | |
1839 | * Transmit a packet (called by the kernel via "hard_start_xmit" hook). | |
1840 | */ | |
1841 | static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) | |
1842 | { | |
1843 | struct tile_net_priv *priv = netdev_priv(dev); | |
1844 | int my_cpu = smp_processor_id(); | |
1845 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | |
1846 | struct tile_net_stats_t *stats = &info->stats; | |
1847 | ||
1848 | unsigned long irqflags; | |
1849 | ||
1850 | struct skb_shared_info *sh = skb_shinfo(skb); | |
1851 | ||
1852 | unsigned int len = skb->len; | |
1853 | unsigned char *data = skb->data; | |
1854 | ||
96339d6c | 1855 | unsigned int csum_start = skb_checksum_start_offset(skb); |
e5a06939 | 1856 | |
815d3bae | 1857 | lepp_frag_t frags[1 + MAX_SKB_FRAGS]; |
e5a06939 CM |
1858 | |
1859 | unsigned int num_frags; | |
1860 | ||
d91c6412 | 1861 | lepp_queue_t *eq = priv->eq; |
e5a06939 | 1862 | |
d91c6412 CM |
1863 | struct sk_buff *olds[8]; |
1864 | unsigned int wanted = 8; | |
e5a06939 CM |
1865 | unsigned int i, nolds = 0; |
1866 | ||
1867 | unsigned int cmd_size = sizeof(lepp_cmd_t); | |
1868 | ||
1869 | unsigned int cmd_head, cmd_tail, cmd_next; | |
1870 | unsigned int comp_tail; | |
1871 | ||
815d3bae | 1872 | lepp_cmd_t cmds[1 + MAX_SKB_FRAGS]; |
e5a06939 | 1873 | |
e5a06939 CM |
1874 | |
1875 | /* | |
1876 | * This is paranoia, since we think that if the link doesn't come | |
1877 | * up, telling Linux we have no carrier will keep it from trying | |
1878 | * to transmit. If it does, though, we can't execute this routine, | |
1879 | * since data structures we depend on aren't set up yet. | |
1880 | */ | |
1881 | if (!info->registered) | |
1882 | return NETDEV_TX_BUSY; | |
1883 | ||
1884 | ||
1885 | /* Save the timestamp. */ | |
860e9538 | 1886 | netif_trans_update(dev); |
e5a06939 CM |
1887 | |
1888 | ||
1889 | #ifdef TILE_NET_PARANOIA | |
1890 | #if CHIP_HAS_CBOX_HOME_MAP() | |
1891 | if (hash_default) { | |
1892 | HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data); | |
1893 | if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3) | |
d91c6412 CM |
1894 | panic("Non-HFH egress buffer! VA=%p Mode=%d PTE=%llx", |
1895 | data, hv_pte_get_mode(pte), hv_pte_val(pte)); | |
e5a06939 CM |
1896 | } |
1897 | #endif | |
1898 | #endif | |
1899 | ||
1900 | ||
1901 | #ifdef TILE_NET_DUMP_PACKETS | |
1902 | /* ISSUE: Does not dump the "frags". */ | |
1903 | dump_packet(data, skb_headlen(skb), "tx"); | |
1904 | #endif /* TILE_NET_DUMP_PACKETS */ | |
1905 | ||
1906 | ||
1907 | if (sh->gso_size != 0) | |
1908 | return tile_net_tx_tso(skb, dev); | |
1909 | ||
1910 | ||
1911 | /* Prepare the commands. */ | |
1912 | ||
1913 | num_frags = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); | |
1914 | ||
1915 | for (i = 0; i < num_frags; i++) { | |
1916 | ||
1917 | bool final = (i == num_frags - 1); | |
1918 | ||
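		/* Only the final fragment requests a completion and */
		/* marks the end of the packet. */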
1919 | lepp_cmd_t cmd = { | |
1920 | .cpa_lo = frags[i].cpa_lo, | |
1921 | .cpa_hi = frags[i].cpa_hi, | |
1922 | .length = frags[i].length, | |
1923 | .hash_for_home = frags[i].hash_for_home, | |
1924 | .send_completion = final, | |
1925 | .end_of_packet = final | |
1926 | }; | |
1927 | ||
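		/* For CHECKSUM_PARTIAL, ask LEPP to sum the bytes from */
		/* "csum_start" to the end, and store the result at the */
		/* offset Linux requested. */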
1928 | if (i == 0 && skb->ip_summed == CHECKSUM_PARTIAL) { | |
1929 | cmd.compute_checksum = 1; | |
1930 | cmd.checksum_data.bits.start_byte = csum_start; | |
1931 | cmd.checksum_data.bits.count = len - csum_start; | |
1932 | cmd.checksum_data.bits.destination_byte = | |
1933 | csum_start + skb->csum_offset; | |
1934 | } | |
1935 | ||
1936 | cmds[i] = cmd; | |
1937 | } | |
1938 | ||
1939 | ||
1940 | /* Prefetch and wait, to minimize time spent holding the spinlock. */ | |
1941 | prefetch_L1(&eq->comp_tail); | |
1942 | prefetch_L1(&eq->cmd_tail); | |
1943 | mb(); | |
1944 | ||
1945 | ||
1946 | /* Enqueue the commands. */ | |
1947 | ||
d91c6412 | 1948 | spin_lock_irqsave(&priv->eq_lock, irqflags); |
e5a06939 | 1949 | |
92795672 CM |
1950 | /* Handle completions if needed to make room. */ |
1951 | /* NOTE: Return NETDEV_TX_BUSY if there is still no room. */ | |
d91c6412 CM |
1952 | if (lepp_num_free_comp_slots(eq) == 0) { |
1953 | nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0); | |
1954 | if (nolds == 0) { | |
1955 | busy: | |
1956 | spin_unlock_irqrestore(&priv->eq_lock, irqflags); | |
1957 | return NETDEV_TX_BUSY; | |
1958 | } | |
e5a06939 CM |
1959 | } |
1960 | ||
1961 | cmd_head = eq->cmd_head; | |
1962 | cmd_tail = eq->cmd_tail; | |
1963 | ||
e5a06939 | 1964 | /* Copy the commands, or fail. */ |
92795672 | 1965 | /* NOTE: Return NETDEV_TX_BUSY if the queue is full. */ |
e5a06939 CM |
1966 | for (i = 0; i < num_frags; i++) { |
1967 | ||
1968 | /* Prepare to advance, detecting full queue. */ | |
1969 | cmd_next = cmd_tail + cmd_size; | |
1970 | if (cmd_tail < cmd_head && cmd_next >= cmd_head) | |
d91c6412 | 1971 | goto busy; |
e5a06939 CM |
1972 | if (cmd_next > LEPP_CMD_LIMIT) { |
1973 | cmd_next = 0; | |
1974 | if (cmd_next == cmd_head) | |
d91c6412 | 1975 | goto busy; |
e5a06939 CM |
1976 | } |
1977 | ||
1978 | /* Copy the command. */ | |
1979 | *(lepp_cmd_t *)&eq->cmds[cmd_tail] = cmds[i]; | |
1980 | ||
1981 | /* Advance. */ | |
1982 | cmd_tail = cmd_next; | |
1983 | } | |
1984 | ||
1985 | /* Record "skb" for eventual freeing. */ | |
1986 | comp_tail = eq->comp_tail; | |
1987 | eq->comps[comp_tail] = skb; | |
1988 | LEPP_QINC(comp_tail); | |
1989 | eq->comp_tail = comp_tail; | |
1990 | ||
1991 | /* Flush before allowing LEPP to handle the command. */ | |
d91c6412 | 1992 | /* ISSUE: Is this the optimal location for the flush? */ |
e5a06939 CM |
1993 | __insn_mf(); |
1994 | ||
1995 | eq->cmd_tail = cmd_tail; | |
1996 | ||
d91c6412 CM |
1997 | /* NOTE: Using "4" here is more efficient than "0" or "2", */ |
1998 | /* and, strangely, more efficient than pre-checking the number */ | |
1999 | /* of available completions, and comparing it to 4. */ | |
e5a06939 | 2000 | if (nolds == 0) |
d91c6412 CM |
2001 | nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4); |
2002 | ||
2003 | spin_unlock_irqrestore(&priv->eq_lock, irqflags); | |
e5a06939 CM |
2004 | |
2005 | /* Handle completions. */ | |
2006 | for (i = 0; i < nolds; i++) | |
66d1bee1 | 2007 | dev_consume_skb_any(olds[i]); |
e5a06939 CM |
2008 | |
2009 | /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */ | |
d68e2d3b | 2010 | u64_stats_update_begin(&stats->syncp); |
e5a06939 CM |
2011 | stats->tx_packets++; |
2012 | stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN); | |
d68e2d3b | 2013 | u64_stats_update_end(&stats->syncp); |
e5a06939 CM |
2014 | |
2015 | /* Make sure the egress timer is scheduled. */ | |
2016 | tile_net_schedule_egress_timer(info); | |
2017 | ||
2018 | return NETDEV_TX_OK; | |
2019 | } | |
2020 | ||
2021 | ||
2022 | /* | |
2023 | * Deal with a transmit timeout. | |
2024 | */ | |
2025 | static void tile_net_tx_timeout(struct net_device *dev) | |
2026 | { | |
2027 | PDEBUG("tile_net_tx_timeout()\n"); | |
2028 | PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies, | |
3e66bab3 | 2029 | jiffies - dev_trans_start(dev)); |
e5a06939 CM |
2030 | |
2031 | /* XXX: ISSUE: This doesn't seem useful for us. */ | |
2032 | netif_wake_queue(dev); | |
2033 | } | |
2034 | ||
2035 | ||
2036 | /* | |
2037 | * Ioctl commands. | |
2038 | */ | |
2039 | static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
2040 | { | |
2041 | return -EOPNOTSUPP; | |
2042 | } | |
2043 | ||
2044 | ||
2045 | /* | |
2046 | * Get System Network Statistics. | |
2047 | * | |
2048 | * Fills in and returns the caller-provided statistics structure. | |
2049 | */ | |
d68e2d3b CM |
2050 | static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev, |
2051 | struct rtnl_link_stats64 *stats) | |
e5a06939 CM |
2052 | { |
2053 | struct tile_net_priv *priv = netdev_priv(dev); | |
d68e2d3b CM |
2054 | u64 rx_packets = 0, tx_packets = 0; |
2055 | u64 rx_bytes = 0, tx_bytes = 0; | |
2056 | u64 rx_errors = 0, rx_dropped = 0; | |
e5a06939 CM |
2057 | int i; |
2058 | ||
2059 | for_each_online_cpu(i) { | |
d68e2d3b CM |
2060 | struct tile_net_stats_t *cpu_stats; |
2061 | u64 trx_packets, ttx_packets, trx_bytes, ttx_bytes; | |
2062 | u64 trx_errors, trx_dropped; | |
2063 | unsigned int start; | |
2064 | ||
2065 | if (priv->cpu[i] == NULL) | |
2066 | continue; | |
2067 | cpu_stats = &priv->cpu[i]->stats; | |
2068 | ||
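		/* Snapshot this cpu's counters, retrying if a writer */
		/* updates them mid-read; the syncp makes the 64-bit */
		/* counters safe to read on 32-bit tiles. */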
2069 | do { | |
57a7744e | 2070 | start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); |
d68e2d3b CM |
2071 | trx_packets = cpu_stats->rx_packets; |
2072 | ttx_packets = cpu_stats->tx_packets; | |
2073 | trx_bytes = cpu_stats->rx_bytes; | |
2074 | ttx_bytes = cpu_stats->tx_bytes; | |
2075 | trx_errors = cpu_stats->rx_errors; | |
2076 | trx_dropped = cpu_stats->rx_dropped; | |
57a7744e | 2077 | } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); |
d68e2d3b CM |
2078 | |
2079 | rx_packets += trx_packets; | |
2080 | tx_packets += ttx_packets; | |
2081 | rx_bytes += trx_bytes; | |
2082 | tx_bytes += ttx_bytes; | |
2083 | rx_errors += trx_errors; | |
2084 | rx_dropped += trx_dropped; | |
e5a06939 CM |
2085 | } |
2086 | ||
d68e2d3b CM |
2087 | stats->rx_packets = rx_packets; |
2088 | stats->tx_packets = tx_packets; | |
2089 | stats->rx_bytes = rx_bytes; | |
2090 | stats->tx_bytes = tx_bytes; | |
2091 | stats->rx_errors = rx_errors; | |
2092 | stats->rx_dropped = rx_dropped; | |
e5a06939 | 2093 | |
d68e2d3b | 2094 | return stats; |
e5a06939 CM |
2095 | } |
2096 | ||
2097 | ||
2098 | /* | |
2099 | * Change the "mtu". | |
2100 | * | |
2101 | * The "change_mtu" method is usually not needed. | |
2102 | * If you need it, it must be like this. | |
2103 | */ | |
2104 | static int tile_net_change_mtu(struct net_device *dev, int new_mtu) | |
2105 | { | |
2106 | PDEBUG("tile_net_change_mtu()\n"); | |
2107 | ||
2108 | /* Check ranges. */ | |
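	/* (68 is the minimum IPv4 MTU; the 1500 cap presumably */
	/* reflects the lack of jumbo frame support.) */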
2109 | if ((new_mtu < 68) || (new_mtu > 1500)) | |
2110 | return -EINVAL; | |
2111 | ||
2112 | /* Accept the value. */ | |
2113 | dev->mtu = new_mtu; | |
2114 | ||
2115 | return 0; | |
2116 | } | |
2117 | ||
2118 | ||
2119 | /* | |
2120 | * Change the Ethernet Address of the NIC. | |
2121 | * | |
2122 | * The hypervisor driver does not support changing MAC address. However, | |
2123 | * the IPP does not do anything with the MAC address, so the address which | |
2124 | * gets used on outgoing packets, and which is accepted on incoming packets, | |
2125 | * is completely up to the NetIO program or kernel driver which is actually | |
2126 | * handling them. | |
2127 | * | |
2128 | * Returns 0 on success, negative on failure. | |
2129 | */ | |
2130 | static int tile_net_set_mac_address(struct net_device *dev, void *p) | |
2131 | { | |
2132 | struct sockaddr *addr = p; | |
2133 | ||
2134 | if (!is_valid_ether_addr(addr->sa_data)) | |
504f9b5a | 2135 | return -EADDRNOTAVAIL; |
e5a06939 CM |
2136 | |
2137 | /* ISSUE: Note that "dev_addr" is now a pointer. */ | |
2138 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | |
2139 | ||
2140 | return 0; | |
2141 | } | |
2142 | ||
2143 | ||
2144 | /* | |
2145 | * Obtain the MAC address from the hypervisor. | |
2146 | * This must be done before opening the device. | |
2147 | */ | |
2148 | static int tile_net_get_mac(struct net_device *dev) | |
2149 | { | |
2150 | struct tile_net_priv *priv = netdev_priv(dev); | |
2151 | ||
2152 | char hv_dev_name[32]; | |
2153 | int len; | |
2154 | ||
2155 | __netio_getset_offset_t offset = { .word = NETIO_IPP_PARAM_OFF }; | |
2156 | ||
2157 | int ret; | |
2158 | ||
2159 | /* For example, "xgbe0". */ | |
2160 | strcpy(hv_dev_name, dev->name); | |
2161 | len = strlen(hv_dev_name); | |
2162 | ||
2163 | /* For example, "xgbe/0". */ | |
2164 | hv_dev_name[len] = hv_dev_name[len - 1]; | |
2165 | hv_dev_name[len - 1] = '/'; | |
2166 | len++; | |
2167 | ||
2168 | /* For example, "xgbe/0/native_hash". */ | |
2169 | strcpy(hv_dev_name + len, hash_default ? "/native_hash" : "/native"); | |
2170 | ||
2171 | /* Get the hypervisor handle for this device. */ | |
2172 | priv->hv_devhdl = hv_dev_open((HV_VirtAddr)hv_dev_name, 0); | |
2173 | PDEBUG("hv_dev_open(%s) returned %d %p\n", | |
2174 | hv_dev_name, priv->hv_devhdl, &priv->hv_devhdl); | |
2175 | if (priv->hv_devhdl < 0) { | |
2176 | if (priv->hv_devhdl == HV_ENODEV) | |
2177 | printk(KERN_DEBUG "Ignoring unconfigured device %s\n", | |
2178 | hv_dev_name); | |
2179 | else | |
2180 | printk(KERN_DEBUG "hv_dev_open(%s) returned %d\n", | |
2181 | hv_dev_name, priv->hv_devhdl); | |
2182 | return -1; | |
2183 | } | |
2184 | ||
2185 | /* | |
2186 | * Read the hardware address from the hypervisor. | |
2187 | * ISSUE: Note that "dev_addr" is now a pointer. | |
2188 | */ | |
2189 | offset.bits.class = NETIO_PARAM; | |
2190 | offset.bits.addr = NETIO_PARAM_MAC; | |
2191 | ret = hv_dev_pread(priv->hv_devhdl, 0, | |
2192 | (HV_VirtAddr)dev->dev_addr, dev->addr_len, | |
2193 | offset.word); | |
2194 | PDEBUG("hv_dev_pread(NETIO_PARAM_MAC) returned %d\n", ret); | |
2195 | if (ret <= 0) { | |
2196 | printk(KERN_DEBUG "hv_dev_pread(NETIO_PARAM_MAC) %s failed\n", | |
2197 | dev->name); | |
2198 | /* | |
2199 | * Since the device is configured by the hypervisor but we | |
2200 | * can't get its MAC address, we are most likely running | |
2201 | * the simulator, so let's generate a random MAC address. | |
2202 | */ | |
7ce5d222 | 2203 | eth_hw_addr_random(dev); |
e5a06939 CM |
2204 | } |
2205 | ||
2206 | return 0; | |
2207 | } | |
2208 | ||
92795672 CM |
2209 | |
2210 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
2211 | /* | |
2212 | * Polling 'interrupt' - used by things like netconsole to send skbs | |
2213 | * without having to re-enable interrupts. It's not called while | |
2214 | * the interrupt routine is executing. | |
2215 | */ | |
2216 | static void tile_net_netpoll(struct net_device *dev) | |
2217 | { | |
2218 | struct tile_net_priv *priv = netdev_priv(dev); | |
2219 | disable_percpu_irq(priv->intr_id); | |
2220 | tile_net_handle_ingress_interrupt(priv->intr_id, dev); | |
2221 | enable_percpu_irq(priv->intr_id, 0); | |
2222 | } | |
2223 | #endif | |
2224 | ||
2225 | ||
e5686ad8 | 2226 | static const struct net_device_ops tile_net_ops = { |
e5a06939 CM |
2227 | .ndo_open = tile_net_open, |
2228 | .ndo_stop = tile_net_stop, | |
2229 | .ndo_start_xmit = tile_net_tx, | |
2230 | .ndo_do_ioctl = tile_net_ioctl, | |
d68e2d3b | 2231 | .ndo_get_stats64 = tile_net_get_stats64, |
e5a06939 CM |
2232 | .ndo_change_mtu = tile_net_change_mtu, |
2233 | .ndo_tx_timeout = tile_net_tx_timeout, | |
92795672 CM |
2234 | .ndo_set_mac_address = tile_net_set_mac_address, |
2235 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
2236 | .ndo_poll_controller = tile_net_netpoll, | |
2237 | #endif | |
e5a06939 CM |
2238 | }; |
2239 | ||
2240 | ||
2241 | /* | |
2242 | * The setup function. | |
2243 | * | |
2244 | * This uses ether_setup() to assign various fields in dev, including | |
2245 | * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields. | |
2246 | */ | |
2247 | static void tile_net_setup(struct net_device *dev) | |
2248 | { | |
a8eaed55 | 2249 | netdev_features_t features = 0; |
e5a06939 CM |
2250 | |
2251 | ether_setup(dev); | |
e5a06939 | 2252 | dev->netdev_ops = &tile_net_ops; |
e5a06939 | 2253 | dev->watchdog_timeo = TILE_NET_TIMEOUT; |
a8eaed55 CM |
2254 | dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN; |
2255 | dev->mtu = TILE_NET_MTU; | |
e5a06939 | 2256 | |
a8eaed55 CM |
2257 | features |= NETIF_F_HW_CSUM; |
2258 | features |= NETIF_F_SG; | |
815d3bae CM |
2259 | |
2260 | /* We support TSO iff the HV supports sufficient frags. */ | |
2261 | if (LEPP_MAX_FRAGS >= 1 + MAX_SKB_FRAGS) | |
2262 | features |= NETIF_F_TSO; | |
e5a06939 | 2263 | |
a8eaed55 CM |
2264 | /* We can't support HIGHDMA without hash_default, since we need |
2265 | * to be able to finv() with a VA if we don't have hash_default. | |
2266 | */ | |
e5a06939 | 2267 | if (hash_default) |
a8eaed55 | 2268 | features |= NETIF_F_HIGHDMA; |
e5a06939 | 2269 | |
a8eaed55 CM |
2270 | dev->hw_features |= features; |
2271 | dev->vlan_features |= features; | |
2272 | dev->features |= features; | |
e5a06939 CM |
2273 | } |
2274 | ||
2275 | ||
2276 | /* | |
2277 | * Allocate the device structure, register the device, and obtain the | |
2278 | * MAC address from the hypervisor. | |
2279 | */ | |
2280 | static struct net_device *tile_net_dev_init(const char *name) | |
2281 | { | |
2282 | int ret; | |
2283 | struct net_device *dev; | |
2284 | struct tile_net_priv *priv; | |
e5a06939 CM |
2285 | |
2286 | /* | |
2287 | * Allocate the device structure. This allocates "priv", calls | |
2288 | * tile_net_setup(), and saves "name". Normally, "name" is a | |
2289 | * template, instantiated by register_netdev(), but not for us. | |
2290 | */ | |
c835a677 TG |
2291 | dev = alloc_netdev(sizeof(*priv), name, NET_NAME_UNKNOWN, |
2292 | tile_net_setup); | |
e5a06939 CM |
2293 | if (!dev) { |
2294 | pr_err("alloc_netdev(%s) failed\n", name); | |
2295 | return NULL; | |
2296 | } | |
2297 | ||
2298 | priv = netdev_priv(dev); | |
2299 | ||
2300 | /* Initialize "priv". */ | |
2301 | ||
2302 | memset(priv, 0, sizeof(*priv)); | |
2303 | ||
2304 | /* Save "dev" for "tile_net_open_retry()". */ | |
2305 | priv->dev = dev; | |
2306 | ||
2307 | INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry); | |
2308 | ||
d91c6412 | 2309 | spin_lock_init(&priv->eq_lock); |
e5a06939 | 2310 | |
d91c6412 CM |
2311 | /* Allocate "eq". */ |
2312 | priv->eq_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, EQ_ORDER); | |
2313 | if (!priv->eq_pages) { | |
e5a06939 CM |
2314 | free_netdev(dev); |
2315 | return NULL; | |
2316 | } | |
d91c6412 | 2317 | priv->eq = page_address(priv->eq_pages); |
e5a06939 CM |
2318 | |
2319 | /* Register the network device. */ | |
2320 | ret = register_netdev(dev); | |
2321 | if (ret) { | |
2322 | pr_err("register_netdev %s failed %d\n", dev->name, ret); | |
d91c6412 | 2323 | __free_pages(priv->eq_pages, EQ_ORDER); |
e5a06939 CM |
2324 | free_netdev(dev); |
2325 | return NULL; | |
2326 | } | |
2327 | ||
2328 | /* Get the MAC address. */ | |
2329 | ret = tile_net_get_mac(dev); | |
2330 | if (ret < 0) { | |
2331 | unregister_netdev(dev); | |
d91c6412 | 2332 | __free_pages(priv->eq_pages, EQ_ORDER); |
e5a06939 CM |
2333 | free_netdev(dev); |
2334 | return NULL; | |
2335 | } | |
2336 | ||
2337 | return dev; | |
2338 | } | |
2339 | ||
2340 | ||
2341 | /* | |
2342 | * Module cleanup. | |
d91c6412 CM |
2343 | * |
2344 | * FIXME: If compiled as a module, this module cannot be "unloaded", | |
2345 | * because the "ingress interrupt handler" is registered permanently. | |
e5a06939 CM |
2346 | */ |
2347 | static void tile_net_cleanup(void) | |
2348 | { | |
2349 | int i; | |
2350 | ||
2351 | for (i = 0; i < TILE_NET_DEVS; i++) { | |
2352 | if (tile_net_devs[i]) { | |
2353 | struct net_device *dev = tile_net_devs[i]; | |
2354 | struct tile_net_priv *priv = netdev_priv(dev); | |
2355 | unregister_netdev(dev); | |
d07bd86d | 2356 | finv_buffer_remote(priv->eq, EQ_SIZE, 0); |
d91c6412 | 2357 | __free_pages(priv->eq_pages, EQ_ORDER); |
e5a06939 CM |
2358 | free_netdev(dev); |
2359 | } | |
2360 | } | |
2361 | } | |
2362 | ||
2363 | ||
2364 | /* | |
2365 | * Module initialization. | |
2366 | */ | |
2367 | static int tile_net_init_module(void) | |
2368 | { | |
92795672 | 2369 | pr_info("Tilera Network Driver\n"); |
e5a06939 CM |
2370 | |
2371 | tile_net_devs[0] = tile_net_dev_init("xgbe0"); | |
2372 | tile_net_devs[1] = tile_net_dev_init("xgbe1"); | |
2373 | tile_net_devs[2] = tile_net_dev_init("gbe0"); | |
2374 | tile_net_devs[3] = tile_net_dev_init("gbe1"); | |
2375 | ||
2376 | return 0; | |
2377 | } | |
2378 | ||
2379 | ||
d91c6412 CM |
2380 | module_init(tile_net_init_module); |
2381 | module_exit(tile_net_cleanup); | |
2382 | ||
2383 | ||
e5a06939 | 2384 | #ifndef MODULE |
d91c6412 | 2385 | |
e5a06939 CM |
2386 | /* |
2387 | * The "network_cpus" boot argument specifies the cpus that are dedicated | |
2388 | * to handle ingress packets. | |
2389 | * | |
2390 | * The parameter should be in the form "network_cpus=m-n[,x-y]", where | |
2391 | * m, n, x, y are integers identifying ranges of cpus to dedicate; | |
2392 | * unavailable cpus (e.g. dataplane cpus) are cropped from the list. | |
2393 | */ | |
2394 | static int __init network_cpus_setup(char *str) | |
2395 | { | |
2396 | int rc = cpulist_parse_crop(str, &network_cpus_map); | |
2397 | if (rc != 0) { | |
fe3881cf | 2398 | pr_warn("network_cpus=%s: malformed cpu list\n", str); |
e5a06939 CM |
2399 | } else { |
2400 | ||
2401 | /* Restrict to "possible" cpus. */ | |
2402 | cpumask_and(&network_cpus_map, &network_cpus_map, | |
2403 | cpu_possible_mask); | |
2404 | ||
2405 | ||
2406 | if (cpumask_empty(&network_cpus_map)) { | |
fe3881cf | 2407 | pr_warn("Ignoring network_cpus='%s'\n", str); |
e5a06939 | 2408 | } else { |
839b2680 TH |
2409 | pr_info("Linux network CPUs: %*pbl\n", |
2410 | cpumask_pr_args(&network_cpus_map)); | |
e5a06939 CM |
2411 | network_cpus_used = true; |
2412 | } | |
2413 | } | |
2414 | ||
2415 | return 0; | |
2416 | } | |
2417 | __setup("network_cpus=", network_cpus_setup); | |
e5a06939 | 2418 | |
d91c6412 | 2419 | #endif |