/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003 Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Red Hat, Inc.
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
	(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
	 sizeof(struct iphdr) + sizeof(struct ethhdr))
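
/*
 * Worked out with the usual on-wire header sizes (8-byte UDP header,
 * 20-byte IPv4 header without options, 14-byte Ethernet header),
 * MAX_SKB_SIZE is 1460 + 8 + 20 + 14 = 1502 bytes, so one pool skb
 * can always carry a full MAX_UDP_CHUNK payload in a single Ethernet
 * frame.
 */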

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		local_irq_save(flags);
		netif_tx_lock(dev);
		if ((netif_queue_stopped(dev) ||
		     netif_subqueue_stopped(dev, skb)) ||
		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			netif_tx_unlock(dev);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		netif_tx_unlock(dev);
		local_irq_restore(flags);
	}
}

static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);

	work = napi->poll(napi, budget);

	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(dev->npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			arp_reply(skb);
	}
}

void netpoll_poll(struct netpoll *np)
{
	struct net_device *dev = np->dev;

	if (!dev || !netif_running(dev) || !dev->poll_controller)
		return;

	/* Process pending work on NIC */
	dev->poll_controller(dev);

	poll_napi(dev);

	service_arp_queue(dev->npinfo);

	zap_completion_queue();
}

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll(np);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	struct net_device *dev = np->dev;
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		unsigned long flags;

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (netif_tx_trylock(dev)) {
				if (!netif_queue_stopped(dev) &&
				    !netif_subqueue_stopped(dev, skb))
					status = dev->hard_start_xmit(skb, dev);
				netif_tx_unlock(dev);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle the device; maybe there is some cleanup to do */
			netpoll_poll(np);

			udelay(USEC_PER_POLL);
		}
		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb->len += len;

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(htonl(np->local_ip),
					htonl(np->remote_ip),
					udp_len, IPPROTO_UDP,
					csum_partial((unsigned char *)udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(htonl(np->local_ip), &(iph->saddr));
	put_unaligned(htonl(np->remote_ip), &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
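
/*
 * Illustrative only: a client such as a console driver would hand its
 * payload to netpoll_send_udp() after a successful netpoll_setup().
 * The helper below is a hypothetical sketch, not part of this file's
 * API; it splits an arbitrary buffer into MAX_UDP_CHUNK-sized pieces,
 * the largest payload a pool skb is sized for:
 *
 *	static void example_send(struct netpoll *np, const char *buf,
 *				 int len)
 *	{
 *		int chunk;
 *
 *		while (len > 0) {
 *			chunk = min(len, MAX_UDP_CHUNK);
 *			netpoll_send_udp(np, buf, chunk);
 *			buf += chunk;
 *			len -= chunk;
 *		}
 *	}
 */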

static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np = NULL;

	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
		np = npinfo->rx_np;
	if (!np)
		return;

	/* No ARP on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
				 (2 * skb->dev->addr_len) +
				 (2 * sizeof(u32)))))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp + 1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* if we actually cared about dst hw addr, it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore ARP? */
	if (tip != htonl(np->local_ip) ||
	    ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4);
	send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
			    LL_RESERVED_SPACE(np->dev));

	if (!send_skb)
		return;

	skb_reset_network_header(send_skb);
	arp = (struct arphdr *) skb_put(send_skb, size);
	send_skb->dev = skb->dev;
	send_skb->protocol = htons(ETH_P_ARP);

	/* Fill the device header for the ARP frame */
	if (dev_hard_header(send_skb, skb->dev, ptype,
			    sha, np->dev->dev_addr,
			    send_skb->len) < 0) {
		kfree_skb(send_skb);
		return;
	}

	/*
	 * Fill out the ARP protocol part.
	 *
	 * We only support the Ethernet device type, for which the
	 * hardware type (according to RFC 1390) should always equal
	 * 1 (Ethernet).
	 */

	arp->ar_hrd = htons(np->dev->type);
	arp->ar_pro = htons(ETH_P_IP);
	arp->ar_hln = np->dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr = (unsigned char *)(arp + 1);
	memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &tip, 4);
	arp_ptr += 4;
	memcpy(arp_ptr, sha, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &sip, 4);

	netpoll_send_skb(np, send_skb);
}
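
/*
 * For reference, the reply assembled above follows the classic
 * RFC 826 layout after the fixed arphdr: our hardware address and the
 * requested IP (tip) go in as the sender pair, and the requester's
 * hardware address (sha) and IP (sip) as the target pair:
 *
 *	ar_hrd | ar_pro | ar_hln | ar_pln | ar_op
 *	sender hw addr (ar_hln bytes) | sender IP (4 bytes)
 *	target hw addr (ar_hln bytes) | target IP (4 bytes)
 */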

int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npi = skb->dev->npinfo;
	struct netpoll *np = npi->rx_np;

	if (!np)
		goto out;
	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npi->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;
	if (np->local_ip && np->local_ip != ntohl(iph->daddr))
		goto out;
	if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
		goto out;
	if (np->local_port && np->local_port != ntohs(uh->dest))
		goto out;

	np->rx_hook(np, ntohs(uh->source),
		    (char *)(uh+1),
		    ulen - sizeof(struct udphdr));

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

void netpoll_print_options(struct netpoll *np)
{
	DECLARE_MAC_BUF(mac);
	printk(KERN_INFO "%s: local port %d\n",
	       np->name, np->local_port);
	printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
	       np->name, HIPQUAD(np->local_ip));
	printk(KERN_INFO "%s: interface %s\n",
	       np->name, np->dev_name);
	printk(KERN_INFO "%s: remote port %d\n",
	       np->name, np->remote_port);
	printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
	       np->name, HIPQUAD(np->remote_ip));
	printk(KERN_INFO "%s: remote ethernet address %s\n",
	       np->name, print_mac(mac, np->remote_mac));
}

int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = ntohl(in_aton(cur));
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = ntohl(in_aton(cur));
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[0] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[1] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[2] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[3] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[4] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
	}

	netpoll_print_options(np);

	return 0;

parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at %s!\n",
	       np->name, cur);
	return -1;
}
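
/*
 * The option string parsed above has the same shape netconsole uses:
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-mac]
 *
 * For example (addresses here are purely illustrative):
 *
 *	6665@192.168.0.2/eth0,6666@192.168.0.1/00:11:22:33:44:55
 *
 * Every field except the target IP may be left empty to keep its
 * default: the ports and device name fall back to whatever the caller
 * preset in *np, a missing local IP is filled in from the device by
 * netpoll_setup(), and an omitted MAC leaves np->remote_mac untouched.
 */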

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	struct netpoll_info *npinfo;
	unsigned long flags;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	np->dev = ndev;
	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto release;
		}

		npinfo->rx_flags = 0;
		npinfo->rx_np = NULL;

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	if (!ndev->poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto release;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto release;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + 4*HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			cond_resched();
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto release;
		}

		np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
		       np->name, HIPQUAD(np->local_ip));
	}

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		npinfo->rx_np = np;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* fill up the skb queue */
	refill_skbs();

	/* last thing to do is link it to the net device structure */
	ndev->npinfo = npinfo;

	/* avoid racing with NAPI reading npinfo */
	synchronize_rcu();

	return 0;

release:
	if (!ndev->npinfo)
		kfree(npinfo);
	np->dev = NULL;
	dev_put(ndev);
	return err;
}
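
/*
 * Illustrative only: a client's life cycle is roughly the sketch
 * below (names and values are hypothetical). When rx_hook is set,
 * __netpoll_rx() diverts matching UDP packets to the hook instead of
 * the normal stack:
 *
 *	static void example_rx(struct netpoll *np, int port, char *msg,
 *			       int len)
 *	{
 *		... consume len bytes at msg, sent from remote port ...
 *	}
 *
 *	static struct netpoll example_np = {
 *		.name        = "example",
 *		.dev_name    = "eth0",
 *		.local_port  = 6665,
 *		.remote_port = 6666,
 *		.rx_hook     = example_rx,
 *	};
 *
 *	err = netpoll_parse_options(&example_np, config);
 *	if (!err)
 *		err = netpoll_setup(&example_np);
 *
 *	netpoll_send_udp(&example_np, "hello\n", 6);
 *
 *	netpoll_cleanup(&example_np);
 */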

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

void netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	if (np->dev) {
		npinfo = np->dev->npinfo;
		if (npinfo) {
			if (npinfo->rx_np == np) {
				spin_lock_irqsave(&npinfo->rx_lock, flags);
				npinfo->rx_np = NULL;
				npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
			}

			if (atomic_dec_and_test(&npinfo->refcnt)) {
				skb_queue_purge(&npinfo->arp_tx);
				skb_queue_purge(&npinfo->txq);
				cancel_rearming_delayed_work(&npinfo->tx_work);

				/* clean up after the last, unfinished work item */
				__skb_queue_purge(&npinfo->txq);
				kfree(npinfo);
				np->dev->npinfo = NULL;
			}
		}

		dev_put(np->dev);
	}

	np->dev = NULL;
}

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_print_options);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);