| 1 | /* |
| 2 | * Copyright (C) 2006-2007 PA Semi, Inc |
| 3 | * |
| 4 | * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License version 2 as |
| 8 | * published by the Free Software Foundation. |
| 9 | * |
| 10 | * This program is distributed in the hope that it will be useful, |
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 13 | * GNU General Public License for more details. |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License |
| 16 | * along with this program; if not, write to the Free Software |
| 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
| 18 | */ |
| 19 | |
| 20 | #include <linux/init.h> |
| 21 | #include <linux/module.h> |
| 22 | #include <linux/pci.h> |
| 23 | #include <linux/interrupt.h> |
| 24 | #include <linux/dmaengine.h> |
| 25 | #include <linux/delay.h> |
| 26 | #include <linux/netdevice.h> |
| 27 | #include <linux/of_mdio.h> |
| 28 | #include <linux/etherdevice.h> |
| 29 | #include <asm/dma-mapping.h> |
| 30 | #include <linux/in.h> |
| 31 | #include <linux/skbuff.h> |
| 32 | |
| 33 | #include <linux/ip.h> |
| 34 | #include <linux/tcp.h> |
| 35 | #include <net/checksum.h> |
| 36 | #include <linux/inet_lro.h> |
| 37 | |
| 38 | #include <asm/irq.h> |
| 39 | #include <asm/firmware.h> |
| 40 | #include <asm/pasemi_dma.h> |
| 41 | |
| 42 | #include "pasemi_mac.h" |
| 43 | |
| 44 | /* We have our own align, since ppc64 in general has it at 0 because |
| 45 | * of design flaws in some of the server bridge chips. However, for |
| 46 | * PWRficient doing the unaligned copies is more expensive than doing |
| 47 | * unaligned DMA, so make sure the data is aligned instead. |
| 48 | */ |
| 49 | #define LOCAL_SKB_ALIGN 2 |
| 50 | |
| 51 | /* TODO list |
| 52 | * |
| 53 | * - Multicast support |
| 54 | * - Large MTU support |
| 55 | * - SW LRO |
| 56 | * - Multiqueue RX/TX |
| 57 | */ |
| 58 | |
| 59 | #define LRO_MAX_AGGR 64 |
| 60 | |
| 61 | #define PE_MIN_MTU 64 |
| 62 | #define PE_MAX_MTU 9000 |
| 63 | #define PE_DEF_MTU ETH_DATA_LEN |
| 64 | |
| 65 | #define DEFAULT_MSG_ENABLE \ |
| 66 | (NETIF_MSG_DRV | \ |
| 67 | NETIF_MSG_PROBE | \ |
| 68 | NETIF_MSG_LINK | \ |
| 69 | NETIF_MSG_TIMER | \ |
| 70 | NETIF_MSG_IFDOWN | \ |
| 71 | NETIF_MSG_IFUP | \ |
| 72 | NETIF_MSG_RX_ERR | \ |
| 73 | NETIF_MSG_TX_ERR) |
| 74 | |
| 75 | MODULE_LICENSE("GPL"); |
MODULE_AUTHOR("Olof Johansson <olof@lixom.net>");
| 77 | MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver"); |
| 78 | |
| 79 | static int debug = -1; /* -1 == use DEFAULT_MSG_ENABLE as value */ |
| 80 | module_param(debug, int, 0); |
| 81 | MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value"); |
| 82 | |
| 83 | extern const struct ethtool_ops pasemi_mac_ethtool_ops; |
| 84 | |
| 85 | static int translation_enabled(void) |
| 86 | { |
| 87 | #if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE) |
| 88 | return 1; |
| 89 | #else |
| 90 | return firmware_has_feature(FW_FEATURE_LPAR); |
| 91 | #endif |
| 92 | } |
| 93 | |
| 94 | static void write_iob_reg(unsigned int reg, unsigned int val) |
| 95 | { |
| 96 | pasemi_write_iob_reg(reg, val); |
| 97 | } |
| 98 | |
| 99 | static unsigned int read_mac_reg(const struct pasemi_mac *mac, unsigned int reg) |
| 100 | { |
| 101 | return pasemi_read_mac_reg(mac->dma_if, reg); |
| 102 | } |
| 103 | |
| 104 | static void write_mac_reg(const struct pasemi_mac *mac, unsigned int reg, |
| 105 | unsigned int val) |
| 106 | { |
| 107 | pasemi_write_mac_reg(mac->dma_if, reg, val); |
| 108 | } |
| 109 | |
| 110 | static unsigned int read_dma_reg(unsigned int reg) |
| 111 | { |
| 112 | return pasemi_read_dma_reg(reg); |
| 113 | } |
| 114 | |
| 115 | static void write_dma_reg(unsigned int reg, unsigned int val) |
| 116 | { |
| 117 | pasemi_write_dma_reg(reg, val); |
| 118 | } |
| 119 | |
| 120 | static struct pasemi_mac_rxring *rx_ring(const struct pasemi_mac *mac) |
| 121 | { |
| 122 | return mac->rx; |
| 123 | } |
| 124 | |
| 125 | static struct pasemi_mac_txring *tx_ring(const struct pasemi_mac *mac) |
| 126 | { |
| 127 | return mac->tx; |
| 128 | } |
| 129 | |
| 130 | static inline void prefetch_skb(const struct sk_buff *skb) |
| 131 | { |
| 132 | const void *d = skb; |
| 133 | |
| 134 | prefetch(d); |
| 135 | prefetch(d+64); |
| 136 | prefetch(d+128); |
| 137 | prefetch(d+192); |
| 138 | } |
| 139 | |
| 140 | static int mac_to_intf(struct pasemi_mac *mac) |
| 141 | { |
| 142 | struct pci_dev *pdev = mac->pdev; |
| 143 | u32 tmp; |
| 144 | int nintf, off, i, j; |
| 145 | int devfn = pdev->devfn; |
| 146 | |
| 147 | tmp = read_dma_reg(PAS_DMA_CAP_IFI); |
| 148 | nintf = (tmp & PAS_DMA_CAP_IFI_NIN_M) >> PAS_DMA_CAP_IFI_NIN_S; |
| 149 | off = (tmp & PAS_DMA_CAP_IFI_IOFF_M) >> PAS_DMA_CAP_IFI_IOFF_S; |
| 150 | |
	/* IOFF contains the offset to the registers containing the
	 * DMA interface-to-MAC-pci-id mappings, and NIN contains the
	 * total number of interfaces. Each register contains 4 devfns.
	 * Just do a linear search until we find the devfn of the MAC
	 * we're trying to look up.
	 */
| 157 | |
| 158 | for (i = 0; i < (nintf+3)/4; i++) { |
| 159 | tmp = read_dma_reg(off+4*i); |
| 160 | for (j = 0; j < 4; j++) { |
| 161 | if (((tmp >> (8*j)) & 0xff) == devfn) |
| 162 | return i*4 + j; |
| 163 | } |
| 164 | } |
| 165 | return -1; |
| 166 | } |
| 167 | |
| 168 | static void pasemi_mac_intf_disable(struct pasemi_mac *mac) |
| 169 | { |
| 170 | unsigned int flags; |
| 171 | |
| 172 | flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); |
| 173 | flags &= ~PAS_MAC_CFG_PCFG_PE; |
| 174 | write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); |
| 175 | } |
| 176 | |
| 177 | static void pasemi_mac_intf_enable(struct pasemi_mac *mac) |
| 178 | { |
| 179 | unsigned int flags; |
| 180 | |
| 181 | flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); |
| 182 | flags |= PAS_MAC_CFG_PCFG_PE; |
| 183 | write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); |
| 184 | } |
| 185 | |
| 186 | static int pasemi_get_mac_addr(struct pasemi_mac *mac) |
| 187 | { |
| 188 | struct pci_dev *pdev = mac->pdev; |
| 189 | struct device_node *dn = pci_device_to_OF_node(pdev); |
| 190 | int len; |
| 191 | const u8 *maddr; |
| 192 | u8 addr[6]; |
| 193 | |
| 194 | if (!dn) { |
| 195 | dev_dbg(&pdev->dev, |
| 196 | "No device node for mac, not configuring\n"); |
| 197 | return -ENOENT; |
| 198 | } |
| 199 | |
| 200 | maddr = of_get_property(dn, "local-mac-address", &len); |
| 201 | |
| 202 | if (maddr && len == 6) { |
| 203 | memcpy(mac->mac_addr, maddr, 6); |
| 204 | return 0; |
| 205 | } |
| 206 | |
	/* Some old firmware versions mistakenly use the mac-address
	 * property (and as a string) instead of a byte array in
	 * local-mac-address.
	 */
| 210 | |
| 211 | if (maddr == NULL) |
| 212 | maddr = of_get_property(dn, "mac-address", NULL); |
| 213 | |
| 214 | if (maddr == NULL) { |
| 215 | dev_warn(&pdev->dev, |
| 216 | "no mac address in device tree, not configuring\n"); |
| 217 | return -ENOENT; |
| 218 | } |
| 219 | |
| 220 | if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0], |
| 221 | &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) { |
| 222 | dev_warn(&pdev->dev, |
| 223 | "can't parse mac address, not configuring\n"); |
| 224 | return -EINVAL; |
| 225 | } |
| 226 | |
| 227 | memcpy(mac->mac_addr, addr, 6); |
| 228 | |
| 229 | return 0; |
| 230 | } |
| 231 | |
| 232 | static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p) |
| 233 | { |
| 234 | struct pasemi_mac *mac = netdev_priv(dev); |
| 235 | struct sockaddr *addr = p; |
| 236 | unsigned int adr0, adr1; |
| 237 | |
| 238 | if (!is_valid_ether_addr(addr->sa_data)) |
| 239 | return -EINVAL; |
| 240 | |
| 241 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
| 242 | |
| 243 | adr0 = dev->dev_addr[2] << 24 | |
| 244 | dev->dev_addr[3] << 16 | |
| 245 | dev->dev_addr[4] << 8 | |
| 246 | dev->dev_addr[5]; |
| 247 | adr1 = read_mac_reg(mac, PAS_MAC_CFG_ADR1); |
| 248 | adr1 &= ~0xffff; |
| 249 | adr1 |= dev->dev_addr[0] << 8 | dev->dev_addr[1]; |
| 250 | |
| 251 | pasemi_mac_intf_disable(mac); |
| 252 | write_mac_reg(mac, PAS_MAC_CFG_ADR0, adr0); |
| 253 | write_mac_reg(mac, PAS_MAC_CFG_ADR1, adr1); |
| 254 | pasemi_mac_intf_enable(mac); |
| 255 | |
| 256 | return 0; |
| 257 | } |
| 258 | |
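/* LRO callback used by inet_lro: 'data' carries the hardware RX status
 * word for the frame. Only complete, checksum-verified IPv4/TCP packets
 * are eligible for aggregation; everything else returns -1.
 */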
| 259 | static int get_skb_hdr(struct sk_buff *skb, void **iphdr, |
| 260 | void **tcph, u64 *hdr_flags, void *data) |
| 261 | { |
| 262 | u64 macrx = (u64) data; |
| 263 | unsigned int ip_len; |
| 264 | struct iphdr *iph; |
| 265 | |
| 266 | /* IPv4 header checksum failed */ |
| 267 | if ((macrx & XCT_MACRX_HTY_M) != XCT_MACRX_HTY_IPV4_OK) |
| 268 | return -1; |
| 269 | |
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* non-TCP packet */
	if (iph->protocol != IPPROTO_TCP)
		return -1;
| 275 | |
| 276 | ip_len = ip_hdrlen(skb); |
| 277 | skb_set_transport_header(skb, ip_len); |
| 278 | *tcph = tcp_hdr(skb); |
| 279 | |
| 280 | /* check if ip header and tcp header are complete */ |
| 281 | if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb)) |
| 282 | return -1; |
| 283 | |
| 284 | *hdr_flags = LRO_IPV4 | LRO_TCP; |
| 285 | *iphdr = iph; |
| 286 | |
| 287 | return 0; |
| 288 | } |
| 289 | |
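/* Unmap the head and all fragments of a completed TX skb and free it.
 * Returns the number of ring slots the packet occupied (mactx descriptor,
 * head pointer and fragment pointers, rounded up to an even count) so the
 * cleanup code can advance past it.
 */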
| 290 | static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac, |
| 291 | const int nfrags, |
| 292 | struct sk_buff *skb, |
| 293 | const dma_addr_t *dmas) |
| 294 | { |
| 295 | int f; |
| 296 | struct pci_dev *pdev = mac->dma_pdev; |
| 297 | |
| 298 | pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE); |
| 299 | |
| 300 | for (f = 0; f < nfrags; f++) { |
| 301 | skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; |
| 302 | |
| 303 | pci_unmap_page(pdev, dmas[f+1], frag->size, PCI_DMA_TODEVICE); |
| 304 | } |
| 305 | dev_kfree_skb_irq(skb); |
| 306 | |
	/* Freed descriptor slot + main SKB ptr + nfrags additional ptrs,
	 * rounded up to an even number of slots
	 */
	return (nfrags + 3) & ~1;
| 311 | } |
| 312 | |
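/* Allocate a checksum-offload ring: a TX-class DMA channel bound to a
 * function engine, plus two event flags that are used to order the
 * checksum writeback against the MAC TX descriptors. These rings are
 * only used for frames too large for the MAC's inline checksum engine.
 */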
| 313 | static struct pasemi_mac_csring *pasemi_mac_setup_csring(struct pasemi_mac *mac) |
| 314 | { |
| 315 | struct pasemi_mac_csring *ring; |
| 316 | u32 val; |
| 317 | unsigned int cfg; |
| 318 | int chno; |
| 319 | |
| 320 | ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_csring), |
| 321 | offsetof(struct pasemi_mac_csring, chan)); |
| 322 | |
| 323 | if (!ring) { |
| 324 | dev_err(&mac->pdev->dev, "Can't allocate checksum channel\n"); |
| 325 | goto out_chan; |
| 326 | } |
| 327 | |
| 328 | chno = ring->chan.chno; |
| 329 | |
| 330 | ring->size = CS_RING_SIZE; |
| 331 | ring->next_to_fill = 0; |
| 332 | |
| 333 | /* Allocate descriptors */ |
| 334 | if (pasemi_dma_alloc_ring(&ring->chan, CS_RING_SIZE)) |
| 335 | goto out_ring_desc; |
| 336 | |
| 337 | write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno), |
| 338 | PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma)); |
| 339 | val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32); |
| 340 | val |= PAS_DMA_TXCHAN_BASEU_SIZ(CS_RING_SIZE >> 3); |
| 341 | |
| 342 | write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val); |
| 343 | |
| 344 | ring->events[0] = pasemi_dma_alloc_flag(); |
| 345 | ring->events[1] = pasemi_dma_alloc_flag(); |
| 346 | if (ring->events[0] < 0 || ring->events[1] < 0) |
| 347 | goto out_flags; |
| 348 | |
| 349 | pasemi_dma_clear_flag(ring->events[0]); |
| 350 | pasemi_dma_clear_flag(ring->events[1]); |
| 351 | |
| 352 | ring->fun = pasemi_dma_alloc_fun(); |
| 353 | if (ring->fun < 0) |
| 354 | goto out_fun; |
| 355 | |
| 356 | cfg = PAS_DMA_TXCHAN_CFG_TY_FUNC | PAS_DMA_TXCHAN_CFG_UP | |
| 357 | PAS_DMA_TXCHAN_CFG_TATTR(ring->fun) | |
| 358 | PAS_DMA_TXCHAN_CFG_LPSQ | PAS_DMA_TXCHAN_CFG_LPDQ; |
| 359 | |
| 360 | if (translation_enabled()) |
| 361 | cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR; |
| 362 | |
| 363 | write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg); |
| 364 | |
| 365 | /* enable channel */ |
| 366 | pasemi_dma_start_chan(&ring->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ | |
| 367 | PAS_DMA_TXCHAN_TCMDSTA_DB | |
| 368 | PAS_DMA_TXCHAN_TCMDSTA_DE | |
| 369 | PAS_DMA_TXCHAN_TCMDSTA_DA); |
| 370 | |
| 371 | return ring; |
| 372 | |
| 373 | out_fun: |
| 374 | out_flags: |
| 375 | if (ring->events[0] >= 0) |
| 376 | pasemi_dma_free_flag(ring->events[0]); |
| 377 | if (ring->events[1] >= 0) |
| 378 | pasemi_dma_free_flag(ring->events[1]); |
| 379 | pasemi_dma_free_ring(&ring->chan); |
| 380 | out_ring_desc: |
| 381 | pasemi_dma_free_chan(&ring->chan); |
| 382 | out_chan: |
| 383 | |
| 384 | return NULL; |
| 385 | } |
| 386 | |
| 387 | static void pasemi_mac_setup_csrings(struct pasemi_mac *mac) |
| 388 | { |
| 389 | int i; |
| 390 | mac->cs[0] = pasemi_mac_setup_csring(mac); |
| 391 | if (mac->type == MAC_TYPE_XAUI) |
| 392 | mac->cs[1] = pasemi_mac_setup_csring(mac); |
| 393 | else |
		mac->cs[1] = NULL;
| 395 | |
| 396 | for (i = 0; i < MAX_CS; i++) |
| 397 | if (mac->cs[i]) |
| 398 | mac->num_cs++; |
| 399 | } |
| 400 | |
| 401 | static void pasemi_mac_free_csring(struct pasemi_mac_csring *csring) |
| 402 | { |
| 403 | pasemi_dma_stop_chan(&csring->chan); |
| 404 | pasemi_dma_free_flag(csring->events[0]); |
| 405 | pasemi_dma_free_flag(csring->events[1]); |
| 406 | pasemi_dma_free_ring(&csring->chan); |
| 407 | pasemi_dma_free_chan(&csring->chan); |
| 408 | pasemi_dma_free_fun(csring->fun); |
| 409 | } |
| 410 | |
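/* Allocate the RX ring: a DMA channel for the descriptor ring, a shadow
 * ring_info array for skb/dma bookkeeping, and a separate buffer ring for
 * the RX interface, then program the channel and interface registers.
 */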
| 411 | static int pasemi_mac_setup_rx_resources(const struct net_device *dev) |
| 412 | { |
| 413 | struct pasemi_mac_rxring *ring; |
| 414 | struct pasemi_mac *mac = netdev_priv(dev); |
| 415 | int chno; |
| 416 | unsigned int cfg; |
| 417 | |
| 418 | ring = pasemi_dma_alloc_chan(RXCHAN, sizeof(struct pasemi_mac_rxring), |
| 419 | offsetof(struct pasemi_mac_rxring, chan)); |
| 420 | |
| 421 | if (!ring) { |
| 422 | dev_err(&mac->pdev->dev, "Can't allocate RX channel\n"); |
| 423 | goto out_chan; |
| 424 | } |
| 425 | chno = ring->chan.chno; |
| 426 | |
| 427 | spin_lock_init(&ring->lock); |
| 428 | |
| 429 | ring->size = RX_RING_SIZE; |
| 430 | ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) * |
| 431 | RX_RING_SIZE, GFP_KERNEL); |
| 432 | |
| 433 | if (!ring->ring_info) |
| 434 | goto out_ring_info; |
| 435 | |
| 436 | /* Allocate descriptors */ |
| 437 | if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE)) |
| 438 | goto out_ring_desc; |
| 439 | |
| 440 | ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev, |
| 441 | RX_RING_SIZE * sizeof(u64), |
| 442 | &ring->buf_dma, GFP_KERNEL); |
| 443 | if (!ring->buffers) |
| 444 | goto out_ring_desc; |
| 445 | |
| 446 | memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64)); |
| 447 | |
| 448 | write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno), |
| 449 | PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma)); |
| 450 | |
| 451 | write_dma_reg(PAS_DMA_RXCHAN_BASEU(chno), |
| 452 | PAS_DMA_RXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32) | |
| 453 | PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3)); |
| 454 | |
| 455 | cfg = PAS_DMA_RXCHAN_CFG_HBU(2); |
| 456 | |
| 457 | if (translation_enabled()) |
| 458 | cfg |= PAS_DMA_RXCHAN_CFG_CTR; |
| 459 | |
| 460 | write_dma_reg(PAS_DMA_RXCHAN_CFG(chno), cfg); |
| 461 | |
| 462 | write_dma_reg(PAS_DMA_RXINT_BASEL(mac->dma_if), |
| 463 | PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma)); |
| 464 | |
| 465 | write_dma_reg(PAS_DMA_RXINT_BASEU(mac->dma_if), |
| 466 | PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) | |
| 467 | PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3)); |
| 468 | |
| 469 | cfg = PAS_DMA_RXINT_CFG_DHL(2) | PAS_DMA_RXINT_CFG_L2 | |
| 470 | PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP | |
| 471 | PAS_DMA_RXINT_CFG_HEN; |
| 472 | |
| 473 | if (translation_enabled()) |
| 474 | cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR; |
| 475 | |
| 476 | write_dma_reg(PAS_DMA_RXINT_CFG(mac->dma_if), cfg); |
| 477 | |
| 478 | ring->next_to_fill = 0; |
| 479 | ring->next_to_clean = 0; |
| 480 | ring->mac = mac; |
| 481 | mac->rx = ring; |
| 482 | |
| 483 | return 0; |
| 484 | |
| 485 | out_ring_desc: |
| 486 | kfree(ring->ring_info); |
| 487 | out_ring_info: |
| 488 | pasemi_dma_free_chan(&ring->chan); |
| 489 | out_chan: |
| 490 | return -ENOMEM; |
| 491 | } |
| 492 | |
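/* Allocate the TX ring: a TX DMA channel plus a shadow ring_info array,
 * and point the channel at the descriptor ring before configuring it for
 * this MAC's DMA interface.
 */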
| 493 | static struct pasemi_mac_txring * |
| 494 | pasemi_mac_setup_tx_resources(const struct net_device *dev) |
| 495 | { |
| 496 | struct pasemi_mac *mac = netdev_priv(dev); |
| 497 | u32 val; |
| 498 | struct pasemi_mac_txring *ring; |
| 499 | unsigned int cfg; |
| 500 | int chno; |
| 501 | |
| 502 | ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_txring), |
| 503 | offsetof(struct pasemi_mac_txring, chan)); |
| 504 | |
| 505 | if (!ring) { |
| 506 | dev_err(&mac->pdev->dev, "Can't allocate TX channel\n"); |
| 507 | goto out_chan; |
| 508 | } |
| 509 | |
| 510 | chno = ring->chan.chno; |
| 511 | |
| 512 | spin_lock_init(&ring->lock); |
| 513 | |
| 514 | ring->size = TX_RING_SIZE; |
| 515 | ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) * |
| 516 | TX_RING_SIZE, GFP_KERNEL); |
| 517 | if (!ring->ring_info) |
| 518 | goto out_ring_info; |
| 519 | |
| 520 | /* Allocate descriptors */ |
| 521 | if (pasemi_dma_alloc_ring(&ring->chan, TX_RING_SIZE)) |
| 522 | goto out_ring_desc; |
| 523 | |
| 524 | write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno), |
| 525 | PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma)); |
| 526 | val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32); |
| 527 | val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3); |
| 528 | |
| 529 | write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val); |
| 530 | |
| 531 | cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE | |
| 532 | PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) | |
| 533 | PAS_DMA_TXCHAN_CFG_UP | |
| 534 | PAS_DMA_TXCHAN_CFG_WT(4); |
| 535 | |
| 536 | if (translation_enabled()) |
| 537 | cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR; |
| 538 | |
| 539 | write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg); |
| 540 | |
| 541 | ring->next_to_fill = 0; |
| 542 | ring->next_to_clean = 0; |
| 543 | ring->mac = mac; |
| 544 | |
| 545 | return ring; |
| 546 | |
| 547 | out_ring_desc: |
| 548 | kfree(ring->ring_info); |
| 549 | out_ring_info: |
| 550 | pasemi_dma_free_chan(&ring->chan); |
| 551 | out_chan: |
| 552 | return NULL; |
| 553 | } |
| 554 | |
| 555 | static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac) |
| 556 | { |
| 557 | struct pasemi_mac_txring *txring = tx_ring(mac); |
| 558 | unsigned int i, j; |
| 559 | struct pasemi_mac_buffer *info; |
| 560 | dma_addr_t dmas[MAX_SKB_FRAGS+1]; |
| 561 | int freed, nfrags; |
| 562 | int start, limit; |
| 563 | |
| 564 | start = txring->next_to_clean; |
| 565 | limit = txring->next_to_fill; |
| 566 | |
| 567 | /* Compensate for when fill has wrapped and clean has not */ |
| 568 | if (start > limit) |
| 569 | limit += TX_RING_SIZE; |
| 570 | |
| 571 | for (i = start; i < limit; i += freed) { |
| 572 | info = &txring->ring_info[(i+1) & (TX_RING_SIZE-1)]; |
| 573 | if (info->dma && info->skb) { |
| 574 | nfrags = skb_shinfo(info->skb)->nr_frags; |
| 575 | for (j = 0; j <= nfrags; j++) |
| 576 | dmas[j] = txring->ring_info[(i+1+j) & |
| 577 | (TX_RING_SIZE-1)].dma; |
| 578 | freed = pasemi_mac_unmap_tx_skb(mac, nfrags, |
| 579 | info->skb, dmas); |
| 580 | } else |
| 581 | freed = 2; |
| 582 | } |
| 583 | |
| 584 | kfree(txring->ring_info); |
| 585 | pasemi_dma_free_chan(&txring->chan); |
| 586 | |
| 587 | } |
| 588 | |
| 589 | static void pasemi_mac_free_rx_buffers(struct pasemi_mac *mac) |
| 590 | { |
| 591 | struct pasemi_mac_rxring *rx = rx_ring(mac); |
| 592 | unsigned int i; |
| 593 | struct pasemi_mac_buffer *info; |
| 594 | |
| 595 | for (i = 0; i < RX_RING_SIZE; i++) { |
| 596 | info = &RX_DESC_INFO(rx, i); |
| 597 | if (info->skb && info->dma) { |
| 598 | pci_unmap_single(mac->dma_pdev, |
| 599 | info->dma, |
| 600 | info->skb->len, |
| 601 | PCI_DMA_FROMDEVICE); |
| 602 | dev_kfree_skb_any(info->skb); |
| 603 | } |
| 604 | info->dma = 0; |
| 605 | info->skb = NULL; |
| 606 | } |
| 607 | |
| 608 | for (i = 0; i < RX_RING_SIZE; i++) |
| 609 | RX_BUFF(rx, i) = 0; |
| 610 | } |
| 611 | |
| 612 | static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac) |
| 613 | { |
| 614 | pasemi_mac_free_rx_buffers(mac); |
| 615 | |
| 616 | dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64), |
| 617 | rx_ring(mac)->buffers, rx_ring(mac)->buf_dma); |
| 618 | |
| 619 | kfree(rx_ring(mac)->ring_info); |
| 620 | pasemi_dma_free_chan(&rx_ring(mac)->chan); |
| 621 | mac->rx = NULL; |
| 622 | } |
| 623 | |
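/* Refill up to 'limit' RX buffer slots with freshly allocated and
 * DMA-mapped skbs, then tell the hardware how many new buffers are
 * available via the RXINT_INCR register.
 */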
| 624 | static void pasemi_mac_replenish_rx_ring(const struct net_device *dev, |
| 625 | const int limit) |
| 626 | { |
| 627 | const struct pasemi_mac *mac = netdev_priv(dev); |
| 628 | struct pasemi_mac_rxring *rx = rx_ring(mac); |
| 629 | int fill, count; |
| 630 | |
| 631 | if (limit <= 0) |
| 632 | return; |
| 633 | |
| 634 | fill = rx_ring(mac)->next_to_fill; |
| 635 | for (count = 0; count < limit; count++) { |
| 636 | struct pasemi_mac_buffer *info = &RX_DESC_INFO(rx, fill); |
| 637 | u64 *buff = &RX_BUFF(rx, fill); |
| 638 | struct sk_buff *skb; |
| 639 | dma_addr_t dma; |
| 640 | |
| 641 | /* Entry in use? */ |
| 642 | WARN_ON(*buff); |
| 643 | |
		skb = dev_alloc_skb(mac->bufsz);
		if (unlikely(!skb))
			break;

		skb_reserve(skb, LOCAL_SKB_ALIGN);
| 649 | |
| 650 | dma = pci_map_single(mac->dma_pdev, skb->data, |
| 651 | mac->bufsz - LOCAL_SKB_ALIGN, |
| 652 | PCI_DMA_FROMDEVICE); |
| 653 | |
		if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) {
			dev_kfree_skb_irq(skb);
			break;
		}
| 658 | |
| 659 | info->skb = skb; |
| 660 | info->dma = dma; |
| 661 | *buff = XCT_RXB_LEN(mac->bufsz) | XCT_RXB_ADDR(dma); |
| 662 | fill++; |
| 663 | } |
| 664 | |
| 665 | wmb(); |
| 666 | |
| 667 | write_dma_reg(PAS_DMA_RXINT_INCR(mac->dma_if), count); |
| 668 | |
| 669 | rx_ring(mac)->next_to_fill = (rx_ring(mac)->next_to_fill + count) & |
| 670 | (RX_RING_SIZE - 1); |
| 671 | } |
| 672 | |
| 673 | static void pasemi_mac_restart_rx_intr(const struct pasemi_mac *mac) |
| 674 | { |
| 675 | struct pasemi_mac_rxring *rx = rx_ring(mac); |
| 676 | unsigned int reg, pcnt; |
| 677 | /* Re-enable packet count interrupts: finally |
| 678 | * ack the packet count interrupt we got in rx_intr. |
| 679 | */ |
| 680 | |
| 681 | pcnt = *rx->chan.status & PAS_STATUS_PCNT_M; |
| 682 | |
| 683 | reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC; |
| 684 | |
| 685 | if (*rx->chan.status & PAS_STATUS_TIMER) |
| 686 | reg |= PAS_IOB_DMA_RXCH_RESET_TINTC; |
| 687 | |
| 688 | write_iob_reg(PAS_IOB_DMA_RXCH_RESET(mac->rx->chan.chno), reg); |
| 689 | } |
| 690 | |
| 691 | static void pasemi_mac_restart_tx_intr(const struct pasemi_mac *mac) |
| 692 | { |
| 693 | unsigned int reg, pcnt; |
| 694 | |
| 695 | /* Re-enable packet count interrupts */ |
| 696 | pcnt = *tx_ring(mac)->chan.status & PAS_STATUS_PCNT_M; |
| 697 | |
| 698 | reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC; |
| 699 | |
| 700 | write_iob_reg(PAS_IOB_DMA_TXCH_RESET(tx_ring(mac)->chan.chno), reg); |
| 701 | } |
| 702 | |
| 703 | |
| 704 | static inline void pasemi_mac_rx_error(const struct pasemi_mac *mac, |
| 705 | const u64 macrx) |
| 706 | { |
| 707 | unsigned int rcmdsta, ccmdsta; |
| 708 | struct pasemi_dmachan *chan = &rx_ring(mac)->chan; |
| 709 | |
| 710 | if (!netif_msg_rx_err(mac)) |
| 711 | return; |
| 712 | |
| 713 | rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); |
| 714 | ccmdsta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno)); |
| 715 | |
| 716 | printk(KERN_ERR "pasemi_mac: rx error. macrx %016llx, rx status %llx\n", |
| 717 | macrx, *chan->status); |
| 718 | |
| 719 | printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n", |
| 720 | rcmdsta, ccmdsta); |
| 721 | } |
| 722 | |
| 723 | static inline void pasemi_mac_tx_error(const struct pasemi_mac *mac, |
| 724 | const u64 mactx) |
| 725 | { |
| 726 | unsigned int cmdsta; |
| 727 | struct pasemi_dmachan *chan = &tx_ring(mac)->chan; |
| 728 | |
| 729 | if (!netif_msg_tx_err(mac)) |
| 730 | return; |
| 731 | |
| 732 | cmdsta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno)); |
| 733 | |
| 734 | printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016llx, "\ |
| 735 | "tx status 0x%016llx\n", mactx, *chan->status); |
| 736 | |
| 737 | printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta); |
| 738 | } |
| 739 | |
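/* Process up to 'limit' received packets: walk the completed descriptors,
 * unmap the buffers, hand the skbs to LRO/the stack, and replenish the
 * ring. Called from the NAPI poll loop with the remaining budget as the
 * limit.
 */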
| 740 | static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx, |
| 741 | const int limit) |
| 742 | { |
| 743 | const struct pasemi_dmachan *chan = &rx->chan; |
| 744 | struct pasemi_mac *mac = rx->mac; |
| 745 | struct pci_dev *pdev = mac->dma_pdev; |
| 746 | unsigned int n; |
| 747 | int count, buf_index, tot_bytes, packets; |
| 748 | struct pasemi_mac_buffer *info; |
| 749 | struct sk_buff *skb; |
| 750 | unsigned int len; |
| 751 | u64 macrx, eval; |
| 752 | dma_addr_t dma; |
| 753 | |
| 754 | tot_bytes = 0; |
| 755 | packets = 0; |
| 756 | |
| 757 | spin_lock(&rx->lock); |
| 758 | |
| 759 | n = rx->next_to_clean; |
| 760 | |
| 761 | prefetch(&RX_DESC(rx, n)); |
| 762 | |
| 763 | for (count = 0; count < limit; count++) { |
| 764 | macrx = RX_DESC(rx, n); |
| 765 | prefetch(&RX_DESC(rx, n+4)); |
| 766 | |
| 767 | if ((macrx & XCT_MACRX_E) || |
| 768 | (*chan->status & PAS_STATUS_ERROR)) |
| 769 | pasemi_mac_rx_error(mac, macrx); |
| 770 | |
| 771 | if (!(macrx & XCT_MACRX_O)) |
| 772 | break; |
| 773 | |
| 774 | info = NULL; |
| 775 | |
| 776 | BUG_ON(!(macrx & XCT_MACRX_RR_8BRES)); |
| 777 | |
| 778 | eval = (RX_DESC(rx, n+1) & XCT_RXRES_8B_EVAL_M) >> |
| 779 | XCT_RXRES_8B_EVAL_S; |
| 780 | buf_index = eval-1; |
| 781 | |
| 782 | dma = (RX_DESC(rx, n+2) & XCT_PTR_ADDR_M); |
| 783 | info = &RX_DESC_INFO(rx, buf_index); |
| 784 | |
| 785 | skb = info->skb; |
| 786 | |
| 787 | prefetch_skb(skb); |
| 788 | |
| 789 | len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S; |
| 790 | |
| 791 | pci_unmap_single(pdev, dma, mac->bufsz - LOCAL_SKB_ALIGN, |
| 792 | PCI_DMA_FROMDEVICE); |
| 793 | |
| 794 | if (macrx & XCT_MACRX_CRC) { |
| 795 | /* CRC error flagged */ |
| 796 | mac->netdev->stats.rx_errors++; |
| 797 | mac->netdev->stats.rx_crc_errors++; |
| 798 | /* No need to free skb, it'll be reused */ |
| 799 | goto next; |
| 800 | } |
| 801 | |
| 802 | info->skb = NULL; |
| 803 | info->dma = 0; |
| 804 | |
| 805 | if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) { |
| 806 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
| 807 | skb->csum = (macrx & XCT_MACRX_CSUM_M) >> |
| 808 | XCT_MACRX_CSUM_S; |
| 809 | } else |
| 810 | skb->ip_summed = CHECKSUM_NONE; |
| 811 | |
| 812 | packets++; |
| 813 | tot_bytes += len; |
| 814 | |
| 815 | /* Don't include CRC */ |
| 816 | skb_put(skb, len-4); |
| 817 | |
| 818 | skb->protocol = eth_type_trans(skb, mac->netdev); |
| 819 | lro_receive_skb(&mac->lro_mgr, skb, (void *)macrx); |
| 820 | |
| 821 | next: |
| 822 | RX_DESC(rx, n) = 0; |
| 823 | RX_DESC(rx, n+1) = 0; |
| 824 | |
		/* The hardware doesn't clear the buffer pointer for us, so
		 * zero it here; the replenish loop relies on it to tell
		 * which entries are free.
		 */
| 828 | RX_BUFF(rx, buf_index) = 0; |
| 829 | |
| 830 | n += 4; |
| 831 | } |
| 832 | |
| 833 | if (n > RX_RING_SIZE) { |
| 834 | /* Errata 5971 workaround: L2 target of headers */ |
| 835 | write_iob_reg(PAS_IOB_COM_PKTHDRCNT, 0); |
| 836 | n &= (RX_RING_SIZE-1); |
| 837 | } |
| 838 | |
| 839 | rx_ring(mac)->next_to_clean = n; |
| 840 | |
| 841 | lro_flush_all(&mac->lro_mgr); |
| 842 | |
| 843 | /* Increase is in number of 16-byte entries, and since each descriptor |
| 844 | * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with |
| 845 | * count*2. |
| 846 | */ |
| 847 | write_dma_reg(PAS_DMA_RXCHAN_INCR(mac->rx->chan.chno), count << 1); |
| 848 | |
| 849 | pasemi_mac_replenish_rx_ring(mac->netdev, count); |
| 850 | |
| 851 | mac->netdev->stats.rx_bytes += tot_bytes; |
| 852 | mac->netdev->stats.rx_packets += packets; |
| 853 | |
| 854 | spin_unlock(&rx_ring(mac)->lock); |
| 855 | |
| 856 | return count; |
| 857 | } |
| 858 | |
| 859 | /* Can't make this too large or we blow the kernel stack limits */ |
| 860 | #define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS) |
| 861 | |
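/* Reclaim completed TX descriptors. Packets are collected in batches of
 * TX_CLEAN_BATCHSIZE under the ring lock, then unmapped and freed after
 * the lock is dropped; if a batch fills up we go around again.
 */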
| 862 | static int pasemi_mac_clean_tx(struct pasemi_mac_txring *txring) |
| 863 | { |
| 864 | struct pasemi_dmachan *chan = &txring->chan; |
| 865 | struct pasemi_mac *mac = txring->mac; |
| 866 | int i, j; |
| 867 | unsigned int start, descr_count, buf_count, batch_limit; |
| 868 | unsigned int ring_limit; |
| 869 | unsigned int total_count; |
| 870 | unsigned long flags; |
| 871 | struct sk_buff *skbs[TX_CLEAN_BATCHSIZE]; |
| 872 | dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1]; |
| 873 | int nf[TX_CLEAN_BATCHSIZE]; |
| 874 | int nr_frags; |
| 875 | |
| 876 | total_count = 0; |
| 877 | batch_limit = TX_CLEAN_BATCHSIZE; |
| 878 | restart: |
| 879 | spin_lock_irqsave(&txring->lock, flags); |
| 880 | |
| 881 | start = txring->next_to_clean; |
| 882 | ring_limit = txring->next_to_fill; |
| 883 | |
| 884 | prefetch(&TX_DESC_INFO(txring, start+1).skb); |
| 885 | |
| 886 | /* Compensate for when fill has wrapped but clean has not */ |
| 887 | if (start > ring_limit) |
| 888 | ring_limit += TX_RING_SIZE; |
| 889 | |
| 890 | buf_count = 0; |
| 891 | descr_count = 0; |
| 892 | |
| 893 | for (i = start; |
| 894 | descr_count < batch_limit && i < ring_limit; |
| 895 | i += buf_count) { |
| 896 | u64 mactx = TX_DESC(txring, i); |
| 897 | struct sk_buff *skb; |
| 898 | |
| 899 | if ((mactx & XCT_MACTX_E) || |
| 900 | (*chan->status & PAS_STATUS_ERROR)) |
| 901 | pasemi_mac_tx_error(mac, mactx); |
| 902 | |
| 903 | /* Skip over control descriptors */ |
| 904 | if (!(mactx & XCT_MACTX_LLEN_M)) { |
| 905 | TX_DESC(txring, i) = 0; |
| 906 | TX_DESC(txring, i+1) = 0; |
| 907 | buf_count = 2; |
| 908 | continue; |
| 909 | } |
| 910 | |
| 911 | skb = TX_DESC_INFO(txring, i+1).skb; |
| 912 | nr_frags = TX_DESC_INFO(txring, i).dma; |
| 913 | |
| 914 | if (unlikely(mactx & XCT_MACTX_O)) |
| 915 | /* Not yet transmitted */ |
| 916 | break; |
| 917 | |
| 918 | buf_count = 2 + nr_frags; |
| 919 | /* Since we always fill with an even number of entries, make |
| 920 | * sure we skip any unused one at the end as well. |
| 921 | */ |
| 922 | if (buf_count & 1) |
| 923 | buf_count++; |
| 924 | |
| 925 | for (j = 0; j <= nr_frags; j++) |
| 926 | dmas[descr_count][j] = TX_DESC_INFO(txring, i+1+j).dma; |
| 927 | |
| 928 | skbs[descr_count] = skb; |
| 929 | nf[descr_count] = nr_frags; |
| 930 | |
| 931 | TX_DESC(txring, i) = 0; |
| 932 | TX_DESC(txring, i+1) = 0; |
| 933 | |
| 934 | descr_count++; |
| 935 | } |
| 936 | txring->next_to_clean = i & (TX_RING_SIZE-1); |
| 937 | |
| 938 | spin_unlock_irqrestore(&txring->lock, flags); |
| 939 | netif_wake_queue(mac->netdev); |
| 940 | |
| 941 | for (i = 0; i < descr_count; i++) |
| 942 | pasemi_mac_unmap_tx_skb(mac, nf[i], skbs[i], dmas[i]); |
| 943 | |
| 944 | total_count += descr_count; |
| 945 | |
| 946 | /* If the batch was full, try to clean more */ |
| 947 | if (descr_count == batch_limit) |
| 948 | goto restart; |
| 949 | |
| 950 | return total_count; |
| 951 | } |
| 952 | |
| 953 | |
| 954 | static irqreturn_t pasemi_mac_rx_intr(int irq, void *data) |
| 955 | { |
| 956 | const struct pasemi_mac_rxring *rxring = data; |
| 957 | struct pasemi_mac *mac = rxring->mac; |
| 958 | const struct pasemi_dmachan *chan = &rxring->chan; |
| 959 | unsigned int reg; |
| 960 | |
| 961 | if (!(*chan->status & PAS_STATUS_CAUSE_M)) |
| 962 | return IRQ_NONE; |
| 963 | |
	/* Don't reset the packet count, so it won't fire again, but clear
	 * all the others.
	 */
| 967 | |
| 968 | reg = 0; |
| 969 | if (*chan->status & PAS_STATUS_SOFT) |
| 970 | reg |= PAS_IOB_DMA_RXCH_RESET_SINTC; |
| 971 | if (*chan->status & PAS_STATUS_ERROR) |
| 972 | reg |= PAS_IOB_DMA_RXCH_RESET_DINTC; |
| 973 | |
| 974 | napi_schedule(&mac->napi); |
| 975 | |
| 976 | write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg); |
| 977 | |
| 978 | return IRQ_HANDLED; |
| 979 | } |
| 980 | |
| 981 | #define TX_CLEAN_INTERVAL HZ |
| 982 | |
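/* Periodic TX ring cleanup, so completed packets get reclaimed even when
 * no TX completion interrupt arrives for a while.
 */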
| 983 | static void pasemi_mac_tx_timer(unsigned long data) |
| 984 | { |
| 985 | struct pasemi_mac_txring *txring = (struct pasemi_mac_txring *)data; |
| 986 | struct pasemi_mac *mac = txring->mac; |
| 987 | |
| 988 | pasemi_mac_clean_tx(txring); |
| 989 | |
| 990 | mod_timer(&txring->clean_timer, jiffies + TX_CLEAN_INTERVAL); |
| 991 | |
| 992 | pasemi_mac_restart_tx_intr(mac); |
| 993 | } |
| 994 | |
| 995 | static irqreturn_t pasemi_mac_tx_intr(int irq, void *data) |
| 996 | { |
| 997 | struct pasemi_mac_txring *txring = data; |
| 998 | const struct pasemi_dmachan *chan = &txring->chan; |
| 999 | struct pasemi_mac *mac = txring->mac; |
| 1000 | unsigned int reg; |
| 1001 | |
| 1002 | if (!(*chan->status & PAS_STATUS_CAUSE_M)) |
| 1003 | return IRQ_NONE; |
| 1004 | |
| 1005 | reg = 0; |
| 1006 | |
| 1007 | if (*chan->status & PAS_STATUS_SOFT) |
| 1008 | reg |= PAS_IOB_DMA_TXCH_RESET_SINTC; |
| 1009 | if (*chan->status & PAS_STATUS_ERROR) |
| 1010 | reg |= PAS_IOB_DMA_TXCH_RESET_DINTC; |
| 1011 | |
| 1012 | mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2); |
| 1013 | |
| 1014 | napi_schedule(&mac->napi); |
| 1015 | |
| 1016 | if (reg) |
| 1017 | write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg); |
| 1018 | |
| 1019 | return IRQ_HANDLED; |
| 1020 | } |
| 1021 | |
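/* phylib link-change callback: mirror the negotiated speed/duplex into
 * PAS_MAC_CFG_PCFG and update the carrier state.
 */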
| 1022 | static void pasemi_adjust_link(struct net_device *dev) |
| 1023 | { |
| 1024 | struct pasemi_mac *mac = netdev_priv(dev); |
| 1025 | int msg; |
| 1026 | unsigned int flags; |
| 1027 | unsigned int new_flags; |
| 1028 | |
| 1029 | if (!mac->phydev->link) { |
| 1030 | /* If no link, MAC speed settings don't matter. Just report |
| 1031 | * link down and return. |
| 1032 | */ |
| 1033 | if (mac->link && netif_msg_link(mac)) |
| 1034 | printk(KERN_INFO "%s: Link is down.\n", dev->name); |
| 1035 | |
| 1036 | netif_carrier_off(dev); |
| 1037 | pasemi_mac_intf_disable(mac); |
| 1038 | mac->link = 0; |
| 1039 | |
| 1040 | return; |
| 1041 | } else { |
| 1042 | pasemi_mac_intf_enable(mac); |
| 1043 | netif_carrier_on(dev); |
| 1044 | } |
| 1045 | |
| 1046 | flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); |
| 1047 | new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M | |
| 1048 | PAS_MAC_CFG_PCFG_TSR_M); |
| 1049 | |
| 1050 | if (!mac->phydev->duplex) |
| 1051 | new_flags |= PAS_MAC_CFG_PCFG_HD; |
| 1052 | |
| 1053 | switch (mac->phydev->speed) { |
| 1054 | case 1000: |
| 1055 | new_flags |= PAS_MAC_CFG_PCFG_SPD_1G | |
| 1056 | PAS_MAC_CFG_PCFG_TSR_1G; |
| 1057 | break; |
| 1058 | case 100: |
| 1059 | new_flags |= PAS_MAC_CFG_PCFG_SPD_100M | |
| 1060 | PAS_MAC_CFG_PCFG_TSR_100M; |
| 1061 | break; |
| 1062 | case 10: |
| 1063 | new_flags |= PAS_MAC_CFG_PCFG_SPD_10M | |
| 1064 | PAS_MAC_CFG_PCFG_TSR_10M; |
| 1065 | break; |
	default:
		printk(KERN_WARNING "%s: Unsupported speed %d\n",
		       dev->name, mac->phydev->speed);
| 1068 | } |
| 1069 | |
| 1070 | /* Print on link or speed/duplex change */ |
| 1071 | msg = mac->link != mac->phydev->link || flags != new_flags; |
| 1072 | |
| 1073 | mac->duplex = mac->phydev->duplex; |
| 1074 | mac->speed = mac->phydev->speed; |
| 1075 | mac->link = mac->phydev->link; |
| 1076 | |
| 1077 | if (new_flags != flags) |
| 1078 | write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags); |
| 1079 | |
| 1080 | if (msg && netif_msg_link(mac)) |
| 1081 | printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n", |
| 1082 | dev->name, mac->speed, mac->duplex ? "full" : "half"); |
| 1083 | } |
| 1084 | |
| 1085 | static int pasemi_mac_phy_init(struct net_device *dev) |
| 1086 | { |
| 1087 | struct pasemi_mac *mac = netdev_priv(dev); |
| 1088 | struct device_node *dn, *phy_dn; |
| 1089 | struct phy_device *phydev; |
| 1090 | |
| 1091 | dn = pci_device_to_OF_node(mac->pdev); |
	phy_dn = of_parse_phandle(dn, "phy-handle", 0);
| 1094 | |
| 1095 | mac->link = 0; |
| 1096 | mac->speed = 0; |
| 1097 | mac->duplex = -1; |
| 1098 | |
	phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0,
				PHY_INTERFACE_MODE_SGMII);

	of_node_put(phy_dn);

	if (!phydev) {
		printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
		return -ENODEV;
	}
| 1106 | |
| 1107 | mac->phydev = phydev; |
| 1108 | |
| 1109 | return 0; |
| 1110 | } |
| 1111 | |
| 1112 | |
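/* ndo_open: allocate RX/TX (and, for jumbo MTUs, checksum) rings, program
 * the DMA and IOB registers, enable the channels, request the per-channel
 * interrupts and start the PHY and the TX cleanup timer.
 */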
| 1113 | static int pasemi_mac_open(struct net_device *dev) |
| 1114 | { |
| 1115 | struct pasemi_mac *mac = netdev_priv(dev); |
| 1116 | unsigned int flags; |
| 1117 | int i, ret; |
| 1118 | |
| 1119 | flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) | |
| 1120 | PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) | |
| 1121 | PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12); |
| 1122 | |
| 1123 | write_mac_reg(mac, PAS_MAC_CFG_TXP, flags); |
| 1124 | |
| 1125 | ret = pasemi_mac_setup_rx_resources(dev); |
| 1126 | if (ret) |
| 1127 | goto out_rx_resources; |
| 1128 | |
| 1129 | mac->tx = pasemi_mac_setup_tx_resources(dev); |
| 1130 | |
| 1131 | if (!mac->tx) |
| 1132 | goto out_tx_ring; |
| 1133 | |
	/* We might already have allocated checksum rings in case the MTU
	 * was changed before the interface was brought up.
	 */
| 1137 | if (dev->mtu > 1500 && !mac->num_cs) { |
| 1138 | pasemi_mac_setup_csrings(mac); |
| 1139 | if (!mac->num_cs) |
| 1140 | goto out_tx_ring; |
| 1141 | } |
| 1142 | |
| 1143 | /* Zero out rmon counters */ |
| 1144 | for (i = 0; i < 32; i++) |
| 1145 | write_mac_reg(mac, PAS_MAC_RMON(i), 0); |
| 1146 | |
| 1147 | /* 0x3ff with 33MHz clock is about 31us */ |
| 1148 | write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG, |
| 1149 | PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0x3ff)); |
| 1150 | |
| 1151 | write_iob_reg(PAS_IOB_DMA_RXCH_CFG(mac->rx->chan.chno), |
| 1152 | PAS_IOB_DMA_RXCH_CFG_CNTTH(256)); |
| 1153 | |
| 1154 | write_iob_reg(PAS_IOB_DMA_TXCH_CFG(mac->tx->chan.chno), |
| 1155 | PAS_IOB_DMA_TXCH_CFG_CNTTH(32)); |
| 1156 | |
| 1157 | write_mac_reg(mac, PAS_MAC_IPC_CHNL, |
| 1158 | PAS_MAC_IPC_CHNL_DCHNO(mac->rx->chan.chno) | |
| 1159 | PAS_MAC_IPC_CHNL_BCH(mac->rx->chan.chno)); |
| 1160 | |
| 1161 | /* enable rx if */ |
| 1162 | write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), |
| 1163 | PAS_DMA_RXINT_RCMDSTA_EN | |
| 1164 | PAS_DMA_RXINT_RCMDSTA_DROPS_M | |
| 1165 | PAS_DMA_RXINT_RCMDSTA_BP | |
| 1166 | PAS_DMA_RXINT_RCMDSTA_OO | |
| 1167 | PAS_DMA_RXINT_RCMDSTA_BT); |
| 1168 | |
| 1169 | /* enable rx channel */ |
| 1170 | pasemi_dma_start_chan(&rx_ring(mac)->chan, PAS_DMA_RXCHAN_CCMDSTA_DU | |
| 1171 | PAS_DMA_RXCHAN_CCMDSTA_OD | |
| 1172 | PAS_DMA_RXCHAN_CCMDSTA_FD | |
| 1173 | PAS_DMA_RXCHAN_CCMDSTA_DT); |
| 1174 | |
| 1175 | /* enable tx channel */ |
| 1176 | pasemi_dma_start_chan(&tx_ring(mac)->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ | |
| 1177 | PAS_DMA_TXCHAN_TCMDSTA_DB | |
| 1178 | PAS_DMA_TXCHAN_TCMDSTA_DE | |
| 1179 | PAS_DMA_TXCHAN_TCMDSTA_DA); |
| 1180 | |
| 1181 | pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE); |
| 1182 | |
| 1183 | write_dma_reg(PAS_DMA_RXCHAN_INCR(rx_ring(mac)->chan.chno), |
| 1184 | RX_RING_SIZE>>1); |
| 1185 | |
| 1186 | /* Clear out any residual packet count state from firmware */ |
| 1187 | pasemi_mac_restart_rx_intr(mac); |
| 1188 | pasemi_mac_restart_tx_intr(mac); |
| 1189 | |
| 1190 | flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE; |
| 1191 | |
| 1192 | if (mac->type == MAC_TYPE_GMAC) |
| 1193 | flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G; |
| 1194 | else |
| 1195 | flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G; |
| 1196 | |
| 1197 | /* Enable interface in MAC */ |
| 1198 | write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); |
| 1199 | |
| 1200 | ret = pasemi_mac_phy_init(dev); |
| 1201 | if (ret) { |
| 1202 | /* Since we won't get link notification, just enable RX */ |
| 1203 | pasemi_mac_intf_enable(mac); |
| 1204 | if (mac->type == MAC_TYPE_GMAC) { |
| 1205 | /* Warn for missing PHY on SGMII (1Gig) ports */ |
| 1206 | dev_warn(&mac->pdev->dev, |
| 1207 | "PHY init failed: %d.\n", ret); |
| 1208 | dev_warn(&mac->pdev->dev, |
| 1209 | "Defaulting to 1Gbit full duplex\n"); |
| 1210 | } |
| 1211 | } |
| 1212 | |
| 1213 | netif_start_queue(dev); |
| 1214 | napi_enable(&mac->napi); |
| 1215 | |
| 1216 | snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx", |
| 1217 | dev->name); |
| 1218 | |
| 1219 | ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, IRQF_DISABLED, |
| 1220 | mac->tx_irq_name, mac->tx); |
| 1221 | if (ret) { |
| 1222 | dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", |
| 1223 | mac->tx->chan.irq, ret); |
| 1224 | goto out_tx_int; |
| 1225 | } |
| 1226 | |
| 1227 | snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx", |
| 1228 | dev->name); |
| 1229 | |
| 1230 | ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, IRQF_DISABLED, |
| 1231 | mac->rx_irq_name, mac->rx); |
| 1232 | if (ret) { |
| 1233 | dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", |
| 1234 | mac->rx->chan.irq, ret); |
| 1235 | goto out_rx_int; |
| 1236 | } |
| 1237 | |
| 1238 | if (mac->phydev) |
| 1239 | phy_start(mac->phydev); |
| 1240 | |
| 1241 | init_timer(&mac->tx->clean_timer); |
| 1242 | mac->tx->clean_timer.function = pasemi_mac_tx_timer; |
| 1243 | mac->tx->clean_timer.data = (unsigned long)mac->tx; |
| 1244 | mac->tx->clean_timer.expires = jiffies+HZ; |
| 1245 | add_timer(&mac->tx->clean_timer); |
| 1246 | |
| 1247 | return 0; |
| 1248 | |
| 1249 | out_rx_int: |
| 1250 | free_irq(mac->tx->chan.irq, mac->tx); |
| 1251 | out_tx_int: |
| 1252 | napi_disable(&mac->napi); |
| 1253 | netif_stop_queue(dev); |
| 1254 | out_tx_ring: |
| 1255 | if (mac->tx) |
| 1256 | pasemi_mac_free_tx_resources(mac); |
| 1257 | pasemi_mac_free_rx_resources(mac); |
| 1258 | out_rx_resources: |
| 1259 | |
| 1260 | return ret; |
| 1261 | } |
| 1262 | |
| 1263 | #define MAX_RETRIES 5000 |
| 1264 | |
| 1265 | static void pasemi_mac_pause_txchan(struct pasemi_mac *mac) |
| 1266 | { |
| 1267 | unsigned int sta, retries; |
| 1268 | int txch = tx_ring(mac)->chan.chno; |
| 1269 | |
| 1270 | write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), |
| 1271 | PAS_DMA_TXCHAN_TCMDSTA_ST); |
| 1272 | |
| 1273 | for (retries = 0; retries < MAX_RETRIES; retries++) { |
| 1274 | sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch)); |
| 1275 | if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) |
| 1276 | break; |
| 1277 | cond_resched(); |
| 1278 | } |
| 1279 | |
| 1280 | if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT) |
| 1281 | dev_err(&mac->dma_pdev->dev, |
| 1282 | "Failed to stop tx channel, tcmdsta %08x\n", sta); |
| 1283 | |
| 1284 | write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0); |
| 1285 | } |
| 1286 | |
| 1287 | static void pasemi_mac_pause_rxchan(struct pasemi_mac *mac) |
| 1288 | { |
| 1289 | unsigned int sta, retries; |
| 1290 | int rxch = rx_ring(mac)->chan.chno; |
| 1291 | |
| 1292 | write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), |
| 1293 | PAS_DMA_RXCHAN_CCMDSTA_ST); |
| 1294 | for (retries = 0; retries < MAX_RETRIES; retries++) { |
| 1295 | sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch)); |
| 1296 | if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) |
| 1297 | break; |
| 1298 | cond_resched(); |
| 1299 | } |
| 1300 | |
	if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev,
			"Failed to stop rx channel, ccmdsta %08x\n", sta);
| 1304 | write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0); |
| 1305 | } |
| 1306 | |
| 1307 | static void pasemi_mac_pause_rxint(struct pasemi_mac *mac) |
| 1308 | { |
| 1309 | unsigned int sta, retries; |
| 1310 | |
| 1311 | write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), |
| 1312 | PAS_DMA_RXINT_RCMDSTA_ST); |
| 1313 | for (retries = 0; retries < MAX_RETRIES; retries++) { |
| 1314 | sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); |
| 1315 | if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT)) |
| 1316 | break; |
| 1317 | cond_resched(); |
| 1318 | } |
| 1319 | |
| 1320 | if (sta & PAS_DMA_RXINT_RCMDSTA_ACT) |
| 1321 | dev_err(&mac->dma_pdev->dev, |
| 1322 | "Failed to stop rx interface, rcmdsta %08x\n", sta); |
| 1323 | write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0); |
| 1324 | } |
| 1325 | |
| 1326 | static int pasemi_mac_close(struct net_device *dev) |
| 1327 | { |
| 1328 | struct pasemi_mac *mac = netdev_priv(dev); |
| 1329 | unsigned int sta; |
| 1330 | int rxch, txch, i; |
| 1331 | |
| 1332 | rxch = rx_ring(mac)->chan.chno; |
| 1333 | txch = tx_ring(mac)->chan.chno; |
| 1334 | |
| 1335 | if (mac->phydev) { |
| 1336 | phy_stop(mac->phydev); |
| 1337 | phy_disconnect(mac->phydev); |
| 1338 | } |
| 1339 | |
| 1340 | del_timer_sync(&mac->tx->clean_timer); |
| 1341 | |
| 1342 | netif_stop_queue(dev); |
| 1343 | napi_disable(&mac->napi); |
| 1344 | |
| 1345 | sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); |
| 1346 | if (sta & (PAS_DMA_RXINT_RCMDSTA_BP | |
| 1347 | PAS_DMA_RXINT_RCMDSTA_OO | |
| 1348 | PAS_DMA_RXINT_RCMDSTA_BT)) |
| 1349 | printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta); |
| 1350 | |
| 1351 | sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch)); |
| 1352 | if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU | |
| 1353 | PAS_DMA_RXCHAN_CCMDSTA_OD | |
| 1354 | PAS_DMA_RXCHAN_CCMDSTA_FD | |
| 1355 | PAS_DMA_RXCHAN_CCMDSTA_DT)) |
| 1356 | printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta); |
| 1357 | |
| 1358 | sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch)); |
| 1359 | if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ | PAS_DMA_TXCHAN_TCMDSTA_DB | |
| 1360 | PAS_DMA_TXCHAN_TCMDSTA_DE | PAS_DMA_TXCHAN_TCMDSTA_DA)) |
| 1361 | printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta); |
| 1362 | |
| 1363 | /* Clean out any pending buffers */ |
| 1364 | pasemi_mac_clean_tx(tx_ring(mac)); |
| 1365 | pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); |
| 1366 | |
| 1367 | pasemi_mac_pause_txchan(mac); |
| 1368 | pasemi_mac_pause_rxint(mac); |
| 1369 | pasemi_mac_pause_rxchan(mac); |
| 1370 | pasemi_mac_intf_disable(mac); |
| 1371 | |
| 1372 | free_irq(mac->tx->chan.irq, mac->tx); |
| 1373 | free_irq(mac->rx->chan.irq, mac->rx); |
| 1374 | |
| 1375 | for (i = 0; i < mac->num_cs; i++) { |
| 1376 | pasemi_mac_free_csring(mac->cs[i]); |
| 1377 | mac->cs[i] = NULL; |
| 1378 | } |
| 1379 | |
| 1380 | mac->num_cs = 0; |
| 1381 | |
| 1382 | /* Free resources */ |
| 1383 | pasemi_mac_free_rx_resources(mac); |
| 1384 | pasemi_mac_free_tx_resources(mac); |
| 1385 | |
| 1386 | return 0; |
| 1387 | } |
| 1388 | |
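/* Queue descriptors on a checksum function channel to compute the TCP/UDP
 * checksum of an outgoing packet and copy the 2-byte result back into the
 * header, then use a pair of event flags so the MAC TX channel does not
 * transmit the packet until the checksum writeback has completed.
 */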
| 1389 | static void pasemi_mac_queue_csdesc(const struct sk_buff *skb, |
| 1390 | const dma_addr_t *map, |
| 1391 | const unsigned int *map_size, |
| 1392 | struct pasemi_mac_txring *txring, |
| 1393 | struct pasemi_mac_csring *csring) |
| 1394 | { |
| 1395 | u64 fund; |
| 1396 | dma_addr_t cs_dest; |
| 1397 | const int nh_off = skb_network_offset(skb); |
| 1398 | const int nh_len = skb_network_header_len(skb); |
| 1399 | const int nfrags = skb_shinfo(skb)->nr_frags; |
| 1400 | int cs_size, i, fill, hdr, cpyhdr, evt; |
| 1401 | dma_addr_t csdma; |
| 1402 | |
| 1403 | fund = XCT_FUN_ST | XCT_FUN_RR_8BRES | |
| 1404 | XCT_FUN_O | XCT_FUN_FUN(csring->fun) | |
| 1405 | XCT_FUN_CRM_SIG | XCT_FUN_LLEN(skb->len - nh_off) | |
| 1406 | XCT_FUN_SHL(nh_len >> 2) | XCT_FUN_SE; |
| 1407 | |
| 1408 | switch (ip_hdr(skb)->protocol) { |
| 1409 | case IPPROTO_TCP: |
| 1410 | fund |= XCT_FUN_SIG_TCP4; |
| 1411 | /* TCP checksum is 16 bytes into the header */ |
| 1412 | cs_dest = map[0] + skb_transport_offset(skb) + 16; |
| 1413 | break; |
| 1414 | case IPPROTO_UDP: |
| 1415 | fund |= XCT_FUN_SIG_UDP4; |
| 1416 | /* UDP checksum is 6 bytes into the header */ |
| 1417 | cs_dest = map[0] + skb_transport_offset(skb) + 6; |
| 1418 | break; |
| 1419 | default: |
| 1420 | BUG(); |
| 1421 | } |
| 1422 | |
| 1423 | /* Do the checksum offloaded */ |
| 1424 | fill = csring->next_to_fill; |
| 1425 | hdr = fill; |
| 1426 | |
| 1427 | CS_DESC(csring, fill++) = fund; |
| 1428 | /* Room for 8BRES. Checksum result is really 2 bytes into it */ |
| 1429 | csdma = csring->chan.ring_dma + (fill & (CS_RING_SIZE-1)) * 8 + 2; |
| 1430 | CS_DESC(csring, fill++) = 0; |
| 1431 | |
| 1432 | CS_DESC(csring, fill) = XCT_PTR_LEN(map_size[0]-nh_off) | XCT_PTR_ADDR(map[0]+nh_off); |
| 1433 | for (i = 1; i <= nfrags; i++) |
| 1434 | CS_DESC(csring, fill+i) = XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]); |
| 1435 | |
| 1436 | fill += i; |
| 1437 | if (fill & 1) |
| 1438 | fill++; |
| 1439 | |
| 1440 | /* Copy the result into the TCP packet */ |
| 1441 | cpyhdr = fill; |
| 1442 | CS_DESC(csring, fill++) = XCT_FUN_O | XCT_FUN_FUN(csring->fun) | |
| 1443 | XCT_FUN_LLEN(2) | XCT_FUN_SE; |
| 1444 | CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(cs_dest) | XCT_PTR_T; |
| 1445 | CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(csdma); |
| 1446 | fill++; |
| 1447 | |
| 1448 | evt = !csring->last_event; |
| 1449 | csring->last_event = evt; |
| 1450 | |
| 1451 | /* Event handshaking with MAC TX */ |
| 1452 | CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O | |
| 1453 | CTRL_CMD_ETYPE_SET | CTRL_CMD_REG(csring->events[evt]); |
| 1454 | CS_DESC(csring, fill++) = 0; |
| 1455 | CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O | |
| 1456 | CTRL_CMD_ETYPE_WCLR | CTRL_CMD_REG(csring->events[!evt]); |
| 1457 | CS_DESC(csring, fill++) = 0; |
| 1458 | csring->next_to_fill = fill & (CS_RING_SIZE-1); |
| 1459 | |
| 1460 | cs_size = fill - hdr; |
| 1461 | write_dma_reg(PAS_DMA_TXCHAN_INCR(csring->chan.chno), (cs_size) >> 1); |
| 1462 | |
| 1463 | /* TX-side event handshaking */ |
| 1464 | fill = txring->next_to_fill; |
| 1465 | TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O | |
| 1466 | CTRL_CMD_ETYPE_WSET | CTRL_CMD_REG(csring->events[evt]); |
| 1467 | TX_DESC(txring, fill++) = 0; |
| 1468 | TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O | |
| 1469 | CTRL_CMD_ETYPE_CLR | CTRL_CMD_REG(csring->events[!evt]); |
| 1470 | TX_DESC(txring, fill++) = 0; |
| 1471 | txring->next_to_fill = fill; |
| 1472 | |
| 1473 | write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2); |
| 1474 | |
| 1475 | return; |
| 1476 | } |
| 1477 | |
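/* ndo_start_xmit: DMA-map the head and fragments, use either the MAC's
 * inline checksum engine (short frames) or a checksum function channel
 * (frames over 1540 bytes), fill in the mactx and pointer descriptors and
 * kick the TX channel.
 */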
| 1478 | static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) |
| 1479 | { |
| 1480 | struct pasemi_mac * const mac = netdev_priv(dev); |
| 1481 | struct pasemi_mac_txring * const txring = tx_ring(mac); |
| 1482 | struct pasemi_mac_csring *csring; |
| 1483 | u64 dflags = 0; |
| 1484 | u64 mactx; |
| 1485 | dma_addr_t map[MAX_SKB_FRAGS+1]; |
| 1486 | unsigned int map_size[MAX_SKB_FRAGS+1]; |
| 1487 | unsigned long flags; |
| 1488 | int i, nfrags; |
| 1489 | int fill; |
| 1490 | const int nh_off = skb_network_offset(skb); |
| 1491 | const int nh_len = skb_network_header_len(skb); |
| 1492 | |
| 1493 | prefetch(&txring->ring_info); |
| 1494 | |
| 1495 | dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD; |
| 1496 | |
| 1497 | nfrags = skb_shinfo(skb)->nr_frags; |
| 1498 | |
| 1499 | map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb), |
| 1500 | PCI_DMA_TODEVICE); |
| 1501 | map_size[0] = skb_headlen(skb); |
| 1502 | if (pci_dma_mapping_error(mac->dma_pdev, map[0])) |
| 1503 | goto out_err_nolock; |
| 1504 | |
| 1505 | for (i = 0; i < nfrags; i++) { |
| 1506 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| 1507 | |
| 1508 | map[i+1] = pci_map_page(mac->dma_pdev, frag->page, |
| 1509 | frag->page_offset, frag->size, |
| 1510 | PCI_DMA_TODEVICE); |
| 1511 | map_size[i+1] = frag->size; |
| 1512 | if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) { |
| 1513 | nfrags = i; |
| 1514 | goto out_err_nolock; |
| 1515 | } |
| 1516 | } |
| 1517 | |
| 1518 | if (skb->ip_summed == CHECKSUM_PARTIAL && skb->len <= 1540) { |
| 1519 | switch (ip_hdr(skb)->protocol) { |
| 1520 | case IPPROTO_TCP: |
| 1521 | dflags |= XCT_MACTX_CSUM_TCP; |
| 1522 | dflags |= XCT_MACTX_IPH(nh_len >> 2); |
| 1523 | dflags |= XCT_MACTX_IPO(nh_off); |
| 1524 | break; |
| 1525 | case IPPROTO_UDP: |
| 1526 | dflags |= XCT_MACTX_CSUM_UDP; |
| 1527 | dflags |= XCT_MACTX_IPH(nh_len >> 2); |
| 1528 | dflags |= XCT_MACTX_IPO(nh_off); |
| 1529 | break; |
| 1530 | default: |
| 1531 | WARN_ON(1); |
| 1532 | } |
| 1533 | } |
| 1534 | |
| 1535 | mactx = dflags | XCT_MACTX_LLEN(skb->len); |
| 1536 | |
| 1537 | spin_lock_irqsave(&txring->lock, flags); |
| 1538 | |
| 1539 | /* Avoid stepping on the same cache line that the DMA controller |
| 1540 | * is currently about to send, so leave at least 8 words available. |
| 1541 | * Total free space needed is mactx + fragments + 8 |
| 1542 | */ |
| 1543 | if (RING_AVAIL(txring) < nfrags + 14) { |
| 1544 | /* no room -- stop the queue and wait for tx intr */ |
| 1545 | netif_stop_queue(dev); |
| 1546 | goto out_err; |
| 1547 | } |
| 1548 | |
| 1549 | /* Queue up checksum + event descriptors, if needed */ |
| 1550 | if (mac->num_cs && skb->ip_summed == CHECKSUM_PARTIAL && skb->len > 1540) { |
| 1551 | csring = mac->cs[mac->last_cs]; |
| 1552 | mac->last_cs = (mac->last_cs + 1) % mac->num_cs; |
| 1553 | |
| 1554 | pasemi_mac_queue_csdesc(skb, map, map_size, txring, csring); |
| 1555 | } |
| 1556 | |
| 1557 | fill = txring->next_to_fill; |
| 1558 | TX_DESC(txring, fill) = mactx; |
| 1559 | TX_DESC_INFO(txring, fill).dma = nfrags; |
| 1560 | fill++; |
| 1561 | TX_DESC_INFO(txring, fill).skb = skb; |
| 1562 | for (i = 0; i <= nfrags; i++) { |
| 1563 | TX_DESC(txring, fill+i) = |
| 1564 | XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]); |
| 1565 | TX_DESC_INFO(txring, fill+i).dma = map[i]; |
| 1566 | } |
| 1567 | |
| 1568 | /* We have to add an even number of 8-byte entries to the ring |
| 1569 | * even if the last one is unused. That means always an odd number |
| 1570 | * of pointers + one mactx descriptor. |
| 1571 | */ |
| 1572 | if (nfrags & 1) |
| 1573 | nfrags++; |
| 1574 | |
| 1575 | txring->next_to_fill = (fill + nfrags + 1) & (TX_RING_SIZE-1); |
| 1576 | |
| 1577 | dev->stats.tx_packets++; |
| 1578 | dev->stats.tx_bytes += skb->len; |
| 1579 | |
| 1580 | spin_unlock_irqrestore(&txring->lock, flags); |
| 1581 | |
| 1582 | write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), (nfrags+2) >> 1); |
| 1583 | |
| 1584 | return NETDEV_TX_OK; |
| 1585 | |
| 1586 | out_err: |
| 1587 | spin_unlock_irqrestore(&txring->lock, flags); |
| 1588 | out_err_nolock: |
| 1589 | while (nfrags--) |
| 1590 | pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags], |
| 1591 | PCI_DMA_TODEVICE); |
| 1592 | |
| 1593 | return NETDEV_TX_BUSY; |
| 1594 | } |
| 1595 | |
| 1596 | static void pasemi_mac_set_rx_mode(struct net_device *dev) |
| 1597 | { |
| 1598 | const struct pasemi_mac *mac = netdev_priv(dev); |
| 1599 | unsigned int flags; |
| 1600 | |
| 1601 | flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); |
| 1602 | |
| 1603 | /* Set promiscuous */ |
| 1604 | if (dev->flags & IFF_PROMISC) |
| 1605 | flags |= PAS_MAC_CFG_PCFG_PR; |
| 1606 | else |
| 1607 | flags &= ~PAS_MAC_CFG_PCFG_PR; |
| 1608 | |
| 1609 | write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); |
| 1610 | } |
| 1611 | |
| 1612 | |
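/* NAPI poll: clean the TX ring, then process up to 'budget' RX packets.
 * Interrupts are re-enabled once a poll completes under budget.
 */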
| 1613 | static int pasemi_mac_poll(struct napi_struct *napi, int budget) |
| 1614 | { |
| 1615 | struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi); |
| 1616 | int pkts; |
| 1617 | |
| 1618 | pasemi_mac_clean_tx(tx_ring(mac)); |
| 1619 | pkts = pasemi_mac_clean_rx(rx_ring(mac), budget); |
| 1620 | if (pkts < budget) { |
| 1621 | /* all done, no more packets present */ |
| 1622 | napi_complete(napi); |
| 1623 | |
| 1624 | pasemi_mac_restart_rx_intr(mac); |
| 1625 | pasemi_mac_restart_tx_intr(mac); |
| 1626 | } |
| 1627 | return pkts; |
| 1628 | } |
| 1629 | |
| 1630 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 1631 | /* |
| 1632 | * Polling 'interrupt' - used by things like netconsole to send skbs |
| 1633 | * without having to re-enable interrupts. It's not called while |
| 1634 | * the interrupt routine is executing. |
| 1635 | */ |
| 1636 | static void pasemi_mac_netpoll(struct net_device *dev) |
| 1637 | { |
| 1638 | const struct pasemi_mac *mac = netdev_priv(dev); |
| 1639 | |
| 1640 | disable_irq(mac->tx->chan.irq); |
| 1641 | pasemi_mac_tx_intr(mac->tx->chan.irq, mac->tx); |
| 1642 | enable_irq(mac->tx->chan.irq); |
| 1643 | |
| 1644 | disable_irq(mac->rx->chan.irq); |
| 1645 | pasemi_mac_rx_intr(mac->rx->chan.irq, mac->rx); |
| 1646 | enable_irq(mac->rx->chan.irq); |
| 1647 | } |
| 1648 | #endif |
| 1649 | |
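/* ndo_change_mtu: if the interface is running, quiesce and drain RX first;
 * allocate checksum rings when going to a jumbo MTU, reprogram the maximum
 * frame size and RX buffer size, then restart the interface if needed.
 */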
| 1650 | static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu) |
| 1651 | { |
| 1652 | struct pasemi_mac *mac = netdev_priv(dev); |
| 1653 | unsigned int reg; |
| 1654 | unsigned int rcmdsta = 0; |
| 1655 | int running; |
| 1656 | int ret = 0; |
| 1657 | |
| 1658 | if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU) |
| 1659 | return -EINVAL; |
| 1660 | |
| 1661 | running = netif_running(dev); |
| 1662 | |
| 1663 | if (running) { |
| 1664 | /* Need to stop the interface, clean out all already |
| 1665 | * received buffers, free all unused buffers on the RX |
| 1666 | * interface ring, then finally re-fill the rx ring with |
| 1667 | * the new-size buffers and restart. |
| 1668 | */ |
| 1669 | |
| 1670 | napi_disable(&mac->napi); |
| 1671 | netif_tx_disable(dev); |
| 1672 | pasemi_mac_intf_disable(mac); |
| 1673 | |
| 1674 | rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); |
| 1675 | pasemi_mac_pause_rxint(mac); |
| 1676 | pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); |
| 1677 | pasemi_mac_free_rx_buffers(mac); |
| 1678 | |
| 1679 | } |
| 1680 | |
| 1681 | /* Setup checksum channels if large MTU and none already allocated */ |
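	/* (Presumably the MAC's inline TX checksum path does not cover
	 * jumbo-sized frames, so checksumming is handed off to a separate
	 * copy/checksum channel via pasemi_mac_queue_csdesc().)
	 */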
| 1682 | if (new_mtu > 1500 && !mac->num_cs) { |
| 1683 | pasemi_mac_setup_csrings(mac); |
| 1684 | if (!mac->num_cs) { |
| 1685 | ret = -ENOMEM; |
| 1686 | goto out; |
| 1687 | } |
| 1688 | } |
| 1689 | |
	/* Change maxf, i.e. the maximum frame size that is accepted.
	 * Need room for the Ethernet header and the 4-byte FCS.
	 */
| 1693 | reg = read_mac_reg(mac, PAS_MAC_CFG_MACCFG); |
| 1694 | reg &= ~PAS_MAC_CFG_MACCFG_MAXF_M; |
| 1695 | reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4); |
| 1696 | write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg); |
| 1697 | |
| 1698 | dev->mtu = new_mtu; |
	/* MTU + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 2 64B cachelines */
| 1700 | mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128; |
| 1701 | |
| 1702 | out: |
| 1703 | if (running) { |
| 1704 | write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), |
| 1705 | rcmdsta | PAS_DMA_RXINT_RCMDSTA_EN); |
| 1706 | |
| 1707 | rx_ring(mac)->next_to_fill = 0; |
| 1708 | pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE-1); |
| 1709 | |
| 1710 | napi_enable(&mac->napi); |
| 1711 | netif_start_queue(dev); |
| 1712 | pasemi_mac_intf_enable(mac); |
| 1713 | } |
| 1714 | |
| 1715 | return ret; |
| 1716 | } |
| 1717 | |
| 1718 | static const struct net_device_ops pasemi_netdev_ops = { |
| 1719 | .ndo_open = pasemi_mac_open, |
| 1720 | .ndo_stop = pasemi_mac_close, |
| 1721 | .ndo_start_xmit = pasemi_mac_start_tx, |
| 1722 | .ndo_set_multicast_list = pasemi_mac_set_rx_mode, |
| 1723 | .ndo_set_mac_address = pasemi_mac_set_mac_addr, |
| 1724 | .ndo_change_mtu = pasemi_mac_change_mtu, |
| 1725 | .ndo_validate_addr = eth_validate_addr, |
| 1726 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 1727 | .ndo_poll_controller = pasemi_mac_netpoll, |
| 1728 | #endif |
| 1729 | }; |
| 1730 | |
| 1731 | static int __devinit |
| 1732 | pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
| 1733 | { |
| 1734 | struct net_device *dev; |
| 1735 | struct pasemi_mac *mac; |
| 1736 | int err, ret; |
| 1737 | |
| 1738 | err = pci_enable_device(pdev); |
| 1739 | if (err) |
| 1740 | return err; |
| 1741 | |
| 1742 | dev = alloc_etherdev(sizeof(struct pasemi_mac)); |
| 1743 | if (dev == NULL) { |
| 1744 | dev_err(&pdev->dev, |
| 1745 | "pasemi_mac: Could not allocate ethernet device.\n"); |
| 1746 | err = -ENOMEM; |
| 1747 | goto out_disable_device; |
| 1748 | } |
| 1749 | |
| 1750 | pci_set_drvdata(pdev, dev); |
| 1751 | SET_NETDEV_DEV(dev, &pdev->dev); |
| 1752 | |
| 1753 | mac = netdev_priv(dev); |
| 1754 | |
| 1755 | mac->pdev = pdev; |
| 1756 | mac->netdev = dev; |
| 1757 | |
| 1758 | netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64); |
| 1759 | |
| 1760 | dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG | |
| 1761 | NETIF_F_HIGHDMA | NETIF_F_GSO; |
| 1762 | |
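	/* Configure the software LRO manager: aggregate up to LRO_MAX_AGGR
	 * packets per session, with at most MAX_LRO_DESCRIPTORS concurrent
	 * sessions, driven from NAPI context.
	 */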
| 1763 | mac->lro_mgr.max_aggr = LRO_MAX_AGGR; |
| 1764 | mac->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS; |
| 1765 | mac->lro_mgr.lro_arr = mac->lro_desc; |
| 1766 | mac->lro_mgr.get_skb_header = get_skb_hdr; |
| 1767 | mac->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; |
| 1768 | mac->lro_mgr.dev = mac->netdev; |
| 1769 | mac->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; |
| 1770 | mac->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; |
| 1771 | |
| 1772 | |
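	/* The MAC relies on the shared on-chip DMA controller and I/O bridge;
	 * both are separate PCI functions, so look them up by device ID.
	 */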
| 1773 | mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); |
| 1774 | if (!mac->dma_pdev) { |
| 1775 | dev_err(&mac->pdev->dev, "Can't find DMA Controller\n"); |
| 1776 | err = -ENODEV; |
| 1777 | goto out; |
| 1778 | } |
| 1779 | |
| 1780 | mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL); |
| 1781 | if (!mac->iob_pdev) { |
| 1782 | dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n"); |
| 1783 | err = -ENODEV; |
| 1784 | goto out; |
| 1785 | } |
| 1786 | |
| 1787 | /* get mac addr from device tree */ |
| 1788 | if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) { |
| 1789 | err = -ENODEV; |
| 1790 | goto out; |
| 1791 | } |
| 1792 | memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr)); |
| 1793 | |
| 1794 | ret = mac_to_intf(mac); |
| 1795 | if (ret < 0) { |
| 1796 | dev_err(&mac->pdev->dev, "Can't map DMA interface\n"); |
| 1797 | err = -ENODEV; |
| 1798 | goto out; |
| 1799 | } |
| 1800 | mac->dma_if = ret; |
| 1801 | |
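	/* The PCI device ID tells the flavours apart: 0xa005 is a 1G (GMAC)
	 * interface, 0xa006 a 10G (XAUI) interface.
	 */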
| 1802 | switch (pdev->device) { |
| 1803 | case 0xa005: |
| 1804 | mac->type = MAC_TYPE_GMAC; |
| 1805 | break; |
| 1806 | case 0xa006: |
| 1807 | mac->type = MAC_TYPE_XAUI; |
| 1808 | break; |
| 1809 | default: |
| 1810 | err = -ENODEV; |
| 1811 | goto out; |
| 1812 | } |
| 1813 | |
| 1814 | dev->netdev_ops = &pasemi_netdev_ops; |
| 1815 | dev->mtu = PE_DEF_MTU; |
	/* default MTU + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 2 64B cachelines */
| 1817 | mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128; |
| 1818 | |
| 1819 | dev->ethtool_ops = &pasemi_mac_ethtool_ops; |
| 1820 | |
| 1821 | if (err) |
| 1822 | goto out; |
| 1823 | |
	/* Enable most messages by default; the "debug" module parameter
	 * overrides this when set.
	 */
	mac->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
| 1828 | |
| 1829 | err = register_netdev(dev); |
| 1830 | |
| 1831 | if (err) { |
| 1832 | dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n", |
| 1833 | err); |
| 1834 | goto out; |
	} else if (netif_msg_probe(mac))
| 1836 | printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %pM\n", |
| 1837 | dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI", |
| 1838 | mac->dma_if, dev->dev_addr); |
| 1839 | |
| 1840 | return err; |
| 1841 | |
| 1842 | out: |
| 1843 | if (mac->iob_pdev) |
| 1844 | pci_dev_put(mac->iob_pdev); |
| 1845 | if (mac->dma_pdev) |
| 1846 | pci_dev_put(mac->dma_pdev); |
| 1847 | |
| 1848 | free_netdev(dev); |
| 1849 | out_disable_device: |
| 1850 | pci_disable_device(pdev); |
	return err;
}
| 1854 | |
| 1855 | static void __devexit pasemi_mac_remove(struct pci_dev *pdev) |
| 1856 | { |
| 1857 | struct net_device *netdev = pci_get_drvdata(pdev); |
| 1858 | struct pasemi_mac *mac; |
| 1859 | |
| 1860 | if (!netdev) |
| 1861 | return; |
| 1862 | |
| 1863 | mac = netdev_priv(netdev); |
| 1864 | |
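	/* Unregister the netdev first so the stack stops using the interface
	 * before its DMA channels and PCI references are released.
	 */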
| 1865 | unregister_netdev(netdev); |
| 1866 | |
| 1867 | pci_disable_device(pdev); |
| 1868 | pci_dev_put(mac->dma_pdev); |
| 1869 | pci_dev_put(mac->iob_pdev); |
| 1870 | |
| 1871 | pasemi_dma_free_chan(&mac->tx->chan); |
| 1872 | pasemi_dma_free_chan(&mac->rx->chan); |
| 1873 | |
| 1874 | pci_set_drvdata(pdev, NULL); |
| 1875 | free_netdev(netdev); |
| 1876 | } |
| 1877 | |
| 1878 | static struct pci_device_id pasemi_mac_pci_tbl[] = { |
| 1879 | { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) }, |
| 1880 | { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) }, |
| 1881 | { }, |
| 1882 | }; |
| 1883 | |
| 1884 | MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl); |
| 1885 | |
| 1886 | static struct pci_driver pasemi_mac_driver = { |
| 1887 | .name = "pasemi_mac", |
| 1888 | .id_table = pasemi_mac_pci_tbl, |
| 1889 | .probe = pasemi_mac_probe, |
| 1890 | .remove = __devexit_p(pasemi_mac_remove), |
| 1891 | }; |
| 1892 | |
| 1893 | static void __exit pasemi_mac_cleanup_module(void) |
| 1894 | { |
| 1895 | pci_unregister_driver(&pasemi_mac_driver); |
| 1896 | } |
| 1897 | |
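/* Initialize the shared DMA support before registering the PCI driver;
 * probing a MAC is pointless if the DMA engine is unavailable.
 */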
static int __init pasemi_mac_init_module(void)
| 1899 | { |
| 1900 | int err; |
| 1901 | |
| 1902 | err = pasemi_dma_init(); |
| 1903 | if (err) |
| 1904 | return err; |
| 1905 | |
| 1906 | return pci_register_driver(&pasemi_mac_driver); |
| 1907 | } |
| 1908 | |
| 1909 | module_init(pasemi_mac_init_module); |
| 1910 | module_exit(pasemi_mac_cleanup_module); |