/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>

#include "mtk_eth_soc.h"

static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define MTK_ETHTOOL_STAT(x) { #x, \
			     offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};
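
/* Each .offset above is an index in units of u64 into struct mtk_hw_stats,
 * so mtk_get_ethtool_stats() below can copy the counters out with plain
 * u64 pointer arithmetic instead of one line of code per field.
 */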

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		usleep_range(10, 20);
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}
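
/* MDIO transactions go through the PHY indirect access control (IAC)
 * register: the 16-bit data sits in the low bits, the PHY register and
 * address are shifted in via PHY_IAC_REG_SHIFT/PHY_IAC_ADDR_SHIFT, and
 * PHY_IAC_ACCESS doubles as the busy flag polled above. The exact bit
 * layout is defined in mtk_eth_soc.h.
 */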
u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
		    u32 phy_register, u32 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
		(phy_register << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
		(phy_reg << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT),
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

static void mtk_phy_link_adjust(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
		  MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
		  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
		  MAC_MCR_BACKPR_EN;

	switch (mac->phy_dev->speed) {
	case SPEED_1000:
		mcr |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr |= MAC_MCR_SPEED_100;
		break;
	}

	if (mac->phy_dev->link)
		mcr |= MAC_MCR_FORCE_LINK;

	if (mac->phy_dev->duplex)
		mcr |= MAC_MCR_FORCE_DPX;

	if (mac->phy_dev->pause)
		mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC;

	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));

	if (mac->phy_dev->link)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}

static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
				struct device_node *phy_node)
{
	const __be32 *_addr = NULL;
	struct phy_device *phydev;
	int phy_mode, addr;

	_addr = of_get_property(phy_node, "reg", NULL);

	if (!_addr || (be32_to_cpu(*_addr) >= 0x20)) {
		pr_err("%s: invalid phy address\n", phy_node->name);
		return -EINVAL;
	}
	addr = be32_to_cpu(*_addr);
	phy_mode = of_get_phy_mode(phy_node);
	if (phy_mode < 0) {
		dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
		return -EINVAL;
	}

	phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
				mtk_phy_link_adjust, 0, phy_mode);
	if (!phydev) {
		dev_err(eth->dev, "could not connect to PHY\n");
		return -ENODEV;
	}

	dev_info(eth->dev,
		 "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
		 mac->id, phydev_name(phydev), phydev->phy_id,
		 phydev->drv->name);

	mac->phy_dev = phydev;

	return 0;
}

static int mtk_phy_connect(struct mtk_mac *mac)
{
	struct mtk_eth *eth = mac->hw;
	struct device_node *np;
	u32 val, ge_mode;
	int err;

	np = of_parse_phandle(mac->of_node, "phy-handle", 0);
	if (!np)
		return -ENODEV;

	switch (of_get_phy_mode(np)) {
	case PHY_INTERFACE_MODE_RGMII:
		ge_mode = 0;
		break;
	case PHY_INTERFACE_MODE_MII:
		ge_mode = 1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		ge_mode = 2;
		break;
	default:
		dev_err(eth->dev, "invalid phy_mode\n");
		return -EINVAL;
	}

	/* put the gmac into the right mode */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
	val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
	val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

	/* a failed connect would leave mac->phy_dev NULL, so check it
	 * before touching the phy below
	 */
	err = mtk_phy_connect_node(eth, mac, np);
	if (err)
		return err;

	mac->phy_dev->autoneg = AUTONEG_ENABLE;
	mac->phy_dev->speed = 0;
	mac->phy_dev->duplex = 0;
	mac->phy_dev->supported &= PHY_BASIC_FEATURES;
	mac->phy_dev->advertising = mac->phy_dev->supported |
				    ADVERTISED_Autoneg;
	phy_start_aneg(mac->phy_dev);

	return 0;
}

static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int err;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found\n", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		err = 0;
		goto err_put_node;
	}

	eth->mii_bus = mdiobus_alloc();
	if (!eth->mii_bus) {
		err = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
	err = of_mdiobus_register(eth->mii_bus, mii_np);
	if (err)
		goto err_free_bus;

	return 0;

err_free_bus:
	mdiobus_free(eth->mii_bus);

err_put_node:
	of_node_put(mii_np);
	eth->mii_bus = NULL;
	return err;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
	of_node_put(eth->mii_bus->dev.of_node);
	mdiobus_free(eth->mii_bus);
}

static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
{
	u32 val;

	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
	mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
	/* flush write */
	mtk_r32(eth, MTK_QDMA_INT_MASK);
}

static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
{
	u32 val;

	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
	mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
	/* flush write */
	mtk_r32(eth, MTK_QDMA_INT_MASK);
}

static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	const char *macaddr = dev->dev_addr;
	unsigned long flags;

	if (ret)
		return ret;

	spin_lock_irqsave(&mac->hw->page_lock, flags);
	mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
		MTK_GDMA_MAC_ADRH(mac->id));
	mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
		(macaddr[4] << 8) | macaddr[5],
		MTK_GDMA_MAC_ADRL(mac->id));
	spin_unlock_irqrestore(&mac->hw->page_lock, flags);

	return 0;
}
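
/* The GDMA blocks keep 32-bit MIB counters in a register window starting
 * at MTK_GDM1_TX_GBCNT, one window per MAC spaced MTK_STAT_OFFSET apart.
 * The helper below folds them into 64-bit software counters; the syncp
 * sequence counter lets 32-bit readers fetch consistent 64-bit values.
 */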

void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = MTK_GDM1_TX_GBCNT;
	u64 stats;

	base += hw_stats->reg_offset;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
	stats = mtk_r32(mac->hw, base + 0x04);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
	hw_stats->rx_flow_control_packets +=
		mtk_r32(mac->hw, base + 0x24);
	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
	stats = mtk_r32(mac->hw, base + 0x34);
	if (stats)
		hw_stats->tx_bytes += (stats << 32);
	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;

	return storage;
}

static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
				   struct mtk_rx_dma *dma_rxd)
{
	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}

/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	dma_addr_t phy_ring_head, phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	eth->scratch_ring = dma_alloc_coherent(eth->dev,
					       cnt * sizeof(struct mtk_tx_dma),
					       &phy_ring_head,
					       GFP_ATOMIC | __GFP_ZERO);
	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
				    GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
	phy_ring_tail = phy_ring_head +
			(sizeof(struct mtk_tx_dma) * (cnt - 1));

	for (i = 0; i < cnt; i++) {
		eth->scratch_ring[i].txd1 =
					(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
		if (i < cnt - 1)
			eth->scratch_ring[i].txd2 = (phy_ring_head +
				((i + 1) * sizeof(struct mtk_tx_dma)));
		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
	}

	mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}
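
/* Descriptors live in one coherent block: ring->dma is the CPU virtual
 * base and ring->phys the matching bus address, so a descriptor's bus
 * address can be turned back into a pointer with simple offset math.
 */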
static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	void *ret = ring->dma;

	return ret + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    struct mtk_tx_dma *txd)
{
	int idx = txd - ring->dma;

	return &ring->buf[idx];
}

static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
{
	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
		dma_unmap_single(dev,
				 dma_unmap_addr(tx_buf, dma_addr0),
				 dma_unmap_len(tx_buf, dma_len0),
				 DMA_TO_DEVICE);
	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
		dma_unmap_page(dev,
			       dma_unmap_addr(tx_buf, dma_addr0),
			       dma_unmap_len(tx_buf, dma_len0),
			       DMA_TO_DEVICE);
	}
	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
		dev_kfree_skb_any(tx_buf->skb);
	tx_buf->skb = NULL;
}
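
/* TX descriptor layout as used below: txd1 holds the buffer bus address,
 * txd2 links to the next descriptor, txd3 carries the length plus the
 * SWC/LS0 control bits, and txd4 selects the forward port and the
 * checksum/VLAN/TSO offload flags.
 */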
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_buf *tx_buf;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	int i, n_desc = 1;
	u32 txd4 = 0;

	itxd = ring->next_free;
	if (itxd == ring->last_free)
		return -ENOMEM;

	/* set the forward port */
	txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;

	tx_buf = mtk_desc_to_tx_buf(ring, itxd);
	memset(tx_buf, 0, sizeof(*tx_buf));

	if (gso)
		txd4 |= TX_DMA_TSO;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb))
		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);

	mapped_addr = dma_map_single(&dev->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
		return -ENOMEM;

	WRITE_ONCE(itxd->txd1, mapped_addr);
	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
	dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));

	/* TX SG offload */
	txd = itxd;
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool last_frag = false;
			unsigned int frag_map_size;

			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
			if (txd == ring->last_free)
				goto err_dma;

			n_desc++;
			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
				goto err_dma;

			if (i == nr_frags - 1 &&
			    (frag_size - frag_map_size) == 0)
				last_frag = true;

			WRITE_ONCE(txd->txd1, mapped_addr);
			WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
					       TX_DMA_PLEN0(frag_map_size) |
					       last_frag * TX_DMA_LS0));
			WRITE_ONCE(txd->txd4, 0);

			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf = mtk_desc_to_tx_buf(ring, txd);
			memset(tx_buf, 0, sizeof(*tx_buf));

			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
			frag_size -= frag_map_size;
			offset += frag_map_size;
		}
	}

	/* store skb to cleanup */
	tx_buf->skb = skb;

	WRITE_ONCE(itxd->txd4, txd4);
	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
				(!nr_frags * TX_DMA_LS0)));

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
		mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, txd);

		/* unmap dma */
		mtk_tx_unmap(&dev->dev, tx_buf);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
	} while (itxd != txd);

	return -ENOMEM;
}
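
/* Worst-case descriptor count for one skb: the linear head takes one
 * descriptor, and every fragment of a GSO skb may have to be split into
 * MTK_TX_DMA_BUF_LEN-sized pieces, since that is the largest buffer a
 * single descriptor can carry.
 */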
static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	struct skb_frag_struct *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_wake_queue(eth->netdev[i]);
	}
}

static void mtk_stop_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_stop_queue(eth->netdev[i]);
	}
}

static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	unsigned long flags;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock_irqsave(&eth->page_lock, flags);

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		mtk_stop_queue(eth);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock_irqrestore(&eth->page_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
		mtk_stop_queue(eth);
		if (unlikely(atomic_read(&ring->free_count) >
			     ring->thresh))
			mtk_wake_queue(eth);
	}
	spin_unlock_irqrestore(&eth->page_lock, flags);

	return NETDEV_TX_OK;

drop:
	spin_unlock_irqrestore(&eth->page_lock, flags);
	stats->tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
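
/* RX completion: a replacement buffer is allocated and mapped before the
 * received one is handed up as an skb, so the ring never ends up with a
 * hole; if allocation fails the descriptor is simply recycled and the
 * frame is counted as dropped.
 */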
static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth, u32 rx_intr)
{
	struct mtk_rx_ring *ring = &eth->rx_ring;
	int idx = ring->calc_idx;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma *rxd, trxd;
	int done = 0;

	while (done < budget) {
		struct net_device *netdev;
		unsigned int pktlen;
		dma_addr_t dma_addr;
		int mac = 0;

		idx = NEXT_RX_DESP_IDX(idx);
		rxd = &ring->dma[idx];
		data = ring->data[idx];

		mtk_rx_get_desc(&trxd, rxd);
		if (!(trxd.rxd2 & RX_DMA_DONE))
			break;

		/* find out which mac the packet came from. values start at 1 */
		mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
		      RX_DMA_FPORT_MASK;
		mac--;

		netdev = eth->netdev[mac];

		/* alloc new buffer */
		new_data = napi_alloc_frag(ring->frag_size);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(&eth->netdev[mac]->dev,
					  new_data + NET_SKB_PAD,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
			skb_free_frag(new_data);
			goto release_desc;
		}

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			put_page(virt_to_head_page(new_data));
			goto release_desc;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		dma_unmap_single(&netdev->dev, trxd.rxd1,
				 ring->buf_size, DMA_FROM_DEVICE);
		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);
		if (trxd.rxd4 & RX_DMA_L4_VALID)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
		    RX_DMA_VID(trxd.rxd3))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       RX_DMA_VID(trxd.rxd3));
		napi_gro_receive(napi, skb);

		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);

		ring->calc_idx = idx;
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_w32(eth, ring->calc_idx, MTK_QRX_CRX_IDX0);
		done++;
	}

	if (done < budget)
		mtk_w32(eth, rx_intr, MTK_QMTK_INT_STATUS);

	return done;
}

static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	int total = 0, done[MTK_MAX_DEVS];
	unsigned int bytes[MTK_MAX_DEVS];
	u32 cpu, dma;
	static int condition;
	int i;

	memset(done, 0, sizeof(done));
	memset(bytes, 0, sizeof(bytes));

	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);

	desc = mtk_qdma_phys_to_virt(ring, cpu);

	while ((cpu != dma) && budget) {
		u32 next_cpu = desc->txd2;
		int mac;

		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
			break;

		mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
		      TX_DMA_FPORT_MASK;
		mac--;

		tx_buf = mtk_desc_to_tx_buf(ring, desc);
		skb = tx_buf->skb;
		if (!skb) {
			condition = 1;
			break;
		}

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[mac] += skb->len;
			done[mac]++;
			budget--;
		}
		mtk_tx_unmap(eth->dev, tx_buf);

		ring->last_free->txd2 = next_cpu;
		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = next_cpu;
	}

	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i] || !done[i])
			continue;
		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
		total += done[i];
	}

	/* read hw index again make sure no new tx packet */
	if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
		*tx_again = true;
	else
		mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);

	if (!total)
		return 0;

	if (atomic_read(&ring->free_count) > ring->thresh)
		mtk_wake_queue(eth);

	return total;
}
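
/* One NAPI instance services both MACs: TX completions are reaped first,
 * then RX. The interrupt status is re-read before napi_complete() so a
 * frame that arrived after the last pass cannot be stranded with the
 * interrupts still masked.
 */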
static int mtk_poll(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	u32 status, status2, mask, tx_intr, rx_intr, status_intr;
	int tx_done, rx_done;
	bool tx_again = false;

	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
	status2 = mtk_r32(eth, MTK_INT_STATUS2);
	tx_intr = MTK_TX_DONE_INT;
	rx_intr = MTK_RX_DONE_INT;
	status_intr = (MTK_GDM1_AF | MTK_GDM2_AF);
	tx_done = 0;
	rx_done = 0;

	if (status & tx_intr)
		tx_done = mtk_poll_tx(eth, budget, &tx_again);

	if (status & rx_intr)
		rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);

	if (unlikely(status2 & status_intr)) {
		mtk_stats_update(eth);
		mtk_w32(eth, status_intr, MTK_INT_STATUS2);
	}

	if (unlikely(netif_msg_intr(eth))) {
		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
		netdev_info(eth->netdev[0],
			    "done tx %d, rx %d, intr 0x%08x/0x%x\n",
			    tx_done, rx_done, status, mask);
	}

	if (tx_again || rx_done == budget)
		return budget;

	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
	if (status & (tx_intr | rx_intr))
		return budget;

	napi_complete(napi);
	mtk_irq_enable(eth, tx_intr | rx_intr);

	return rx_done;
}
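
/* The TX ring is wired up as a closed loop of linked descriptors.
 * free_count starts at MTK_DMA_SIZE - 2 because next_free and last_free
 * must never meet (mtk_tx_map() bails out when they would); keeping a
 * gap descriptor makes the full and empty states distinguishable.
 */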
static int mtk_tx_alloc(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = sizeof(*ring->dma);

	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
			    GFP_KERNEL);
	if (!ring->buf)
		goto no_tx_mem;

	ring->dma = dma_alloc_coherent(eth->dev,
				       MTK_DMA_SIZE * sz,
				       &ring->phys,
				       GFP_ATOMIC | __GFP_ZERO);
	if (!ring->dma)
		goto no_tx_mem;

	memset(ring->dma, 0, MTK_DMA_SIZE * sz);
	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		ring->dma[i].txd2 = next_ptr;
		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
	}

	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = &ring->dma[0];
	ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
	ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
			   MAX_SKB_FRAGS);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
	mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
	mtk_w32(eth,
		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
		MTK_QTX_CRX_PTR);
	mtk_w32(eth,
		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
		MTK_QTX_DRX_PTR);

	return 0;

no_tx_mem:
	return -ENOMEM;
}

static void mtk_tx_clean(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i;

	if (ring->buf) {
		for (i = 0; i < MTK_DMA_SIZE; i++)
			mtk_tx_unmap(eth->dev, &ring->buf[i]);
		kfree(ring->buf);
		ring->buf = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}

static int mtk_rx_alloc(struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring = &eth->rx_ring;
	int i;

	ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
	ring->buf_size = mtk_max_buf_size(ring->frag_size);
	ring->data = kcalloc(MTK_DMA_SIZE, sizeof(*ring->data),
			     GFP_KERNEL);
	if (!ring->data)
		return -ENOMEM;

	for (i = 0; i < MTK_DMA_SIZE; i++) {
		ring->data[i] = netdev_alloc_frag(ring->frag_size);
		if (!ring->data[i])
			return -ENOMEM;
	}

	ring->dma = dma_alloc_coherent(eth->dev,
				       MTK_DMA_SIZE * sizeof(*ring->dma),
				       &ring->phys,
				       GFP_ATOMIC | __GFP_ZERO);
	if (!ring->dma)
		return -ENOMEM;

	for (i = 0; i < MTK_DMA_SIZE; i++) {
		dma_addr_t dma_addr = dma_map_single(eth->dev,
				ring->data[i] + NET_SKB_PAD,
				ring->buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
			return -ENOMEM;
		ring->dma[i].rxd1 = (unsigned int)dma_addr;

		ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
	}
	ring->calc_idx = MTK_DMA_SIZE - 1;
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, eth->rx_ring.phys, MTK_QRX_BASE_PTR0);
	mtk_w32(eth, MTK_DMA_SIZE, MTK_QRX_MAX_CNT0);
	mtk_w32(eth, eth->rx_ring.calc_idx, MTK_QRX_CRX_IDX0);
	mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
	mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));

	return 0;
}

static void mtk_rx_clean(struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring = &eth->rx_ring;
	int i;

	if (ring->data && ring->dma) {
		for (i = 0; i < MTK_DMA_SIZE; i++) {
			if (!ring->data[i])
				continue;
			if (!ring->dma[i].rxd1)
				continue;
			dma_unmap_single(eth->dev,
					 ring->dma[i].rxd1,
					 ring->buf_size,
					 DMA_FROM_DEVICE);
			skb_free_frag(ring->data[i]);
		}
		kfree(ring->data);
		ring->data = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}

/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
		      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
			return 0;
		if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
			break;
	}

	dev_err(eth->dev, "DMA init timeout\n");
	return -1;
}

static int mtk_dma_init(struct mtk_eth *eth)
{
	int err;

	if (mtk_dma_busy_wait(eth))
		return -EBUSY;

	/* QDMA needs scratch memory for internal reordering of the
	 * descriptors
	 */
	err = mtk_init_fq_dma(eth);
	if (err)
		return err;

	err = mtk_tx_alloc(eth);
	if (err)
		return err;

	err = mtk_rx_alloc(eth);
	if (err)
		return err;

	/* Enable random early drop and set drop threshold automatically */
	mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
		MTK_QDMA_FC_THRES);
	mtk_w32(eth, 0x0, MTK_QDMA_HRED2);

	return 0;
}

static void mtk_dma_free(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++)
		if (eth->netdev[i])
			netdev_reset_queue(eth->netdev[i]);
	mtk_tx_clean(eth);
	mtk_rx_clean(eth);
	kfree(eth->scratch_head);
}

static void mtk_tx_timeout(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev,
		  "transmit timed out\n");
	schedule_work(&eth->pending_work);
}
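
/* The hard IRQ handler only acks/masks and defers all work to NAPI;
 * mtk_poll() re-enables the TX/RX done interrupts once the rings have
 * been drained below the budget.
 */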
static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;
	u32 status;

	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
	if (unlikely(!status))
		return IRQ_NONE;

	if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) {
		if (likely(napi_schedule_prep(&eth->rx_napi)))
			__napi_schedule(&eth->rx_napi);
	} else {
		mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
	}
	mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT));

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;

	mtk_irq_disable(eth, int_mask);
	mtk_handle_irq(dev->irq, dev);
	mtk_irq_enable(eth, int_mask);
}
#endif

static int mtk_start_dma(struct mtk_eth *eth)
{
	int err;

	err = mtk_dma_init(eth);
	if (err) {
		mtk_dma_free(eth);
		return err;
	}

	mtk_w32(eth,
		MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
		MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
		MTK_RX_BT_32DWORDS,
		MTK_QDMA_GLO_CFG);

	return 0;
}

static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	/* we run 2 netdevs on the same dma ring so we only bring it up once */
	if (!atomic_read(&eth->dma_refcnt)) {
		int err = mtk_start_dma(eth);

		if (err)
			return err;

		napi_enable(&eth->rx_napi);
		mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
	}
	atomic_inc(&eth->dma_refcnt);

	phy_start(mac->phy_dev);
	netif_start_queue(dev);

	return 0;
}

static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	unsigned long flags;
	u32 val;
	int i;

	/* stop the dma engine */
	spin_lock_irqsave(&eth->page_lock, flags);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_irqrestore(&eth->page_lock, flags);

	/* wait for dma stop */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
			msleep(20);
			continue;
		}
		break;
	}
}

static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	netif_tx_disable(dev);
	phy_stop(mac->phy_dev);

	/* only shutdown DMA if this is the last user */
	if (!atomic_dec_and_test(&eth->dma_refcnt))
		return 0;

	mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
	napi_disable(&eth->rx_napi);

	mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);

	mtk_dma_free(eth);

	return 0;
}

static int __init mtk_hw_init(struct mtk_eth *eth)
{
	int err, i;

	/* reset the frame engine */
	reset_control_assert(eth->rstc);
	usleep_range(10, 20);
	reset_control_deassert(eth->rstc);
	usleep_range(10, 20);

	/* Set GE2 driving and slew rate */
	regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

	/* set GE2 TDSEL */
	regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

	/* set GE2 TUNE */
	regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);

	/* GE1, Force 1000M/FD, FC ON */
	mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));

	/* GE2, Force 1000M/FD, FC ON */
	mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));

	/* Enable RX VLAN offloading */
	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);

	err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
			       dev_name(eth->dev), eth);
	if (err)
		return err;

	err = mtk_mdio_init(eth);
	if (err)
		return err;

	/* disable delay and normal interrupt */
	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
	mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
	mtk_w32(eth, 0, MTK_RST_GL);

	/* FE int grouping */
	mtk_w32(eth, 0, MTK_FE_INT_GRP);

	for (i = 0; i < 2; i++) {
		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));

		/* setup the forward port to send frame to QDMA */
		val &= ~0xffff;
		val |= 0x5555;

		/* Enable RX checksum */
		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

		/* setup the mac dma */
		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
	}

	return 0;
}

static int __init mtk_init(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *mac_addr;

	mac_addr = of_get_mac_address(mac->of_node);
	if (mac_addr)
		ether_addr_copy(dev->dev_addr, mac_addr);

	/* If the mac address is invalid, use random mac address */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		random_ether_addr(dev->dev_addr);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
		dev->addr_assign_type = NET_ADDR_RANDOM;
	}

	return mtk_phy_connect(mac);
}

static void mtk_uninit(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phy_disconnect(mac->phy_dev);
	mtk_mdio_cleanup(eth);
	mtk_irq_disable(eth, ~0);
	free_irq(dev->irq, dev);
}

static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
	default:
		break;
	}

	return -EOPNOTSUPP;
}
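
/* TX timeout recovery: under the RTNL lock, every running netdev is torn
 * down so the shared DMA engine really stops, then each one is brought
 * back up again.
 */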
static void mtk_pending_work(struct work_struct *work)
{
	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
	int err, i;
	unsigned long restart = 0;

	rtnl_lock();

	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		__set_bit(i, &restart);
	}

	/* restart DMA and enable IRQs */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!test_bit(i, &restart))
			continue;
		err = mtk_open(eth->netdev[i]);
		if (err) {
			netif_alert(eth, ifup, eth->netdev[i],
				    "Driver up/down cycle failed, closing device.\n");
			dev_close(eth->netdev[i]);
		}
	}
	rtnl_unlock();
}

static int mtk_cleanup(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;

		unregister_netdev(eth->netdev[i]);
		free_netdev(eth->netdev[i]);
	}
	cancel_work_sync(&eth->pending_work);

	return 0;
}

static int mtk_get_settings(struct net_device *dev,
			    struct ethtool_cmd *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int err;

	err = phy_read_status(mac->phy_dev);
	if (err)
		return -ENODEV;

	return phy_ethtool_gset(mac->phy_dev, cmd);
}

static int mtk_set_settings(struct net_device *dev,
			    struct ethtool_cmd *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (cmd->phy_address != mac->phy_dev->mdio.addr) {
		mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
					       cmd->phy_address);
		if (!mac->phy_dev)
			return -ENODEV;
	}

	return phy_ethtool_sset(mac->phy_dev, cmd);
}

static void mtk_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct mtk_mac *mac = netdev_priv(dev);

	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}

static u32 mtk_get_msglevel(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
	struct mtk_mac *mac = netdev_priv(dev);

	mac->hw->msg_enable = value;
}

static int mtk_nway_reset(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return genphy_restart_aneg(mac->phy_dev);
}

static u32 mtk_get_link(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int err;

	err = genphy_update_link(mac->phy_dev);
	if (err)
		return ethtool_op_get_link(dev);

	return mac->phy_dev->link;
}

static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mtk_ethtool_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock(&hwstats->stats_lock);
		}
	}

	do {
		data_src = (u64 *)hwstats;
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}

static const struct ethtool_ops mtk_ethtool_ops = {
	.get_settings		= mtk_get_settings,
	.set_settings		= mtk_set_settings,
	.get_drvinfo		= mtk_get_drvinfo,
	.get_msglevel		= mtk_get_msglevel,
	.set_msglevel		= mtk_set_msglevel,
	.nway_reset		= mtk_nway_reset,
	.get_link		= mtk_get_link,
	.get_strings		= mtk_get_strings,
	.get_sset_count		= mtk_get_sset_count,
	.get_ethtool_stats	= mtk_get_ethtool_stats,
};

static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init		= mtk_init,
	.ndo_uninit		= mtk_uninit,
	.ndo_open		= mtk_open,
	.ndo_stop		= mtk_stop,
	.ndo_start_xmit		= mtk_start_xmit,
	.ndo_set_mac_address	= mtk_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= mtk_do_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_tx_timeout		= mtk_tx_timeout,
	.ndo_get_stats64	= mtk_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mtk_poll_controller,
#endif
};

static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	struct mtk_mac *mac;
	const __be32 *_id = of_get_property(np, "reg", NULL);
	int id, err;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id >= MTK_MAC_COUNT) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->watchdog_timeo = HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;
	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= MTK_HW_FEATURES;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	err = register_netdev(eth->netdev[id]);
	if (err) {
		dev_err(eth->dev, "error bringing up device\n");
		goto free_netdev;
	}
	eth->netdev[id]->irq = eth->irq;
	netif_info(eth, probe, eth->netdev[id],
		   "mediatek frame engine at 0x%08lx, irq %d\n",
		   eth->netdev[id]->base_addr, eth->netdev[id]->irq);

	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	return err;
}

static int mtk_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct device_node *mac_np;
	const struct of_device_id *match;
	struct mtk_soc_data *soc;
	struct mtk_eth *eth;
	int err;

	match = of_match_device(of_mtk_match, &pdev->dev);
	soc = (struct mtk_soc_data *)match->data;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	spin_lock_init(&eth->page_lock);

	eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						      "mediatek,ethsys");
	if (IS_ERR(eth->ethsys)) {
		dev_err(&pdev->dev, "no ethsys regmap found\n");
		return PTR_ERR(eth->ethsys);
	}

	eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						    "mediatek,pctl");
	if (IS_ERR(eth->pctl)) {
		dev_err(&pdev->dev, "no pctl regmap found\n");
		return PTR_ERR(eth->pctl);
	}

	eth->rstc = devm_reset_control_get(&pdev->dev, "eth");
	if (IS_ERR(eth->rstc)) {
		dev_err(&pdev->dev, "no eth reset found\n");
		return PTR_ERR(eth->rstc);
	}

	eth->irq = platform_get_irq(pdev, 0);
	if (eth->irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource found\n");
		return -ENXIO;
	}

	eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
	eth->clk_esw = devm_clk_get(&pdev->dev, "esw");
	eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1");
	eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2");
	if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) ||
	    IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif))
		return -ENODEV;

	clk_prepare_enable(eth->clk_ethif);
	clk_prepare_enable(eth->clk_esw);
	clk_prepare_enable(eth->clk_gp1);
	clk_prepare_enable(eth->clk_gp2);

	eth->dev = &pdev->dev;
	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
	INIT_WORK(&eth->pending_work, mtk_pending_work);

	err = mtk_hw_init(eth);
	if (err)
		return err;

	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
			continue;

		if (!of_device_is_available(mac_np))
			continue;

		err = mtk_add_mac(eth, mac_np);
		if (err)
			goto err_free_dev;
	}

	/* we run 2 devices on the same DMA ring so we need a dummy device
	 * for NAPI to work
	 */
	init_dummy_netdev(&eth->dummy_dev);
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
		       MTK_NAPI_WEIGHT);

	platform_set_drvdata(pdev, eth);

	return 0;

err_free_dev:
	mtk_cleanup(eth);
	return err;
}

static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);

	clk_disable_unprepare(eth->clk_ethif);
	clk_disable_unprepare(eth->clk_esw);
	clk_disable_unprepare(eth->clk_gp1);
	clk_disable_unprepare(eth->clk_gp2);

	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt7623-eth" },
	{},
};

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.owner = THIS_MODULE,
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");