/*
 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
 *
 * This file is free software: you may copy, redistribute and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation, either version 2 of the License, or (at your
 * option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/ip6_checksum.h>
#include <linux/crc32.h>
#include "alx.h"
#include "hw.h"
#include "reg.h"

const char alx_drv_name[] = "alx";

static void alx_free_txbuf(struct alx_priv *alx, int entry)
{
	struct alx_buffer *txb = &alx->txq.bufs[entry];

	if (dma_unmap_len(txb, size)) {
		dma_unmap_single(&alx->hw.pdev->dev,
				 dma_unmap_addr(txb, dma),
				 dma_unmap_len(txb, size),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(txb, size, 0);
	}

	if (txb->skb) {
		dev_kfree_skb_any(txb->skb);
		txb->skb = NULL;
	}
}

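/* Refill the RX free-descriptor ring with fresh buffers. write_idx
 * chases read_idx around the ring: a slot is only (re)filled while its
 * buffer pointer is empty and the next slot is not the current read
 * position. The new producer index is handed to the hardware via
 * ALX_RFD_PIDX after a write barrier.
 */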
static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
{
	struct alx_rx_queue *rxq = &alx->rxq;
	struct sk_buff *skb;
	struct alx_buffer *cur_buf;
	dma_addr_t dma;
	u16 cur, next, count = 0;

	next = cur = rxq->write_idx;
	if (++next == alx->rx_ringsz)
		next = 0;
	cur_buf = &rxq->bufs[cur];

	while (!cur_buf->skb && next != rxq->read_idx) {
		struct alx_rfd *rfd = &rxq->rfd[cur];

		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
		if (!skb)
			break;
		dma = dma_map_single(&alx->hw.pdev->dev,
				     skb->data, alx->rxbuf_size,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
			dev_kfree_skb(skb);
			break;
		}

		/* Unfortunately, RX descriptor buffers must be 4-byte
		 * aligned, so we can't use IP alignment.
		 */
		if (WARN_ON(dma & 3)) {
			dev_kfree_skb(skb);
			break;
		}

		cur_buf->skb = skb;
		dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
		dma_unmap_addr_set(cur_buf, dma, dma);
		rfd->addr = cpu_to_le64(dma);

		cur = next;
		if (++next == alx->rx_ringsz)
			next = 0;
		cur_buf = &rxq->bufs[cur];
		count++;
	}

	if (count) {
		/* flush all updates before updating hardware */
		wmb();
		rxq->write_idx = cur;
		alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
	}

	return count;
}

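/* Number of free TX descriptors. One slot is deliberately kept unused
 * so that write_idx == read_idx always means "empty" rather than
 * "full", hence the "- 1" in both branches.
 */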
static inline int alx_tpd_avail(struct alx_priv *alx)
{
	struct alx_tx_queue *txq = &alx->txq;

	if (txq->write_idx >= txq->read_idx)
		return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1;
	return txq->read_idx - txq->write_idx - 1;
}

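/* Reclaim completed TX descriptors. The hardware publishes its
 * consumer index in ALX_TPD_PRI0_CIDX; everything between the software
 * read index and that point has been transmitted and can be unmapped
 * and freed. Completions are also reported to the BQL layer via
 * netdev_completed_queue().
 */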
static bool alx_clean_tx_irq(struct alx_priv *alx)
{
	struct alx_tx_queue *txq = &alx->txq;
	u16 hw_read_idx, sw_read_idx;
	unsigned int total_bytes = 0, total_packets = 0;
	int budget = ALX_DEFAULT_TX_WORK;

	sw_read_idx = txq->read_idx;
	hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX);

	if (sw_read_idx != hw_read_idx) {
		while (sw_read_idx != hw_read_idx && budget > 0) {
			struct sk_buff *skb;

			skb = txq->bufs[sw_read_idx].skb;
			if (skb) {
				total_bytes += skb->len;
				total_packets++;
				budget--;
			}

			alx_free_txbuf(alx, sw_read_idx);

			if (++sw_read_idx == alx->tx_ringsz)
				sw_read_idx = 0;
		}
		txq->read_idx = sw_read_idx;

		netdev_completed_queue(alx->dev, total_packets, total_bytes);
	}

	if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) &&
	    alx_tpd_avail(alx) > alx->tx_ringsz/4)
		netif_wake_queue(alx->dev);

	return sw_read_idx == hw_read_idx;
}

static void alx_schedule_link_check(struct alx_priv *alx)
{
	schedule_work(&alx->link_check_wk);
}

static void alx_schedule_reset(struct alx_priv *alx)
{
	schedule_work(&alx->reset_wk);
}

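/* RX completion uses two rings: software posts empty buffers as RX
 * free descriptors (RFD), and the hardware hands back received packets
 * through RX return descriptors (RRD). Each updated RRD is expected to
 * describe exactly one RFD at the current read index (RRD_NOR == 1,
 * RRD_SI == read_idx); anything else is treated as a hardware
 * inconsistency and triggers a reset.
 */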
static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
{
	struct alx_rx_queue *rxq = &alx->rxq;
	struct alx_rrd *rrd;
	struct alx_buffer *rxb;
	struct sk_buff *skb;
	u16 length, rfd_cleaned = 0;

	while (budget > 0) {
		rrd = &rxq->rrd[rxq->rrd_read_idx];
		if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
			break;
		rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);

		if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_SI) != rxq->read_idx ||
		    ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_NOR) != 1) {
			alx_schedule_reset(alx);
			return 0;
		}

		rxb = &rxq->bufs[rxq->read_idx];
		dma_unmap_single(&alx->hw.pdev->dev,
				 dma_unmap_addr(rxb, dma),
				 dma_unmap_len(rxb, size),
				 DMA_FROM_DEVICE);
		dma_unmap_len_set(rxb, size, 0);
		skb = rxb->skb;
		rxb->skb = NULL;

		if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
		    rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
			rrd->word3 = 0;
			dev_kfree_skb_any(skb);
			goto next_pkt;
		}

		length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
				       RRD_PKTLEN) - ETH_FCS_LEN;
		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, alx->dev);

		skb_checksum_none_assert(skb);
		if (alx->dev->features & NETIF_F_RXCSUM &&
		    !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
				    cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
			switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
					      RRD_PID)) {
			case RRD_PID_IPV6UDP:
			case RRD_PID_IPV4UDP:
			case RRD_PID_IPV4TCP:
			case RRD_PID_IPV6TCP:
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				break;
			}
		}

		napi_gro_receive(&alx->napi, skb);
		budget--;

next_pkt:
		if (++rxq->read_idx == alx->rx_ringsz)
			rxq->read_idx = 0;
		if (++rxq->rrd_read_idx == alx->rx_ringsz)
			rxq->rrd_read_idx = 0;

		if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
			rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
	}

	if (rfd_cleaned)
		alx_refill_rx_ring(alx, GFP_ATOMIC);

	return budget > 0;
}

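/* NAPI poll: TX cleaning is bounded by ALX_DEFAULT_TX_WORK and RX
 * cleaning by the NAPI budget. Only when both rings were fully drained
 * does the poll complete and re-enable the queue interrupts that
 * alx_intr_handle() masked off.
 */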
static int alx_poll(struct napi_struct *napi, int budget)
{
	struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
	struct alx_hw *hw = &alx->hw;
	bool complete = true;
	unsigned long flags;

	complete = alx_clean_tx_irq(alx) &&
		   alx_clean_rx_irq(alx, budget);

	if (!complete)
		return 1;

	napi_complete(&alx->napi);

	/* enable interrupt */
	spin_lock_irqsave(&alx->irq_lock, flags);
	alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	spin_unlock_irqrestore(&alx->irq_lock, flags);

	alx_post_write(hw);

	return 0;
}

static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
{
	struct alx_hw *hw = &alx->hw;
	bool write_int_mask = false;

	spin_lock(&alx->irq_lock);

	/* ACK interrupt */
	alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
	intr &= alx->int_mask;

	if (intr & ALX_ISR_FATAL) {
		netif_warn(alx, hw, alx->dev,
			   "fatal interrupt 0x%x, resetting\n", intr);
		alx_schedule_reset(alx);
		goto out;
	}

	if (intr & ALX_ISR_ALERT)
		netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);

	if (intr & ALX_ISR_PHY) {
		/* Mask the PHY interrupt for now: it is sourced from
		 * inside the PHY, so the interrupt status can only be
		 * cleared after the PHY's internal status has been
		 * cleared by the link check worker scheduled below.
		 */
		alx->int_mask &= ~ALX_ISR_PHY;
		write_int_mask = true;
		alx_schedule_link_check(alx);
	}

	if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
		napi_schedule(&alx->napi);
		/* mask rx/tx interrupt, enable them when napi complete */
		alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
		write_int_mask = true;
	}

	if (write_int_mask)
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);

	alx_write_mem32(hw, ALX_ISR, 0);

out:
	spin_unlock(&alx->irq_lock);
	return IRQ_HANDLED;
}

static irqreturn_t alx_intr_msi(int irq, void *data)
{
	struct alx_priv *alx = data;

	return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
}

static irqreturn_t alx_intr_legacy(int irq, void *data)
{
	struct alx_priv *alx = data;
	struct alx_hw *hw = &alx->hw;
	u32 intr;

	intr = alx_read_mem32(hw, ALX_ISR);

	if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
		return IRQ_NONE;

	return alx_intr_handle(alx, intr);
}

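/* Program ring base addresses and sizes into the chip. All rings share
 * a single "high 32 bits" address register (which is why the
 * descriptor memory is allocated as one chunk); the low halves are
 * written per ring and then latched with the SRAM load-pointer
 * command.
 */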
static void alx_init_ring_ptrs(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	u32 addr_hi = ((u64)alx->descmem.dma) >> 32;

	alx->rxq.read_idx = 0;
	alx->rxq.write_idx = 0;
	alx->rxq.rrd_read_idx = 0;
	alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma);
	alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma);
	alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);

	alx->txq.read_idx = 0;
	alx->txq.write_idx = 0;
	alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma);
	alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);

	/* load these pointers into the chip */
	alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
}

static void alx_free_txring_buf(struct alx_priv *alx)
{
	struct alx_tx_queue *txq = &alx->txq;
	int i;

	if (!txq->bufs)
		return;

	for (i = 0; i < alx->tx_ringsz; i++)
		alx_free_txbuf(alx, i);

	memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer));
	memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd));
	txq->write_idx = 0;
	txq->read_idx = 0;

	netdev_reset_queue(alx->dev);
}

static void alx_free_rxring_buf(struct alx_priv *alx)
{
	struct alx_rx_queue *rxq = &alx->rxq;
	struct alx_buffer *cur_buf;
	u16 i;

	if (rxq == NULL)
		return;

	for (i = 0; i < alx->rx_ringsz; i++) {
		cur_buf = rxq->bufs + i;
		if (cur_buf->skb) {
			dma_unmap_single(&alx->hw.pdev->dev,
					 dma_unmap_addr(cur_buf, dma),
					 dma_unmap_len(cur_buf, size),
					 DMA_FROM_DEVICE);
			dev_kfree_skb(cur_buf->skb);
			cur_buf->skb = NULL;
			dma_unmap_len_set(cur_buf, size, 0);
			dma_unmap_addr_set(cur_buf, dma, 0);
		}
	}

	rxq->write_idx = 0;
	rxq->read_idx = 0;
	rxq->rrd_read_idx = 0;
}

static void alx_free_buffers(struct alx_priv *alx)
{
	alx_free_txring_buf(alx);
	alx_free_rxring_buf(alx);
}

static int alx_reinit_rings(struct alx_priv *alx)
{
	alx_free_buffers(alx);

	alx_init_ring_ptrs(alx);

	if (!alx_refill_rx_ring(alx, GFP_KERNEL))
		return -ENOMEM;

	return 0;
}

static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
{
	u32 crc32, bit, reg;

	crc32 = ether_crc(ETH_ALEN, addr);
	reg = (crc32 >> 31) & 0x1;
	bit = (crc32 >> 26) & 0x1F;

	mc_hash[reg] |= BIT(bit);
}

static void __alx_set_rx_mode(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct netdev_hw_addr *ha;
	u32 mc_hash[2] = {};

	if (!(netdev->flags & IFF_ALLMULTI)) {
		netdev_for_each_mc_addr(ha, netdev)
			alx_add_mc_addr(hw, ha->addr, mc_hash);

		alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
		alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
	}

	hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
	if (netdev->flags & IFF_PROMISC)
		hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
	if (netdev->flags & IFF_ALLMULTI)
		hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}

static void alx_set_rx_mode(struct net_device *netdev)
{
	__alx_set_rx_mode(netdev);
}

static int alx_set_mac_address(struct net_device *netdev, void *data)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct sockaddr *addr = data;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (netdev->addr_assign_type & NET_ADDR_RANDOM)
		netdev->addr_assign_type ^= NET_ADDR_RANDOM;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
	alx_set_macaddr(hw, hw->mac_addr);

	return 0;
}

static int alx_alloc_descriptors(struct alx_priv *alx)
{
	alx->txq.bufs = kcalloc(alx->tx_ringsz,
				sizeof(struct alx_buffer),
				GFP_KERNEL);
	if (!alx->txq.bufs)
		return -ENOMEM;

	alx->rxq.bufs = kcalloc(alx->rx_ringsz,
				sizeof(struct alx_buffer),
				GFP_KERNEL);
	if (!alx->rxq.bufs)
		goto out_free;

	/* physical tx/rx ring descriptors
	 *
	 * Allocate them as a single chunk because they must not cross a
	 * 4G boundary (hardware has a single register for high 32 bits
	 * of addresses only)
	 */
	alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz +
			    sizeof(struct alx_rrd) * alx->rx_ringsz +
			    sizeof(struct alx_rfd) * alx->rx_ringsz;
	alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
						alx->descmem.size,
						&alx->descmem.dma,
						GFP_KERNEL);
	if (!alx->descmem.virt)
		goto out_free;

	alx->txq.tpd = (void *)alx->descmem.virt;
	alx->txq.tpd_dma = alx->descmem.dma;

	/* alignment requirement for next block */
	BUILD_BUG_ON(sizeof(struct alx_txd) % 8);

	alx->rxq.rrd =
		(void *)((u8 *)alx->descmem.virt +
			 sizeof(struct alx_txd) * alx->tx_ringsz);
	alx->rxq.rrd_dma = alx->descmem.dma +
			   sizeof(struct alx_txd) * alx->tx_ringsz;

	/* alignment requirement for next block */
	BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);

	alx->rxq.rfd =
		(void *)((u8 *)alx->descmem.virt +
			 sizeof(struct alx_txd) * alx->tx_ringsz +
			 sizeof(struct alx_rrd) * alx->rx_ringsz);
	alx->rxq.rfd_dma = alx->descmem.dma +
			   sizeof(struct alx_txd) * alx->tx_ringsz +
			   sizeof(struct alx_rrd) * alx->rx_ringsz;

	return 0;
out_free:
	kfree(alx->txq.bufs);
	kfree(alx->rxq.bufs);
	return -ENOMEM;
}

static int alx_alloc_rings(struct alx_priv *alx)
{
	int err;

	err = alx_alloc_descriptors(alx);
	if (err)
		return err;

	alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
	alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;

	netif_napi_add(alx->dev, &alx->napi, alx_poll, 64);

	alx_reinit_rings(alx);
	return 0;
}

static void alx_free_rings(struct alx_priv *alx)
{
	netif_napi_del(&alx->napi);
	alx_free_buffers(alx);

	kfree(alx->txq.bufs);
	kfree(alx->rxq.bufs);

	dma_free_coherent(&alx->hw.pdev->dev,
			  alx->descmem.size,
			  alx->descmem.virt,
			  alx->descmem.dma);
}

static void alx_config_vector_mapping(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_write_mem32(hw, ALX_MSI_MAP_TBL1, 0);
	alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
	alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
}

static void alx_irq_enable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	/* level-1 interrupt switch */
	alx_write_mem32(hw, ALX_ISR, 0);
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	alx_post_write(hw);
}

static void alx_irq_disable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
	alx_write_mem32(hw, ALX_IMR, 0);
	alx_post_write(hw);

	synchronize_irq(alx->hw.pdev->irq);
}

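/* Interrupt setup: prefer MSI, programming the MSI retransmission
 * timer from the interrupt moderation value in hw->imt, and fall back
 * to a shared legacy INTx line if MSI can't be enabled or requested.
 */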
static int alx_request_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;
	u32 msi_ctrl;

	msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;

	if (!pci_enable_msi(alx->hw.pdev)) {
		alx->msi = true;

		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
				msi_ctrl | ALX_MSI_MASK_SEL_LINE);
		err = request_irq(pdev->irq, alx_intr_msi, 0,
				  alx->dev->name, alx);
		if (!err)
			goto out;
		/* fall back to legacy interrupt */
		pci_disable_msi(alx->hw.pdev);
		alx->msi = false;
	}

	alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
	err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
			  alx->dev->name, alx);
out:
	if (!err)
		alx_config_vector_mapping(alx);
	return err;
}

static void alx_free_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;

	free_irq(pdev->irq, alx);

	if (alx->msi) {
		pci_disable_msi(alx->hw.pdev);
		alx->msi = false;
	}
}

static int alx_identify_hw(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int rev = alx_hw_revision(hw);

	if (rev > ALX_REV_C0)
		return -EINVAL;

	hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;

	return 0;
}

static int alx_init_sw(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;

	err = alx_identify_hw(alx);
	if (err) {
		dev_err(&pdev->dev, "unrecognized chip, aborting\n");
		return err;
	}

	alx->hw.lnk_patch =
		pdev->device == ALX_DEV_ID_AR8161 &&
		pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
		pdev->subsystem_device == 0x0091 &&
		pdev->revision == 0;

	hw->smb_timer = 400;
	hw->mtu = alx->dev->mtu;
	alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8);
	alx->tx_ringsz = 256;
	alx->rx_ringsz = 512;
	hw->imt = 200;
	alx->int_mask = ALX_ISR_MISC;
	hw->dma_chnl = hw->max_dma_chnl;
	hw->ith_tpd = alx->tx_ringsz / 3;
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;
	hw->adv_cfg = ADVERTISED_Autoneg |
		      ADVERTISED_10baseT_Half |
		      ADVERTISED_10baseT_Full |
		      ADVERTISED_100baseT_Full |
		      ADVERTISED_100baseT_Half |
		      ADVERTISED_1000baseT_Full;
	hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;

	hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
		      ALX_MAC_CTRL_MHASH_ALG_HI5B |
		      ALX_MAC_CTRL_BRD_EN |
		      ALX_MAC_CTRL_PCRCE |
		      ALX_MAC_CTRL_CRCE |
		      ALX_MAC_CTRL_RXFC_EN |
		      ALX_MAC_CTRL_TXFC_EN |
		      7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;

	return err;
}

static netdev_features_t alx_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6);

	return features;
}

static void alx_netif_stop(struct alx_priv *alx)
{
	alx->dev->trans_start = jiffies;
	if (netif_carrier_ok(alx->dev)) {
		netif_carrier_off(alx->dev);
		netif_tx_disable(alx->dev);
		napi_disable(&alx->napi);
	}
}

static void alx_halt(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_netif_stop(alx);
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;

	alx_reset_mac(hw);

	/* disable l0s/l1 */
	alx_enable_aspm(hw, false, false);
	alx_irq_disable(alx);
	alx_free_buffers(alx);
}

static void alx_configure(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_configure_basic(hw);
	alx_disable_rss(hw);
	__alx_set_rx_mode(alx->dev);

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}

static void alx_activate(struct alx_priv *alx)
{
	/* hardware setting lost, restore it */
	alx_reinit_rings(alx);
	alx_configure(alx);

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	alx_schedule_link_check(alx);
}

static void alx_reinit(struct alx_priv *alx)
{
	ASSERT_RTNL();

	alx_halt(alx);
	alx_activate(alx);
}

static int alx_change_mtu(struct net_device *netdev, int mtu)
{
	struct alx_priv *alx = netdev_priv(netdev);
	int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if ((max_frame < ALX_MIN_FRAME_SIZE) ||
	    (max_frame > ALX_MAX_FRAME_SIZE))
		return -EINVAL;

	if (netdev->mtu == mtu)
		return 0;

	netdev->mtu = mtu;
	alx->hw.mtu = mtu;
	alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ?
			  ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE;
	netdev_update_features(netdev);
	if (netif_running(netdev))
		alx_reinit(alx);
	return 0;
}

static void alx_netif_start(struct alx_priv *alx)
{
	netif_tx_wake_all_queues(alx->dev);
	napi_enable(&alx->napi);
	netif_carrier_on(alx->dev);
}

static int __alx_open(struct alx_priv *alx, bool resume)
{
	int err;

	if (!resume)
		netif_carrier_off(alx->dev);

	err = alx_alloc_rings(alx);
	if (err)
		return err;

	alx_configure(alx);

	err = alx_request_irq(alx);
	if (err)
		goto out_free_rings;

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	if (!resume)
		netif_tx_start_all_queues(alx->dev);

	alx_schedule_link_check(alx);
	return 0;

out_free_rings:
	alx_free_rings(alx);
	return err;
}

static void __alx_stop(struct alx_priv *alx)
{
	alx_halt(alx);
	alx_free_irq(alx);
	alx_free_rings(alx);
}

static const char *alx_speed_desc(struct alx_hw *hw)
{
	switch (alx_speed_to_ethadv(hw->link_speed, hw->duplex)) {
	case ADVERTISED_1000baseT_Full:
		return "1 Gbps Full";
	case ADVERTISED_100baseT_Full:
		return "100 Mbps Full";
	case ADVERTISED_100baseT_Half:
		return "100 Mbps Half";
	case ADVERTISED_10baseT_Full:
		return "10 Mbps Full";
	case ADVERTISED_10baseT_Half:
		return "10 Mbps Half";
	default:
		return "Unknown speed";
	}
}

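/* Link state handling, run from the link check worker: on link-up the
 * MAC is started and ASPM re-enabled; on link-down the MAC is reset,
 * which loses all hardware state, so the rings and MAC configuration
 * are rebuilt before interrupts are re-enabled.
 */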
static void alx_check_link(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	int old_speed;
	u8 old_duplex;
	int err;

	/* clear PHY internal interrupt status, otherwise the main
	 * interrupt status will be asserted forever
	 */
	alx_clear_phy_intr(hw);

	old_speed = hw->link_speed;
	old_duplex = hw->duplex;
	err = alx_read_phy_link(hw);
	if (err < 0)
		goto reset;

	spin_lock_irqsave(&alx->irq_lock, flags);
	alx->int_mask |= ALX_ISR_PHY;
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	spin_unlock_irqrestore(&alx->irq_lock, flags);

	if (old_speed == hw->link_speed)
		return;

	if (hw->link_speed != SPEED_UNKNOWN) {
		netif_info(alx, link, alx->dev,
			   "NIC Up: %s\n", alx_speed_desc(hw));
		alx_post_phy_link(hw);
		alx_enable_aspm(hw, true, true);
		alx_start_mac(hw);

		if (old_speed == SPEED_UNKNOWN)
			alx_netif_start(alx);
	} else {
		/* link is now down */
		alx_netif_stop(alx);
		netif_info(alx, link, alx->dev, "Link Down\n");
		err = alx_reset_mac(hw);
		if (err)
			goto reset;
		alx_irq_disable(alx);

		/* MAC reset causes all HW settings to be lost, restore all */
		err = alx_reinit_rings(alx);
		if (err)
			goto reset;
		alx_configure(alx);
		alx_enable_aspm(hw, false, true);
		alx_post_phy_link(hw);
		alx_irq_enable(alx);
	}

	return;

reset:
	alx_schedule_reset(alx);
}

static int alx_open(struct net_device *netdev)
{
	return __alx_open(netdev_priv(netdev), false);
}

static int alx_stop(struct net_device *netdev)
{
	__alx_stop(netdev_priv(netdev));
	return 0;
}

static void alx_link_check(struct work_struct *work)
{
	struct alx_priv *alx;

	alx = container_of(work, struct alx_priv, link_check_wk);

	rtnl_lock();
	alx_check_link(alx);
	rtnl_unlock();
}

static void alx_reset(struct work_struct *work)
{
	struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);

	rtnl_lock();
	alx_reinit(alx);
	rtnl_unlock();
}

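/* Set up TX checksum offload. The checksum start and result offsets
 * are programmed in units of 16-bit words (hence the ">> 1" and the
 * check that the start offset is even).
 */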
static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
{
	u8 cso, css;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	cso = skb_checksum_start_offset(skb);
	if (cso & 1)
		return -EINVAL;

	css = cso + skb->csum_offset;
	first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
	first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
	first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);

	return 0;
}

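/* Map an skb onto TX descriptors: one TPD for the linear head and one
 * per page fragment. The EOP bit is set on the last descriptor and the
 * skb pointer is stored there, so reclaim frees everything once the
 * final TPD completes. On a DMA mapping failure all descriptors mapped
 * so far are unwound.
 */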
static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
{
	struct alx_tx_queue *txq = &alx->txq;
	struct alx_txd *tpd, *first_tpd;
	dma_addr_t dma;
	int maplen, f, first_idx = txq->write_idx;

	first_tpd = &txq->tpd[txq->write_idx];
	tpd = first_tpd;

	maplen = skb_headlen(skb);
	dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&alx->hw.pdev->dev, dma))
		goto err_dma;

	dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
	dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

	tpd->adrl.addr = cpu_to_le64(dma);
	tpd->len = cpu_to_le16(maplen);

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];

		if (++txq->write_idx == alx->tx_ringsz)
			txq->write_idx = 0;
		tpd = &txq->tpd[txq->write_idx];

		tpd->word1 = first_tpd->word1;

		maplen = skb_frag_size(frag);
		dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0,
				       maplen, DMA_TO_DEVICE);
		if (dma_mapping_error(&alx->hw.pdev->dev, dma))
			goto err_dma;
		dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
		dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

		tpd->adrl.addr = cpu_to_le64(dma);
		tpd->len = cpu_to_le16(maplen);
	}

	/* last TPD, set EOP flag and store skb */
	tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
	txq->bufs[txq->write_idx].skb = skb;

	if (++txq->write_idx == alx->tx_ringsz)
		txq->write_idx = 0;

	return 0;

err_dma:
	f = first_idx;
	while (f != txq->write_idx) {
		alx_free_txbuf(alx, f);
		if (++f == alx->tx_ringsz)
			f = 0;
	}
	return -ENOMEM;
}

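/* TX ring flow control: the queue is stopped once fewer than 1/8 of
 * the descriptors remain free, and alx_clean_tx_irq() wakes it again
 * once at least a quarter of the ring is available.
 */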
static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_tx_queue *txq = &alx->txq;
	struct alx_txd *first;
	int tpdreq = skb_shinfo(skb)->nr_frags + 1;

	if (alx_tpd_avail(alx) < tpdreq) {
		netif_stop_queue(alx->dev);
		goto drop;
	}

	first = &txq->tpd[txq->write_idx];
	memset(first, 0, sizeof(*first));

	if (alx_tx_csum(skb, first))
		goto drop;

	if (alx_map_tx_skb(alx, skb) < 0)
		goto drop;

	netdev_sent_queue(alx->dev, skb->len);

	/* flush updates before updating hardware */
	wmb();
	alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx);

	if (alx_tpd_avail(alx) < alx->tx_ringsz/8)
		netif_stop_queue(alx->dev);

	return NETDEV_TX_OK;

drop:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void alx_tx_timeout(struct net_device *dev)
{
	struct alx_priv *alx = netdev_priv(dev);

	alx_schedule_reset(alx);
}

static int alx_mdio_read(struct net_device *netdev,
			 int prtad, int devad, u16 addr)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	u16 val;
	int err;

	if (prtad != hw->mdio.prtad)
		return -EINVAL;

	if (devad == MDIO_DEVAD_NONE)
		err = alx_read_phy_reg(hw, addr, &val);
	else
		err = alx_read_phy_ext(hw, devad, addr, &val);

	if (err)
		return err;
	return val;
}

static int alx_mdio_write(struct net_device *netdev,
			  int prtad, int devad, u16 addr, u16 val)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;

	if (prtad != hw->mdio.prtad)
		return -EINVAL;

	if (devad == MDIO_DEVAD_NONE)
		return alx_write_phy_reg(hw, addr, val);

	return alx_write_phy_ext(hw, devad, addr, val);
}

static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct alx_priv *alx = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EAGAIN;

	return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void alx_poll_controller(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);

	if (alx->msi)
		alx_intr_msi(0, alx);
	else
		alx_intr_legacy(0, alx);
}
#endif

static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *net_stats)
{
	struct alx_priv *alx = netdev_priv(dev);
	struct alx_hw_stats *hw_stats = &alx->hw.stats;

	spin_lock(&alx->stats_lock);

	alx_update_hw_stats(&alx->hw);

	net_stats->tx_bytes = hw_stats->tx_byte_cnt;
	net_stats->rx_bytes = hw_stats->rx_byte_cnt;
	net_stats->multicast = hw_stats->rx_mcast;
	net_stats->collisions = hw_stats->tx_single_col +
				hw_stats->tx_multi_col +
				hw_stats->tx_late_col +
				hw_stats->tx_abort_col;

	net_stats->rx_errors = hw_stats->rx_frag +
			       hw_stats->rx_fcs_err +
			       hw_stats->rx_len_err +
			       hw_stats->rx_ov_sz +
			       hw_stats->rx_ov_rrd +
			       hw_stats->rx_align_err +
			       hw_stats->rx_ov_rxf;

	net_stats->rx_fifo_errors = hw_stats->rx_ov_rxf;
	net_stats->rx_length_errors = hw_stats->rx_len_err;
	net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
	net_stats->rx_frame_errors = hw_stats->rx_align_err;
	net_stats->rx_dropped = hw_stats->rx_ov_rrd;

	net_stats->tx_errors = hw_stats->tx_late_col +
			       hw_stats->tx_abort_col +
			       hw_stats->tx_underrun +
			       hw_stats->tx_trunc;

	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
	net_stats->tx_fifo_errors = hw_stats->tx_underrun;
	net_stats->tx_window_errors = hw_stats->tx_late_col;

	net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
	net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;

	spin_unlock(&alx->stats_lock);

	return net_stats;
}

static const struct net_device_ops alx_netdev_ops = {
	.ndo_open               = alx_open,
	.ndo_stop               = alx_stop,
	.ndo_start_xmit         = alx_start_xmit,
	.ndo_get_stats64        = alx_get_stats64,
	.ndo_set_rx_mode        = alx_set_rx_mode,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = alx_set_mac_address,
	.ndo_change_mtu         = alx_change_mtu,
	.ndo_do_ioctl           = alx_ioctl,
	.ndo_tx_timeout         = alx_tx_timeout,
	.ndo_fix_features       = alx_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = alx_poll_controller,
#endif
};

static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct alx_priv *alx;
	struct alx_hw *hw;
	bool phy_configured;
	int bars, err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* The alx chip can DMA to 64-bit addresses, but it uses a single
	 * shared register for the high 32 bits, so only a single, aligned,
	 * 4 GB physical address range can be used for descriptors.
	 */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev,
					"No usable DMA config, aborting\n");
				goto out_pci_disable;
			}
		}
	}

	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	err = pci_request_selected_regions(pdev, bars, alx_drv_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed(bars:%d)\n", bars);
		goto out_pci_disable;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	if (!pdev->pm_cap) {
		dev_err(&pdev->dev,
			"Can't find power management capability, aborting\n");
		err = -EIO;
		goto out_pci_release;
	}

	netdev = alloc_etherdev(sizeof(*alx));
	if (!netdev) {
		err = -ENOMEM;
		goto out_pci_release;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	alx = netdev_priv(netdev);
	spin_lock_init(&alx->hw.mdio_lock);
	spin_lock_init(&alx->irq_lock);
	spin_lock_init(&alx->stats_lock);
	alx->dev = netdev;
	alx->hw.pdev = pdev;
	alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
			  NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
	hw = &alx->hw;
	pci_set_drvdata(pdev, alx);

	hw->hw_addr = pci_ioremap_bar(pdev, 0);
	if (!hw->hw_addr) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -EIO;
		goto out_free_netdev;
	}

	netdev->netdev_ops = &alx_netdev_ops;
	SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops);
	netdev->irq = pdev->irq;
	netdev->watchdog_timeo = ALX_WATCHDOG_TIME;

	if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
		pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;

	err = alx_init_sw(alx);
	if (err) {
		dev_err(&pdev->dev, "net device private data init failed\n");
		goto out_unmap;
	}

	alx_reset_pcie(hw);

	phy_configured = alx_phy_configured(hw);

	if (!phy_configured)
		alx_reset_phy(hw);

	err = alx_reset_mac(hw);
	if (err) {
		dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
		goto out_unmap;
	}

	/* setup link to put it in a known good starting state */
	if (!phy_configured) {
		err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
		if (err) {
			dev_err(&pdev->dev,
				"failed to configure PHY speed/duplex (err=%d)\n",
				err);
			goto out_unmap;
		}
	}

	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;

	if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
		dev_warn(&pdev->dev,
			 "Invalid permanent address programmed, using random one\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
	}

	memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
	memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
	memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);

	hw->mdio.prtad = 0;
	hw->mdio.mmds = 0;
	hw->mdio.dev = netdev;
	hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
				MDIO_SUPPORTS_C22 |
				MDIO_EMULATE_C22;
	hw->mdio.mdio_read = alx_mdio_read;
	hw->mdio.mdio_write = alx_mdio_write;

	if (!alx_get_phy_info(hw)) {
		dev_err(&pdev->dev, "failed to identify PHY\n");
		err = -EIO;
		goto out_unmap;
	}

	INIT_WORK(&alx->link_check_wk, alx_link_check);
	INIT_WORK(&alx->reset_wk, alx_reset);
	netif_carrier_off(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "register netdevice failed\n");
		goto out_unmap;
	}

	netdev_info(netdev,
		    "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
		    netdev->dev_addr);

	return 0;

out_unmap:
	iounmap(hw->hw_addr);
out_free_netdev:
	free_netdev(netdev);
out_pci_release:
	pci_release_selected_regions(pdev, bars);
out_pci_disable:
	pci_disable_device(pdev);
	return err;
}

static void alx_remove(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;

	cancel_work_sync(&alx->link_check_wk);
	cancel_work_sync(&alx->reset_wk);

	/* restore permanent mac address */
	alx_set_macaddr(hw, hw->perm_addr);

	unregister_netdev(alx->dev);
	iounmap(hw->hw_addr);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);

	free_netdev(alx->dev);
}

#ifdef CONFIG_PM_SLEEP
static int alx_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct alx_priv *alx = pci_get_drvdata(pdev);

	if (!netif_running(alx->dev))
		return 0;
	netif_device_detach(alx->dev);
	__alx_stop(alx);
	return 0;
}

static int alx_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;

	alx_reset_phy(hw);

	if (!netif_running(alx->dev))
		return 0;
	netif_device_attach(alx->dev);
	return __alx_open(alx, true);
}

static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
#define ALX_PM_OPS      (&alx_pm_ops)
#else
#define ALX_PM_OPS      NULL
#endif

static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;
	pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;

	dev_info(&pdev->dev, "pci error detected\n");

	rtnl_lock();

	if (netif_running(netdev)) {
		netif_device_detach(netdev);
		alx_halt(alx);
	}

	if (state == pci_channel_io_perm_failure)
		rc = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return rc;
}

static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	dev_info(&pdev->dev, "pci error slot reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
		goto out;
	}

	pci_set_master(pdev);

	alx_reset_pcie(hw);
	if (!alx_reset_mac(hw))
		rc = PCI_ERS_RESULT_RECOVERED;
out:
	pci_cleanup_aer_uncorrect_error_status(pdev);

	rtnl_unlock();

	return rc;
}

static void alx_pci_error_resume(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;

	dev_info(&pdev->dev, "pci error resume\n");

	rtnl_lock();

	if (netif_running(netdev)) {
		alx_activate(alx);
		netif_device_attach(netdev);
	}

	rtnl_unlock();
}

static const struct pci_error_handlers alx_err_handlers = {
	.error_detected = alx_pci_error_detected,
	.slot_reset     = alx_pci_error_slot_reset,
	.resume         = alx_pci_error_resume,
};

static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = {
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
	{}
};

static struct pci_driver alx_driver = {
	.name        = alx_drv_name,
	.id_table    = alx_pci_tbl,
	.probe       = alx_probe,
	.remove      = alx_remove,
	.err_handler = &alx_err_handlers,
	.driver.pm   = ALX_PM_OPS,
};

module_pci_driver(alx_driver);
MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
MODULE_DESCRIPTION(
	"Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
MODULE_LICENSE("GPL");