/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
    NETIF_MSG_TX_QUEUED |
    NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS |
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int irq_type = MSIX_IRQ;
module_param(irq_type, int, 0);
MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID1)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

/* This hardware semaphore grants exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int seconds = 3;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		ssleep(1);
	} while (--seconds);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
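
/* Illustrative sketch (editorial addition, not part of the original
 * driver): the typical acquire/use/release pattern for the hardware
 * semaphore helpers above.  The mask and register names are the ones
 * used elsewhere in this file; the wrapper itself is hypothetical.
 */
static inline int ql_sem_usage_example(struct ql_adapter *qdev, u32 *val)
{
	/* Spin for up to ~3 seconds waiting for the flash semaphore. */
	int status = ql_sem_spinlock(qdev, SEM_FLASH_MASK);
	if (status)
		return status;	/* -ETIMEDOUT: another function holds it. */
	/* Touch the shared resource only while the semaphore is held. */
	*val = ql_read32(qdev, FLASH_ADDR);
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return 0;
}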

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly during the
 * initialization process, but is also used by kernel thread APIs
 * such as netdev->set_multi, netdev->set_mac_address, and
 * netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			QPRINTK(qdev, PROBE, ALERT,
				"register 0x%.08x access error, value = 0x%.08x!\n",
				reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	QPRINTK(qdev, PROBE, ALERT,
		"Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto exit;
	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
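
/* Illustrative sketch (editorial addition, not part of the original
 * driver): how a completion queue init control block would typically
 * be pushed to the chip with ql_write_cfg().  CFG_LCQ is the "load
 * completion queue" bit used for this elsewhere in the driver; the
 * wrapper and its cqicb/cq_id parameters are hypothetical.
 */
static inline int ql_load_cq_example(struct ql_adapter *qdev,
				     struct cqicb *cqicb, u16 cq_id)
{
	/* Maps the block, triggers the download, then waits for the
	 * CFG_LCQ bit to clear again.
	 */
	return ql_write_cfg(qdev, cqicb, sizeof(*cqicb), CFG_LCQ, cq_id);
}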

/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type);	/* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type);	/* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
					   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type);	/* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, MAC_ADDR_E);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			QPRINTK(qdev, IFUP, INFO,
				"Adding %s address %pM"
				" at index %d in the CAM.\n",
				((type ==
				  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
				 "UNICAST"), addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				cam_output = (CAM_OUT_ROUTE_NIC |
					      (qdev->
					       func << CAM_OUT_FUNC_SHIFT) |
					      (qdev->
					       rss_ring_first_cq_id <<
					       CAM_OUT_CQ_ID_SHIFT));
				if (qdev->vlgrp)
					cam_output |= CAM_OUT_RV;
				/* route to NIC core */
				ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
				(enable_bit ? "Adding" : "Removing"),
				index, (enable_bit ? "to" : "from"));

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		goto exit;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, RT_IDX_E);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, RT_IDX_E);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status;
	u32 value = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	QPRINTK(qdev, IFUP, DEBUG,
		"%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
		(enable ? "Adding" : "Removing"),
		((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
		((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
		((index ==
		  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
		((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
		((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
		((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
		((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
		((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
		((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
		((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
		((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
		((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
		((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
		((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
		((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
		((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
		(enable ? "to" : "from"));

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
			mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
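
/* Editorial note (an inference from the pair above, not from the
 * original sources): like the STS and CFG writes elsewhere in this
 * file, INTR_EN appears to treat its upper 16 bits as a write mask
 * for the lower 16.  Writing (INTR_EN_EI << 16) | INTR_EN_EI sets the
 * enable bit, while writing (INTR_EN_EI << 16) alone clears it, which
 * is why the two helpers differ only in the low half of the word.
 */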

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}

int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, FLASH_DATA);
exit:
	return status;
}

static int ql_get_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	u32 *p = (u32 *)&qdev->flash;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) {
		status = ql_read_flash_word(qdev, i, p);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
			goto exit;
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}
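
/* Illustrative sketch (editorial addition, not part of the original
 * driver): reading one 64-bit XGMAC statistics counter with the helper
 * above.  The 0x200 offset is hypothetical; real offsets come from the
 * hardware documentation.  The low word lives at "reg" and the high
 * word at "reg + 4".
 */
static inline void ql_read_stat_example(struct ql_adapter *qdev)
{
	u64 tx_pkts = 0;

	if (ql_read_xgmac_reg64(qdev, 0x200, &tx_pkts) == 0)
		QPRINTK(qdev, DRV, DEBUG, "tx pkts = %llu.\n",
			(unsigned long long)tx_pkts);
}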

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		QPRINTK(qdev, LINK, INFO,
			"Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			QPRINTK(qdev, LINK, CRIT,
				"Port initialize timed out.\n");
		}
		return status;
	}

	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

/* Get the next large buffer. */
struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

/* Get the next small buffer. */
struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int clean_idx = rx_ring->lbq_clean_idx;
	struct bq_desc *lbq_desc;
	struct bq_element *bq;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"lbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			bq = lbq_desc->bq;
			if (lbq_desc->p.lbq_page == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"lbq: getting new page for index %d.\n",
					lbq_desc->index);
				lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
				if (lbq_desc->p.lbq_page == NULL) {
					QPRINTK(qdev, RX_STATUS, ERR,
						"Couldn't get a page.\n");
					return;
				}
				map = pci_map_page(qdev->pdev,
						   lbq_desc->p.lbq_page,
						   0, PAGE_SIZE,
						   PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					QPRINTK(qdev, RX_STATUS, ERR,
						"PCI mapping failed.\n");
					return;
				}
				pci_unmap_addr_set(lbq_desc, mapaddr, map);
				pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
				bq->addr_lo =	/*lbq_desc->addr_lo = */
				    cpu_to_le32(map);
				bq->addr_hi =	/*lbq_desc->addr_hi = */
				    cpu_to_le32(map >> 32);
			}
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"lbq: updating prod idx = %d.\n",
			rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
		rx_ring->lbq_free_cnt -= 16;
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int clean_idx = rx_ring->sbq_clean_idx;
	struct bq_desc *sbq_desc;
	struct bq_element *bq;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"sbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			bq = sbq_desc->bq;
			if (sbq_desc->p.skb == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"sbq: getting new skb for index %d.\n",
					sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     rx_ring->sbq_buf_size);
				if (sbq_desc->p.skb == NULL) {
					QPRINTK(qdev, PROBE, ERR,
						"Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size /
						     2, PCI_DMA_FROMDEVICE);
				pci_unmap_addr_set(sbq_desc, mapaddr, map);
				pci_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size / 2);
				bq->addr_lo = cpu_to_le32(map);
				bq->addr_hi = cpu_to_le32(map >> 32);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"sbq: updating prod idx = %d.\n",
			rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);

		rx_ring->sbq_free_cnt -= 16;
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				QPRINTK(qdev, TX_DONE, DEBUG,
					"unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 pci_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
				i);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}
}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		QPRINTK(qdev, TX_QUEUED, ERR,
			"PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				QPRINTK(qdev, TX_QUEUED, ERR,
					"PCI mapping outbound address list with error: %d\n",
					err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map =
		    pci_map_page(qdev->pdev, frag->page,
				 frag->page_offset, frag->size,
				 PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			QPRINTK(qdev, TX_QUEUED, ERR,
				"PCI mapping frags failed with error: %d.\n",
				err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);
	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first mapping (the skb->data area) failed, then
	 * map_idx will be zero and nothing is unmapped.  Otherwise
	 * we pass in the number of segments that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
				(unsigned int)len);
}
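
/* Worked example for the realignment above (editorial note, assuming
 * the usual QLGE_SB_PAD of 32 and NET_IP_ALIGN of 2): skb->data and
 * skb->tail move back by 32 - 2 = 30 bytes, so a payload the hardware
 * DMA'd at a 32-byte offset is copied down to offset 2.  That leaves
 * the 14-byte Ethernet header at offset 2, which puts the IP header
 * on a 4-byte-aligned address.
 */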

/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 pci_unmap_addr(sbq_desc, mapaddr),
				 pci_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, data of %d bytes in small, combine them.\n",
				length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    pci_unmap_addr
						    (sbq_desc, mapaddr),
						    pci_unmap_len
						    (sbq_desc, maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       pci_unmap_addr
						       (sbq_desc,
							mapaddr),
						       pci_unmap_len
						       (sbq_desc,
							maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes in a single small buffer.\n", length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(sbq_desc,
							mapaddr),
					 pci_unmap_len(sbq_desc,
						       maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Header in small, %d bytes in large. Chain large to small!\n",
				length);
			/*
			 * The data is in a single large buffer.  We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Chaining page to skb.\n");
			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
					   0, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			lbq_desc->p.lbq_page = NULL;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				QPRINTK(qdev, PROBE, DEBUG,
					"No skb available, drop the packet.\n");
				return NULL;
			}
			skb_reserve(skb, NET_IP_ALIGN);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
					   0, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			length -= length;
			lbq_desc->p.lbq_page = NULL;
			__pskb_pull_tail(skb,
					 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
					 VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * thru and chain them to our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 * buffer will hold 32 of them. The thing is,
		 * we'll use 3 max for our 9000 byte jumbo
		 * frames.  If the MTU goes up we could
		 * eventually be in trouble.
		 */
		int size, offset, i = 0;
		struct bq_element *bq, bq_array[8];
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 pci_unmap_addr(sbq_desc, mapaddr),
				 pci_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs.  Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers & data in chain of large.\n",
				length);
			skb = sbq_desc->p.skb;
			bq = &bq_array[0];
			memcpy(bq, skb->data, sizeof(bq_array));
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, %d bytes of data in chain of large.\n",
				length);
			bq = (struct bq_element *)sbq_desc->p.skb->data;
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			if ((bq->addr_lo & ~BQ_MASK) != lbq_desc->bq->addr_lo) {
				QPRINTK(qdev, RX_STATUS, ERR,
					"Panic!!! bad large buffer address, expected 0x%.08x, got 0x%.08x.\n",
					lbq_desc->bq->addr_lo, bq->addr_lo);
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc,
						     maplen),
				       PCI_DMA_FROMDEVICE);
			size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
			offset = 0;

			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Adding page %d to skb for %d bytes.\n",
				i, size);
			skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
					   offset, size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			lbq_desc->p.lbq_page = NULL;
			bq++;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				 VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No skb available, drop packet.\n");
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
	}
	if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
		QPRINTK(qdev, RX_STATUS, ERR,
			"Bad checksum for this %s packet.\n",
			((ib_mac_rsp->
			  flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
		skb->ip_summed = CHECKSUM_NONE;
	} else if (qdev->rx_csum &&
		   ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
		    ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
		     !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	qdev->stats.rx_packets++;
	qdev->stats.rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"Passing a VLAN packet upstream.\n");
		vlan_hwaccel_rx(skb, qdev->vlgrp,
				le16_to_cpu(ib_mac_rsp->vlan_id));
	} else {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"Passing a normal packet upstream.\n");
		netif_rx(skb);
	}
	ndev->last_rx = jiffies;
}

/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	qdev->stats.tx_bytes += tx_ring_desc->skb->len;
	qdev->stats.tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Frame too short to be legal, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	netif_stop_queue(qdev->ndev);
	netif_carrier_off(qdev->ndev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	netif_stop_queue(qdev->ndev);
	netif_carrier_off(qdev->ndev);
	ql_disable_interrupts(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		QPRINTK(qdev, RX_ERR, ERR,
			"Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		QPRINTK(qdev, LINK, ERR,
			"Multiple CAM hits occurred during lookup.\n");
		QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		QPRINTK(qdev, RX_ERR, ERR,
			"PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
			ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
			ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}
1549 | ||
1550 | static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) | |
1551 | { | |
1552 | struct ql_adapter *qdev = rx_ring->qdev; | |
1553 | u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); | |
1554 | struct ob_mac_iocb_rsp *net_rsp = NULL; | |
1555 | int count = 0; | |
1556 | ||
1557 | /* While there are entries in the completion queue. */ | |
1558 | while (prod != rx_ring->cnsmr_idx) { | |
1559 | ||
1560 | QPRINTK(qdev, RX_STATUS, DEBUG, | |
1561 | "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id, | |
1562 | prod, rx_ring->cnsmr_idx); | |
1563 | ||
1564 | net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; | |
1565 | rmb(); | |
1566 | switch (net_rsp->opcode) { | |
1567 | ||
1568 | case OPCODE_OB_MAC_TSO_IOCB: | |
1569 | case OPCODE_OB_MAC_IOCB: | |
1570 | ql_process_mac_tx_intr(qdev, net_rsp); | |
1571 | break; | |
1572 | default: | |
1573 | QPRINTK(qdev, RX_STATUS, DEBUG, | |
1574 | "Hit default case, not handled! dropping the packet, opcode = %x.\n", | |
1575 | net_rsp->opcode); | |
1576 | } | |
1577 | count++; | |
1578 | ql_update_cq(rx_ring); | |
1579 | prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); | |
1580 | } | |
1581 | ql_write_cq_idx(rx_ring); | |
1582 | if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) { | |
1583 | struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; | |
1584 | if (atomic_read(&tx_ring->queue_stopped) && | |
1585 | (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) | |
1586 | /* | |
1587 | * The queue got stopped because the tx_ring was full. | |
1588 | * Wake it up, because it's now at least 25% empty. | |
1589 | */ | |
1590 | netif_wake_queue(qdev->ndev); | |
1591 | } | |
1592 | ||
1593 | return count; | |
1594 | } | |
1595 | ||
1596 | static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) | |
1597 | { | |
1598 | struct ql_adapter *qdev = rx_ring->qdev; | |
1599 | u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); | |
1600 | struct ql_net_rsp_iocb *net_rsp; | |
1601 | int count = 0; | |
1602 | ||
1603 | /* While there are entries in the completion queue. */ | |
1604 | while (prod != rx_ring->cnsmr_idx) { | |
1605 | ||
1606 | QPRINTK(qdev, RX_STATUS, DEBUG, | |
1607 | "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id, | |
1608 | prod, rx_ring->cnsmr_idx); | |
1609 | ||
1610 | net_rsp = rx_ring->curr_entry; | |
1611 | rmb(); | |
1612 | switch (net_rsp->opcode) { | |
1613 | case OPCODE_IB_MAC_IOCB: | |
1614 | ql_process_mac_rx_intr(qdev, rx_ring, | |
1615 | (struct ib_mac_iocb_rsp *) | |
1616 | net_rsp); | |
1617 | break; | |
1618 | ||
1619 | case OPCODE_IB_AE_IOCB: | |
1620 | ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *) | |
1621 | net_rsp); | |
1622 | break; | |
1623 | default: | |
1625 | QPRINTK(qdev, RX_STATUS, DEBUG, | |
1626 | "Hit default case, not handled! Dropping the packet, opcode = %x.\n", | |
1627 | net_rsp->opcode); | |
1629 | } | |
1630 | count++; | |
1631 | ql_update_cq(rx_ring); | |
1632 | prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); | |
1633 | if (count == budget) | |
1634 | break; | |
1635 | } | |
1636 | ql_update_buffer_queues(qdev, rx_ring); | |
1637 | ql_write_cq_idx(rx_ring); | |
1638 | return count; | |
1639 | } | |
1640 | ||
1641 | static int ql_napi_poll_msix(struct napi_struct *napi, int budget) | |
1642 | { | |
1643 | struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi); | |
1644 | struct ql_adapter *qdev = rx_ring->qdev; | |
1645 | int work_done = ql_clean_inbound_rx_ring(rx_ring, budget); | |
1646 | ||
1647 | QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n", | |
1648 | rx_ring->cq_id); | |
1649 | ||
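| /* Standard NAPI contract: if we consumed less than our | |
| * budget the ring is drained, so complete polling and | |
| * re-enable this vector's completion interrupt. | |
| */ | |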
1650 | if (work_done < budget) { | |
1651 | __netif_rx_complete(qdev->ndev, napi); | |
1652 | ql_enable_completion_interrupt(qdev, rx_ring->irq); | |
1653 | } | |
1654 | return work_done; | |
1655 | } | |
1656 | ||
1657 | static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp) | |
1658 | { | |
1659 | struct ql_adapter *qdev = netdev_priv(ndev); | |
1660 | ||
1661 | qdev->vlgrp = grp; | |
1662 | if (grp) { | |
1663 | QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n"); | |
1664 | ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | | |
1665 | NIC_RCV_CFG_VLAN_MATCH_AND_NON); | |
1666 | } else { | |
1667 | QPRINTK(qdev, IFUP, DEBUG, | |
1668 | "Turning off VLAN in NIC_RCV_CFG.\n"); | |
1669 | ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK); | |
1670 | } | |
1671 | } | |
1672 | ||
1673 | static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid) | |
1674 | { | |
1675 | struct ql_adapter *qdev = netdev_priv(ndev); | |
1676 | u32 enable_bit = MAC_ADDR_E; | |
1677 | ||
1678 | spin_lock(&qdev->hw_lock); | |
1679 | if (ql_set_mac_addr_reg | |
1680 | (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { | |
1681 | QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n"); | |
1682 | } | |
1683 | spin_unlock(&qdev->hw_lock); | |
1684 | } | |
1685 | ||
1686 | static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) | |
1687 | { | |
1688 | struct ql_adapter *qdev = netdev_priv(ndev); | |
1689 | u32 enable_bit = 0; | |
1690 | ||
1691 | spin_lock(&qdev->hw_lock); | |
1692 | if (ql_set_mac_addr_reg | |
1693 | (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { | |
1694 | QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n"); | |
1695 | } | |
1696 | spin_unlock(&qdev->hw_lock); | |
1697 | ||
1698 | } | |
1699 | ||
1700 | /* Worker thread to process a given rx_ring that is dedicated | |
1701 | * to outbound completions. | |
1702 | */ | |
1703 | static void ql_tx_clean(struct work_struct *work) | |
1704 | { | |
1705 | struct rx_ring *rx_ring = | |
1706 | container_of(work, struct rx_ring, rx_work.work); | |
1707 | ql_clean_outbound_rx_ring(rx_ring); | |
1708 | ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq); | |
1709 | ||
1710 | } | |
1711 | ||
1712 | /* Worker thread to process a given rx_ring that is dedicated | |
1713 | * to inbound completions. | |
1714 | */ | |
1715 | static void ql_rx_clean(struct work_struct *work) | |
1716 | { | |
1717 | struct rx_ring *rx_ring = | |
1718 | container_of(work, struct rx_ring, rx_work.work); | |
1719 | ql_clean_inbound_rx_ring(rx_ring, 64); | |
1720 | ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq); | |
1721 | } | |
1722 | ||
1723 | /* MSI-X Multiple Vector Interrupt Handler for outbound completions. */ | |
1724 | static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id) | |
1725 | { | |
1726 | struct rx_ring *rx_ring = dev_id; | |
1727 | queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue, | |
1728 | &rx_ring->rx_work, 0); | |
1729 | return IRQ_HANDLED; | |
1730 | } | |
1731 | ||
1732 | /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */ | |
1733 | static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id) | |
1734 | { | |
1735 | struct rx_ring *rx_ring = dev_id; | |
1736 | struct ql_adapter *qdev = rx_ring->qdev; | |
1737 | netif_rx_schedule(qdev->ndev, &rx_ring->napi); | |
1738 | return IRQ_HANDLED; | |
1739 | } | |
1740 | ||
c4e84bde RM |
1741 | /* This handles a fatal error, MPI activity, and the default |
1742 | * rx_ring in an MSI-X multiple vector environment. | |
1743 | * In an MSI/Legacy environment it also processes the rest of | |
1744 | * the rx_rings. | |
1745 | */ | |
1746 | static irqreturn_t qlge_isr(int irq, void *dev_id) | |
1747 | { | |
1748 | struct rx_ring *rx_ring = dev_id; | |
1749 | struct ql_adapter *qdev = rx_ring->qdev; | |
1750 | struct intr_context *intr_context = &qdev->intr_context[0]; | |
1751 | u32 var; | |
1752 | int i; | |
1753 | int work_done = 0; | |
1754 | ||
bb0d215c RM |
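| /* irq_cnt is non-zero while our completion interrupt is | |
| * disabled, so a firing here must belong to another device | |
| * sharing the (legacy) interrupt line. | |
| */ | |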
1755 | spin_lock(&qdev->hw_lock); |
1756 | if (atomic_read(&qdev->intr_context[0].irq_cnt)) { | |
1757 | QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n"); | |
1758 | spin_unlock(&qdev->hw_lock); | |
1759 | return IRQ_NONE; | |
c4e84bde | 1760 | } |
bb0d215c | 1761 | spin_unlock(&qdev->hw_lock); |
c4e84bde | 1762 | |
bb0d215c | 1763 | var = ql_disable_completion_interrupt(qdev, intr_context->intr); |
c4e84bde RM |
1764 | |
1765 | /* | |
1766 | * Check for fatal error. | |
1767 | */ | |
1768 | if (var & STS_FE) { | |
1769 | ql_queue_asic_error(qdev); | |
1770 | QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var); | |
1771 | var = ql_read32(qdev, ERR_STS); | |
1772 | QPRINTK(qdev, INTR, ERR, | |
1773 | "Resetting chip. Error Status Register = 0x%x\n", var); | |
1774 | return IRQ_HANDLED; | |
1775 | } | |
1776 | ||
1777 | /* | |
1778 | * Check MPI processor activity. | |
1779 | */ | |
1780 | if (var & STS_PI) { | |
1781 | /* | |
1782 | * We've got an async event or mailbox completion. | |
1783 | * Handle it and clear the source of the interrupt. | |
1784 | */ | |
1785 | QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n"); | |
1786 | ql_disable_completion_interrupt(qdev, intr_context->intr); | |
1787 | queue_delayed_work_on(smp_processor_id(), qdev->workqueue, | |
1788 | &qdev->mpi_work, 0); | |
1789 | work_done++; | |
1790 | } | |
1791 | ||
1792 | /* | |
1793 | * Check the default queue and wake handler if active. | |
1794 | */ | |
1795 | rx_ring = &qdev->rx_ring[0]; | |
1796 | if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) { | |
1797 | QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n"); | |
1798 | ql_disable_completion_interrupt(qdev, intr_context->intr); | |
1799 | queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue, | |
1800 | &rx_ring->rx_work, 0); | |
1801 | work_done++; | |
1802 | } | |
1803 | ||
1804 | if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) { | |
1805 | /* | |
1806 | * Start the DPC for each active queue. | |
1807 | */ | |
1808 | for (i = 1; i < qdev->rx_ring_count; i++) { | |
1809 | rx_ring = &qdev->rx_ring[i]; | |
1810 | if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != | |
1811 | rx_ring->cnsmr_idx) { | |
1812 | QPRINTK(qdev, INTR, INFO, | |
1813 | "Waking handler for rx_ring[%d].\n", i); | |
1814 | ql_disable_completion_interrupt(qdev, | |
1815 | intr_context-> | |
1816 | intr); | |
1817 | if (i < qdev->rss_ring_first_cq_id) | |
1818 | queue_delayed_work_on(rx_ring->cpu, | |
1819 | qdev->q_workqueue, | |
1820 | &rx_ring->rx_work, | |
1821 | 0); | |
1822 | else | |
1823 | netif_rx_schedule(qdev->ndev, | |
1824 | &rx_ring->napi); | |
1825 | work_done++; | |
1826 | } | |
1827 | } | |
1828 | } | |
bb0d215c | 1829 | ql_enable_completion_interrupt(qdev, intr_context->intr); |
c4e84bde RM |
1830 | return work_done ? IRQ_HANDLED : IRQ_NONE; |
1831 | } | |
1832 | ||
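| /* Set up a TSO IOCB for a GSO frame. Returns 1 if TSO was | |
| * set up, 0 if the frame does not need TSO, or a negative | |
| * errno if a cloned header could not be expanded. | |
| */ | |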
1833 | static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) | |
1834 | { | |
1835 | ||
1836 | if (skb_is_gso(skb)) { | |
1837 | int err; | |
1838 | if (skb_header_cloned(skb)) { | |
1839 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | |
1840 | if (err) | |
1841 | return err; | |
1842 | } | |
1843 | ||
1844 | mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB; | |
1845 | mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC; | |
1846 | mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len); | |
1847 | mac_iocb_ptr->total_hdrs_len = | |
1848 | cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb)); | |
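| /* net_trans_offset packs both header offsets into one field: | |
| * the network offset in the low bits and the transport offset | |
| * shifted up by OB_MAC_TRANSPORT_HDR_SHIFT. | |
| */ | |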
1849 | mac_iocb_ptr->net_trans_offset = | |
1850 | cpu_to_le16(skb_network_offset(skb) | | |
1851 | skb_transport_offset(skb) | |
1852 | << OB_MAC_TRANSPORT_HDR_SHIFT); | |
1853 | mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); | |
1854 | mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO; | |
1855 | if (likely(skb->protocol == htons(ETH_P_IP))) { | |
1856 | struct iphdr *iph = ip_hdr(skb); | |
1857 | iph->check = 0; | |
1858 | mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; | |
1859 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, | |
1860 | iph->daddr, 0, | |
1861 | IPPROTO_TCP, | |
1862 | 0); | |
1863 | } else if (skb->protocol == htons(ETH_P_IPV6)) { | |
1864 | mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6; | |
1865 | tcp_hdr(skb)->check = | |
1866 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | |
1867 | &ipv6_hdr(skb)->daddr, | |
1868 | 0, IPPROTO_TCP, 0); | |
1869 | } | |
1870 | return 1; | |
1871 | } | |
1872 | return 0; | |
1873 | } | |
1874 | ||
1875 | static void ql_hw_csum_setup(struct sk_buff *skb, | |
1876 | struct ob_mac_tso_iocb_req *mac_iocb_ptr) | |
1877 | { | |
1878 | int len; | |
1879 | struct iphdr *iph = ip_hdr(skb); | |
1880 | u16 *check; | |
1881 | mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB; | |
1882 | mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len); | |
1883 | mac_iocb_ptr->net_trans_offset = | |
1884 | cpu_to_le16(skb_network_offset(skb) | | |
1885 | skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT); | |
1886 | ||
1887 | mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; | |
1888 | len = (ntohs(iph->tot_len) - (iph->ihl << 2)); | |
1889 | if (likely(iph->protocol == IPPROTO_TCP)) { | |
1890 | check = &(tcp_hdr(skb)->check); | |
1891 | mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC; | |
1892 | mac_iocb_ptr->total_hdrs_len = | |
1893 | cpu_to_le16(skb_transport_offset(skb) + | |
1894 | (tcp_hdr(skb)->doff << 2)); | |
1895 | } else { | |
1896 | check = &(udp_hdr(skb)->check); | |
1897 | mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC; | |
1898 | mac_iocb_ptr->total_hdrs_len = | |
1899 | cpu_to_le16(skb_transport_offset(skb) + | |
1900 | sizeof(struct udphdr)); | |
1901 | } | |
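| /* Seed the checksum field with the pseudo-header checksum so | |
| * the hardware only has to fold in the checksum over the | |
| * payload it transmits. | |
| */ | |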
1902 | *check = ~csum_tcpudp_magic(iph->saddr, | |
1903 | iph->daddr, len, iph->protocol, 0); | |
1904 | } | |
1905 | ||
1906 | static int qlge_send(struct sk_buff *skb, struct net_device *ndev) | |
1907 | { | |
1908 | struct tx_ring_desc *tx_ring_desc; | |
1909 | struct ob_mac_iocb_req *mac_iocb_ptr; | |
1910 | struct ql_adapter *qdev = netdev_priv(ndev); | |
1911 | int tso; | |
1912 | struct tx_ring *tx_ring; | |
1913 | u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb); | |
1914 | ||
1915 | tx_ring = &qdev->tx_ring[tx_ring_idx]; | |
1916 | ||
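| /* Stop the queue when fewer than two free descriptors remain | |
| * (room for this frame plus, presumably, one slot of slack). | |
| */ | |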
1917 | if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { | |
1918 | QPRINTK(qdev, TX_QUEUED, INFO, | |
1919 | "%s: shutting down tx queue %d du to lack of resources.\n", | |
1920 | __func__, tx_ring_idx); | |
1921 | netif_stop_queue(ndev); | |
1922 | atomic_inc(&tx_ring->queue_stopped); | |
1923 | return NETDEV_TX_BUSY; | |
1924 | } | |
1925 | tx_ring_desc = &tx_ring->q[tx_ring->prod_idx]; | |
1926 | mac_iocb_ptr = tx_ring_desc->queue_entry; | |
1927 | memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr)); | |
1928 | if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) { | |
1929 | QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n"); | |
1930 | return NETDEV_TX_BUSY; | |
1931 | } | |
1932 | ||
1933 | mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB; | |
1934 | mac_iocb_ptr->tid = tx_ring_desc->index; | |
1935 | /* Store the tx queue index in the IOCB so that when the | |
1936 | * completion arrives we can establish which tx_ring it belongs to. | |
1937 | */ | |
1938 | mac_iocb_ptr->txq_idx = tx_ring_idx; | |
1939 | tx_ring_desc->skb = skb; | |
1940 | ||
1941 | mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len); | |
1942 | ||
1943 | if (qdev->vlgrp && vlan_tx_tag_present(skb)) { | |
1944 | QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n", | |
1945 | vlan_tx_tag_get(skb)); | |
1946 | mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V; | |
1947 | mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb)); | |
1948 | } | |
1949 | tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); | |
1950 | if (tso < 0) { | |
1951 | dev_kfree_skb_any(skb); | |
1952 | return NETDEV_TX_OK; | |
1953 | } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) { | |
1954 | ql_hw_csum_setup(skb, | |
1955 | (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); | |
1956 | } | |
1957 | QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr); | |
1958 | tx_ring->prod_idx++; | |
1959 | if (tx_ring->prod_idx == tx_ring->wq_len) | |
1960 | tx_ring->prod_idx = 0; | |
1961 | wmb(); | |
1962 | ||
1963 | ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); | |
1964 | ndev->trans_start = jiffies; | |
1965 | QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n", | |
1966 | tx_ring->prod_idx, skb->len); | |
1967 | ||
1968 | atomic_dec(&tx_ring->tx_count); | |
1969 | return NETDEV_TX_OK; | |
1970 | } | |
1971 | ||
1972 | static void ql_free_shadow_space(struct ql_adapter *qdev) | |
1973 | { | |
1974 | if (qdev->rx_ring_shadow_reg_area) { | |
1975 | pci_free_consistent(qdev->pdev, | |
1976 | PAGE_SIZE, | |
1977 | qdev->rx_ring_shadow_reg_area, | |
1978 | qdev->rx_ring_shadow_reg_dma); | |
1979 | qdev->rx_ring_shadow_reg_area = NULL; | |
1980 | } | |
1981 | if (qdev->tx_ring_shadow_reg_area) { | |
1982 | pci_free_consistent(qdev->pdev, | |
1983 | PAGE_SIZE, | |
1984 | qdev->tx_ring_shadow_reg_area, | |
1985 | qdev->tx_ring_shadow_reg_dma); | |
1986 | qdev->tx_ring_shadow_reg_area = NULL; | |
1987 | } | |
1988 | } | |
1989 | ||
1990 | static int ql_alloc_shadow_space(struct ql_adapter *qdev) | |
1991 | { | |
1992 | qdev->rx_ring_shadow_reg_area = | |
1993 | pci_alloc_consistent(qdev->pdev, | |
1994 | PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma); | |
1995 | if (qdev->rx_ring_shadow_reg_area == NULL) { | |
1996 | QPRINTK(qdev, IFUP, ERR, | |
1997 | "Allocation of RX shadow space failed.\n"); | |
1998 | return -ENOMEM; | |
1999 | } | |
2000 | qdev->tx_ring_shadow_reg_area = | |
2001 | pci_alloc_consistent(qdev->pdev, PAGE_SIZE, | |
2002 | &qdev->tx_ring_shadow_reg_dma); | |
2003 | if (qdev->tx_ring_shadow_reg_area == NULL) { | |
2004 | QPRINTK(qdev, IFUP, ERR, | |
2005 | "Allocation of TX shadow space failed.\n"); | |
2006 | goto err_wqp_sh_area; | |
2007 | } | |
2008 | return 0; | |
2009 | ||
2010 | err_wqp_sh_area: | |
2011 | pci_free_consistent(qdev->pdev, | |
2012 | PAGE_SIZE, | |
2013 | qdev->rx_ring_shadow_reg_area, | |
2014 | qdev->rx_ring_shadow_reg_dma); | |
2015 | return -ENOMEM; | |
2016 | } | |
2017 | ||
2018 | static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) | |
2019 | { | |
2020 | struct tx_ring_desc *tx_ring_desc; | |
2021 | int i; | |
2022 | struct ob_mac_iocb_req *mac_iocb_ptr; | |
2023 | ||
2024 | mac_iocb_ptr = tx_ring->wq_base; | |
2025 | tx_ring_desc = tx_ring->q; | |
2026 | for (i = 0; i < tx_ring->wq_len; i++) { | |
2027 | tx_ring_desc->index = i; | |
2028 | tx_ring_desc->skb = NULL; | |
2029 | tx_ring_desc->queue_entry = mac_iocb_ptr; | |
2030 | mac_iocb_ptr++; | |
2031 | tx_ring_desc++; | |
2032 | } | |
2033 | atomic_set(&tx_ring->tx_count, tx_ring->wq_len); | |
2034 | atomic_set(&tx_ring->queue_stopped, 0); | |
2035 | } | |
2036 | ||
2037 | static void ql_free_tx_resources(struct ql_adapter *qdev, | |
2038 | struct tx_ring *tx_ring) | |
2039 | { | |
2040 | if (tx_ring->wq_base) { | |
2041 | pci_free_consistent(qdev->pdev, tx_ring->wq_size, | |
2042 | tx_ring->wq_base, tx_ring->wq_base_dma); | |
2043 | tx_ring->wq_base = NULL; | |
2044 | } | |
2045 | kfree(tx_ring->q); | |
2046 | tx_ring->q = NULL; | |
2047 | } | |
2048 | ||
2049 | static int ql_alloc_tx_resources(struct ql_adapter *qdev, | |
2050 | struct tx_ring *tx_ring) | |
2051 | { | |
2052 | tx_ring->wq_base = | |
2053 | pci_alloc_consistent(qdev->pdev, tx_ring->wq_size, | |
2054 | &tx_ring->wq_base_dma); | |
2055 | ||
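| /* The chip presumably requires the work queue to be aligned | |
| * to its own (power-of-two) size, hence the mask test below. | |
| */ | |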
2056 | if ((tx_ring->wq_base == NULL) | |
2057 | || tx_ring->wq_base_dma & (tx_ring->wq_size - 1)) { | |
2058 | QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n"); | |
2059 | return -ENOMEM; | |
2060 | } | |
2061 | tx_ring->q = | |
2062 | kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL); | |
2063 | if (tx_ring->q == NULL) | |
2064 | goto err; | |
2065 | ||
2066 | return 0; | |
2067 | err: | |
2068 | pci_free_consistent(qdev->pdev, tx_ring->wq_size, | |
2069 | tx_ring->wq_base, tx_ring->wq_base_dma); | |
2070 | return -ENOMEM; | |
2071 | } | |
2072 | ||
2073 | void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) | |
2074 | { | |
2075 | int i; | |
2076 | struct bq_desc *lbq_desc; | |
2077 | ||
2078 | for (i = 0; i < rx_ring->lbq_len; i++) { | |
2079 | lbq_desc = &rx_ring->lbq[i]; | |
2080 | if (lbq_desc->p.lbq_page) { | |
2081 | pci_unmap_page(qdev->pdev, | |
2082 | pci_unmap_addr(lbq_desc, mapaddr), | |
2083 | pci_unmap_len(lbq_desc, maplen), | |
2084 | PCI_DMA_FROMDEVICE); | |
2085 | ||
2086 | put_page(lbq_desc->p.lbq_page); | |
2087 | lbq_desc->p.lbq_page = NULL; | |
2088 | } | |
2089 | lbq_desc->bq->addr_lo = 0; | |
2090 | lbq_desc->bq->addr_hi = 0; | |
2091 | } | |
2092 | } | |
2093 | ||
2094 | /* | |
2095 | * Allocate and map a page for each element of the lbq. | |
2096 | */ | |
2097 | static int ql_alloc_lbq_buffers(struct ql_adapter *qdev, | |
2098 | struct rx_ring *rx_ring) | |
2099 | { | |
2100 | int i; | |
2101 | struct bq_desc *lbq_desc; | |
2102 | u64 map; | |
2103 | struct bq_element *bq = rx_ring->lbq_base; | |
2104 | ||
2105 | for (i = 0; i < rx_ring->lbq_len; i++) { | |
2106 | lbq_desc = &rx_ring->lbq[i]; | |
2107 | memset(lbq_desc, 0, sizeof(*lbq_desc)); | |
2108 | lbq_desc->bq = bq; | |
2109 | lbq_desc->index = i; | |
2110 | lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC); | |
2111 | if (unlikely(!lbq_desc->p.lbq_page)) { | |
2112 | QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n"); | |
2113 | goto mem_error; | |
2114 | } else { | |
2115 | map = pci_map_page(qdev->pdev, | |
2116 | lbq_desc->p.lbq_page, | |
2117 | 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); | |
2118 | if (pci_dma_mapping_error(qdev->pdev, map)) { | |
2119 | QPRINTK(qdev, IFUP, ERR, | |
2120 | "PCI mapping failed.\n"); | |
2121 | goto mem_error; | |
2122 | } | |
2123 | pci_unmap_addr_set(lbq_desc, mapaddr, map); | |
2124 | pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE); | |
2125 | bq->addr_lo = cpu_to_le32(map); | |
2126 | bq->addr_hi = cpu_to_le32(map >> 32); | |
2127 | } | |
2128 | bq++; | |
2129 | } | |
2130 | return 0; | |
2131 | mem_error: | |
2132 | ql_free_lbq_buffers(qdev, rx_ring); | |
2133 | return -ENOMEM; | |
2134 | } | |
2135 | ||
2136 | void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) | |
2137 | { | |
2138 | int i; | |
2139 | struct bq_desc *sbq_desc; | |
2140 | ||
2141 | for (i = 0; i < rx_ring->sbq_len; i++) { | |
2142 | sbq_desc = &rx_ring->sbq[i]; | |
2143 | if (sbq_desc == NULL) { | |
2144 | QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i); | |
2145 | return; | |
2146 | } | |
2147 | if (sbq_desc->p.skb) { | |
2148 | pci_unmap_single(qdev->pdev, | |
2149 | pci_unmap_addr(sbq_desc, mapaddr), | |
2150 | pci_unmap_len(sbq_desc, maplen), | |
2151 | PCI_DMA_FROMDEVICE); | |
2152 | dev_kfree_skb(sbq_desc->p.skb); | |
2153 | sbq_desc->p.skb = NULL; | |
2154 | } | |
2155 | if (sbq_desc->bq == NULL) { | |
2156 | QPRINTK(qdev, IFUP, ERR, "sbq_desc->bq %d is NULL.\n", | |
2157 | i); | |
2158 | return; | |
2159 | } | |
2160 | sbq_desc->bq->addr_lo = 0; | |
2161 | sbq_desc->bq->addr_hi = 0; | |
2162 | } | |
2163 | } | |
2164 | ||
2165 | /* Allocate and map an skb for each element of the sbq. */ | |
2166 | static int ql_alloc_sbq_buffers(struct ql_adapter *qdev, | |
2167 | struct rx_ring *rx_ring) | |
2168 | { | |
2169 | int i; | |
2170 | struct bq_desc *sbq_desc; | |
2171 | struct sk_buff *skb; | |
2172 | u64 map; | |
2173 | struct bq_element *bq = rx_ring->sbq_base; | |
2174 | ||
2175 | for (i = 0; i < rx_ring->sbq_len; i++) { | |
2176 | sbq_desc = &rx_ring->sbq[i]; | |
2177 | memset(sbq_desc, 0, sizeof(*sbq_desc)); | |
2178 | sbq_desc->index = i; | |
2179 | sbq_desc->bq = bq; | |
2180 | skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size); | |
2181 | if (unlikely(!skb)) { | |
2182 | /* Better luck next round */ | |
2183 | QPRINTK(qdev, IFUP, ERR, | |
2184 | "small buff alloc failed for %d bytes at index %d.\n", | |
2185 | rx_ring->sbq_buf_size, i); | |
2186 | goto mem_err; | |
2187 | } | |
2188 | skb_reserve(skb, QLGE_SB_PAD); | |
2189 | sbq_desc->p.skb = skb; | |
2190 | /* | |
2191 | * Map only half the buffer, because the | |
2192 | * other half may get some data copied to it | |
2193 | * when the completion arrives. | |
2194 | */ | |
2195 | map = pci_map_single(qdev->pdev, | |
2196 | skb->data, | |
2197 | rx_ring->sbq_buf_size / 2, | |
2198 | PCI_DMA_FROMDEVICE); | |
2199 | if (pci_dma_mapping_error(qdev->pdev, map)) { | |
2200 | QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n"); | |
2201 | goto mem_err; | |
2202 | } | |
2203 | pci_unmap_addr_set(sbq_desc, mapaddr, map); | |
2204 | pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2); | |
2205 | bq->addr_lo = /*sbq_desc->addr_lo = */ | |
2206 | cpu_to_le32(map); | |
2207 | bq->addr_hi = /*sbq_desc->addr_hi = */ | |
2208 | cpu_to_le32(map >> 32); | |
2209 | bq++; | |
2210 | } | |
2211 | return 0; | |
2212 | mem_err: | |
2213 | ql_free_sbq_buffers(qdev, rx_ring); | |
2214 | return -ENOMEM; | |
2215 | } | |
2216 | ||
2217 | static void ql_free_rx_resources(struct ql_adapter *qdev, | |
2218 | struct rx_ring *rx_ring) | |
2219 | { | |
2220 | if (rx_ring->sbq_len) | |
2221 | ql_free_sbq_buffers(qdev, rx_ring); | |
2222 | if (rx_ring->lbq_len) | |
2223 | ql_free_lbq_buffers(qdev, rx_ring); | |
2224 | ||
2225 | /* Free the small buffer queue. */ | |
2226 | if (rx_ring->sbq_base) { | |
2227 | pci_free_consistent(qdev->pdev, | |
2228 | rx_ring->sbq_size, | |
2229 | rx_ring->sbq_base, rx_ring->sbq_base_dma); | |
2230 | rx_ring->sbq_base = NULL; | |
2231 | } | |
2232 | ||
2233 | /* Free the small buffer queue control blocks. */ | |
2234 | kfree(rx_ring->sbq); | |
2235 | rx_ring->sbq = NULL; | |
2236 | ||
2237 | /* Free the large buffer queue. */ | |
2238 | if (rx_ring->lbq_base) { | |
2239 | pci_free_consistent(qdev->pdev, | |
2240 | rx_ring->lbq_size, | |
2241 | rx_ring->lbq_base, rx_ring->lbq_base_dma); | |
2242 | rx_ring->lbq_base = NULL; | |
2243 | } | |
2244 | ||
2245 | /* Free the large buffer queue control blocks. */ | |
2246 | kfree(rx_ring->lbq); | |
2247 | rx_ring->lbq = NULL; | |
2248 | ||
2249 | /* Free the rx queue. */ | |
2250 | if (rx_ring->cq_base) { | |
2251 | pci_free_consistent(qdev->pdev, | |
2252 | rx_ring->cq_size, | |
2253 | rx_ring->cq_base, rx_ring->cq_base_dma); | |
2254 | rx_ring->cq_base = NULL; | |
2255 | } | |
2256 | } | |
2257 | ||
2258 | /* Allocate queues and buffers for this completion queue based | |
2259 | * on the values in the parameter structure. */ | |
2260 | static int ql_alloc_rx_resources(struct ql_adapter *qdev, | |
2261 | struct rx_ring *rx_ring) | |
2262 | { | |
2263 | ||
2264 | /* | |
2265 | * Allocate the completion queue for this rx_ring. | |
2266 | */ | |
2267 | rx_ring->cq_base = | |
2268 | pci_alloc_consistent(qdev->pdev, rx_ring->cq_size, | |
2269 | &rx_ring->cq_base_dma); | |
2270 | ||
2271 | if (rx_ring->cq_base == NULL) { | |
2272 | QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n"); | |
2273 | return -ENOMEM; | |
2274 | } | |
2275 | ||
2276 | if (rx_ring->sbq_len) { | |
2277 | /* | |
2278 | * Allocate small buffer queue. | |
2279 | */ | |
2280 | rx_ring->sbq_base = | |
2281 | pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size, | |
2282 | &rx_ring->sbq_base_dma); | |
2283 | ||
2284 | if (rx_ring->sbq_base == NULL) { | |
2285 | QPRINTK(qdev, IFUP, ERR, | |
2286 | "Small buffer queue allocation failed.\n"); | |
2287 | goto err_mem; | |
2288 | } | |
2289 | ||
2290 | /* | |
2291 | * Allocate small buffer queue control blocks. | |
2292 | */ | |
2293 | rx_ring->sbq = | |
2294 | kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc), | |
2295 | GFP_KERNEL); | |
2296 | if (rx_ring->sbq == NULL) { | |
2297 | QPRINTK(qdev, IFUP, ERR, | |
2298 | "Small buffer queue control block allocation failed.\n"); | |
2299 | goto err_mem; | |
2300 | } | |
2301 | ||
2302 | if (ql_alloc_sbq_buffers(qdev, rx_ring)) { | |
2303 | QPRINTK(qdev, IFUP, ERR, | |
2304 | "Small buffer allocation failed.\n"); | |
2305 | goto err_mem; | |
2306 | } | |
2307 | } | |
2308 | ||
2309 | if (rx_ring->lbq_len) { | |
2310 | /* | |
2311 | * Allocate large buffer queue. | |
2312 | */ | |
2313 | rx_ring->lbq_base = | |
2314 | pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size, | |
2315 | &rx_ring->lbq_base_dma); | |
2316 | ||
2317 | if (rx_ring->lbq_base == NULL) { | |
2318 | QPRINTK(qdev, IFUP, ERR, | |
2319 | "Large buffer queue allocation failed.\n"); | |
2320 | goto err_mem; | |
2321 | } | |
2322 | /* | |
2323 | * Allocate large buffer queue control blocks. | |
2324 | */ | |
2325 | rx_ring->lbq = | |
2326 | kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc), | |
2327 | GFP_KERNEL); | |
2328 | if (rx_ring->lbq == NULL) { | |
2329 | QPRINTK(qdev, IFUP, ERR, | |
2330 | "Large buffer queue control block allocation failed.\n"); | |
2331 | goto err_mem; | |
2332 | } | |
2333 | ||
2334 | /* | |
2335 | * Allocate the buffers. | |
2336 | */ | |
2337 | if (ql_alloc_lbq_buffers(qdev, rx_ring)) { | |
2338 | QPRINTK(qdev, IFUP, ERR, | |
2339 | "Large buffer allocation failed.\n"); | |
2340 | goto err_mem; | |
2341 | } | |
2342 | } | |
2343 | ||
2344 | return 0; | |
2345 | ||
2346 | err_mem: | |
2347 | ql_free_rx_resources(qdev, rx_ring); | |
2348 | return -ENOMEM; | |
2349 | } | |
2350 | ||
2351 | static void ql_tx_ring_clean(struct ql_adapter *qdev) | |
2352 | { | |
2353 | struct tx_ring *tx_ring; | |
2354 | struct tx_ring_desc *tx_ring_desc; | |
2355 | int i, j; | |
2356 | ||
2357 | /* | |
2358 | * Loop through all queues and free | |
2359 | * any resources. | |
2360 | */ | |
2361 | for (j = 0; j < qdev->tx_ring_count; j++) { | |
2362 | tx_ring = &qdev->tx_ring[j]; | |
2363 | for (i = 0; i < tx_ring->wq_len; i++) { | |
2364 | tx_ring_desc = &tx_ring->q[i]; | |
2365 | if (tx_ring_desc && tx_ring_desc->skb) { | |
2366 | QPRINTK(qdev, IFDOWN, ERR, | |
2367 | "Freeing lost SKB %p, from queue %d, index %d.\n", | |
2368 | tx_ring_desc->skb, j, | |
2369 | tx_ring_desc->index); | |
2370 | ql_unmap_send(qdev, tx_ring_desc, | |
2371 | tx_ring_desc->map_cnt); | |
2372 | dev_kfree_skb(tx_ring_desc->skb); | |
2373 | tx_ring_desc->skb = NULL; | |
2374 | } | |
2375 | } | |
2376 | } | |
2377 | } | |
2378 | ||
2379 | static void ql_free_ring_cb(struct ql_adapter *qdev) | |
2380 | { | |
2381 | kfree(qdev->ring_mem); | |
2382 | } | |
2383 | ||
2384 | static int ql_alloc_ring_cb(struct ql_adapter *qdev) | |
2385 | { | |
2386 | /* Allocate space for tx/rx ring control blocks. */ | |
2387 | qdev->ring_mem_size = | |
2388 | (qdev->tx_ring_count * sizeof(struct tx_ring)) + | |
2389 | (qdev->rx_ring_count * sizeof(struct rx_ring)); | |
2390 | qdev->ring_mem = kmalloc(qdev->ring_mem_size, GFP_KERNEL); | |
2391 | if (qdev->ring_mem == NULL) { | |
2392 | return -ENOMEM; | |
2393 | } else { | |
2394 | qdev->rx_ring = qdev->ring_mem; | |
2395 | qdev->tx_ring = qdev->ring_mem + | |
2396 | (qdev->rx_ring_count * sizeof(struct rx_ring)); | |
2397 | } | |
2398 | return 0; | |
2399 | } | |
2400 | ||
2401 | static void ql_free_mem_resources(struct ql_adapter *qdev) | |
2402 | { | |
2403 | int i; | |
2404 | ||
2405 | for (i = 0; i < qdev->tx_ring_count; i++) | |
2406 | ql_free_tx_resources(qdev, &qdev->tx_ring[i]); | |
2407 | for (i = 0; i < qdev->rx_ring_count; i++) | |
2408 | ql_free_rx_resources(qdev, &qdev->rx_ring[i]); | |
2409 | ql_free_shadow_space(qdev); | |
2410 | } | |
2411 | ||
2412 | static int ql_alloc_mem_resources(struct ql_adapter *qdev) | |
2413 | { | |
2414 | int i; | |
2415 | ||
2416 | /* Allocate space for our shadow registers and such. */ | |
2417 | if (ql_alloc_shadow_space(qdev)) | |
2418 | return -ENOMEM; | |
2419 | ||
2420 | for (i = 0; i < qdev->rx_ring_count; i++) { | |
2421 | if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) { | |
2422 | QPRINTK(qdev, IFUP, ERR, | |
2423 | "RX resource allocation failed.\n"); | |
2424 | goto err_mem; | |
2425 | } | |
2426 | } | |
2427 | /* Allocate tx queue resources */ | |
2428 | for (i = 0; i < qdev->tx_ring_count; i++) { | |
2429 | if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) { | |
2430 | QPRINTK(qdev, IFUP, ERR, | |
2431 | "TX resource allocation failed.\n"); | |
2432 | goto err_mem; | |
2433 | } | |
2434 | } | |
2435 | return 0; | |
2436 | ||
2437 | err_mem: | |
2438 | ql_free_mem_resources(qdev); | |
2439 | return -ENOMEM; | |
2440 | } | |
2441 | ||
2442 | /* Set up the rx ring control block and pass it to the chip. | |
2443 | * The control block is defined as | |
2444 | * "Completion Queue Initialization Control Block", or cqicb. | |
2445 | */ | |
2446 | static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) | |
2447 | { | |
2448 | struct cqicb *cqicb = &rx_ring->cqicb; | |
2449 | void *shadow_reg = qdev->rx_ring_shadow_reg_area + | |
2450 | (rx_ring->cq_id * sizeof(u64) * 4); | |
2451 | u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma + | |
2452 | (rx_ring->cq_id * sizeof(u64) * 4); | |
2453 | void __iomem *doorbell_area = | |
2454 | qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id)); | |
2455 | int err = 0; | |
2456 | u16 bq_len; | |
2457 | ||
2458 | /* Set up the shadow registers for this ring. */ | |
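| /* Each completion queue owns a stride of four u64 slots in | |
| * the shadow page: the producer index shadow, the lbq | |
| * indirect base, and the sbq indirect base (the fourth slot | |
| * is unused here). | |
| */ | |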
2459 | rx_ring->prod_idx_sh_reg = shadow_reg; | |
2460 | rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma; | |
2461 | shadow_reg += sizeof(u64); | |
2462 | shadow_reg_dma += sizeof(u64); | |
2463 | rx_ring->lbq_base_indirect = shadow_reg; | |
2464 | rx_ring->lbq_base_indirect_dma = shadow_reg_dma; | |
2465 | shadow_reg += sizeof(u64); | |
2466 | shadow_reg_dma += sizeof(u64); | |
2467 | rx_ring->sbq_base_indirect = shadow_reg; | |
2468 | rx_ring->sbq_base_indirect_dma = shadow_reg_dma; | |
2469 | ||
2470 | /* PCI doorbell mem area + 0x00 for consumer index register */ | |
2471 | rx_ring->cnsmr_idx_db_reg = (u32 *) doorbell_area; | |
2472 | rx_ring->cnsmr_idx = 0; | |
2473 | rx_ring->curr_entry = rx_ring->cq_base; | |
2474 | ||
2475 | /* PCI doorbell mem area + 0x04 for valid register */ | |
2476 | rx_ring->valid_db_reg = doorbell_area + 0x04; | |
2477 | ||
2478 | /* PCI doorbell mem area + 0x18 for large buffer consumer */ | |
2479 | rx_ring->lbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x18); | |
2480 | ||
2481 | /* PCI doorbell mem area + 0x1c */ | |
2482 | rx_ring->sbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x1c); | |
2483 | ||
2484 | memset((void *)cqicb, 0, sizeof(struct cqicb)); | |
2485 | cqicb->msix_vect = rx_ring->irq; | |
2486 | ||
2487 | cqicb->len = cpu_to_le16(rx_ring->cq_len | LEN_V | LEN_CPP_CONT); | |
2488 | ||
2489 | cqicb->addr_lo = cpu_to_le32(rx_ring->cq_base_dma); | |
2490 | cqicb->addr_hi = cpu_to_le32((u64) rx_ring->cq_base_dma >> 32); | |
2491 | ||
2492 | cqicb->prod_idx_addr_lo = cpu_to_le32(rx_ring->prod_idx_sh_reg_dma); | |
2493 | cqicb->prod_idx_addr_hi = | |
2494 | cpu_to_le32((u64) rx_ring->prod_idx_sh_reg_dma >> 32); | |
2495 | ||
2496 | /* | |
2497 | * Set up the control block load flags. | |
2498 | */ | |
2499 | cqicb->flags = FLAGS_LC | /* Load queue base address */ | |
2500 | FLAGS_LV | /* Load MSI-X vector */ | |
2501 | FLAGS_LI; /* Load irq delay values */ | |
2502 | if (rx_ring->lbq_len) { | |
2503 | cqicb->flags |= FLAGS_LL; /* Load lbq values */ | |
2504 | *((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma; | |
2505 | cqicb->lbq_addr_lo = | |
2506 | cpu_to_le32(rx_ring->lbq_base_indirect_dma); | |
2507 | cqicb->lbq_addr_hi = | |
2508 | cpu_to_le32((u64) rx_ring->lbq_base_indirect_dma >> 32); | |
2509 | cqicb->lbq_buf_size = cpu_to_le32(rx_ring->lbq_buf_size); | |
2510 | bq_len = (u16) rx_ring->lbq_len; | |
2511 | cqicb->lbq_len = cpu_to_le16(bq_len); | |
2512 | rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16; | |
2513 | rx_ring->lbq_curr_idx = 0; | |
2514 | rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx; | |
2515 | rx_ring->lbq_free_cnt = 16; | |
2516 | } | |
2517 | if (rx_ring->sbq_len) { | |
2518 | cqicb->flags |= FLAGS_LS; /* Load sbq values */ | |
2519 | *((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma; | |
2520 | cqicb->sbq_addr_lo = | |
2521 | cpu_to_le32(rx_ring->sbq_base_indirect_dma); | |
2522 | cqicb->sbq_addr_hi = | |
2523 | cpu_to_le32((u64) rx_ring->sbq_base_indirect_dma >> 32); | |
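| /* Only half of each small buffer is DMA-mapped (see | |
| * ql_alloc_sbq_buffers()), so the size reported to the chip | |
| * is the half size padded to an 8-byte boundary. | |
| */ | |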
2524 | cqicb->sbq_buf_size = | |
2525 | cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8); | |
2526 | bq_len = (u16) rx_ring->sbq_len; | |
2527 | cqicb->sbq_len = cpu_to_le16(bq_len); | |
2528 | rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16; | |
2529 | rx_ring->sbq_curr_idx = 0; | |
2530 | rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx; | |
2531 | rx_ring->sbq_free_cnt = 16; | |
2532 | } | |
2533 | switch (rx_ring->type) { | |
2534 | case TX_Q: | |
2535 | /* Outbound completion handling rx_rings are | |
2536 | * deferred to worker threads in both the single | |
2537 | * and multi-vector cases: this lets them run on | |
2538 | * multiple CPUs, and avoids freeing the skb in | |
2539 | * interrupt context, which triggers a WARN_ON | |
2540 | * when all debug warnings are enabled. | |
2541 | * There is room to play with this more: only | |
2542 | * defer to a worker when more than some threshold | |
2543 | * of outbound completions are queued and more | |
2544 | * than one queue is active, i.e. when the benefit | |
2545 | * outweighs the cost of a context switch. | |
2546 | */ | |
2547 | INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean); | |
2556 | cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); | |
2557 | cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames); | |
2558 | break; | |
2559 | case DEFAULT_Q: | |
2560 | INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean); | |
2561 | cqicb->irq_delay = 0; | |
2562 | cqicb->pkt_delay = 0; | |
2563 | break; | |
2564 | case RX_Q: | |
2565 | /* Inbound completion handling rx_rings run in | |
2566 | * separate NAPI contexts. | |
2567 | */ | |
2568 | netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix, | |
2569 | 64); | |
2570 | cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); | |
2571 | cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames); | |
2572 | break; | |
2573 | default: | |
2574 | QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n", | |
2575 | rx_ring->type); | |
2576 | } | |
2577 | QPRINTK(qdev, IFUP, INFO, "Initializing rx work queue.\n"); | |
2578 | err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), | |
2579 | CFG_LCQ, rx_ring->cq_id); | |
2580 | if (err) { | |
2581 | QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n"); | |
2582 | return err; | |
2583 | } | |
2584 | QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n"); | |
2585 | /* | |
2586 | * Advance the producer index for the buffer queues. | |
2587 | */ | |
2588 | wmb(); | |
2589 | if (rx_ring->lbq_len) | |
2590 | ql_write_db_reg(rx_ring->lbq_prod_idx, | |
2591 | rx_ring->lbq_prod_idx_db_reg); | |
2592 | if (rx_ring->sbq_len) | |
2593 | ql_write_db_reg(rx_ring->sbq_prod_idx, | |
2594 | rx_ring->sbq_prod_idx_db_reg); | |
2595 | return err; | |
2596 | } | |
2597 | ||
2598 | static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) | |
2599 | { | |
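| /* This cast assumes the wqicb is the first member of | |
| * struct tx_ring, so the ring pointer doubles as the | |
| * control block pointer. | |
| */ | |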
2600 | struct wqicb *wqicb = (struct wqicb *)tx_ring; | |
2601 | void __iomem *doorbell_area = | |
2602 | qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id); | |
2603 | void *shadow_reg = qdev->tx_ring_shadow_reg_area + | |
2604 | (tx_ring->wq_id * sizeof(u64)); | |
2605 | u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma + | |
2606 | (tx_ring->wq_id * sizeof(u64)); | |
2607 | int err = 0; | |
2608 | ||
2609 | /* | |
2610 | * Assign doorbell registers for this tx_ring. | |
2611 | */ | |
2612 | /* TX PCI doorbell mem area for tx producer index */ | |
2613 | tx_ring->prod_idx_db_reg = (u32 *) doorbell_area; | |
2614 | tx_ring->prod_idx = 0; | |
2615 | /* TX PCI doorbell mem area + 0x04 */ | |
2616 | tx_ring->valid_db_reg = doorbell_area + 0x04; | |
2617 | ||
2618 | /* | |
2619 | * Assign shadow registers for this tx_ring. | |
2620 | */ | |
2621 | tx_ring->cnsmr_idx_sh_reg = shadow_reg; | |
2622 | tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma; | |
2623 | ||
2624 | wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT); | |
2625 | wqicb->flags = cpu_to_le16(Q_FLAGS_LC | | |
2626 | Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO); | |
2627 | wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id); | |
2628 | wqicb->rid = 0; | |
2629 | wqicb->addr_lo = cpu_to_le32(tx_ring->wq_base_dma); | |
2630 | wqicb->addr_hi = cpu_to_le32((u64) tx_ring->wq_base_dma >> 32); | |
2631 | ||
2632 | wqicb->cnsmr_idx_addr_lo = cpu_to_le32(tx_ring->cnsmr_idx_sh_reg_dma); | |
2633 | wqicb->cnsmr_idx_addr_hi = | |
2634 | cpu_to_le32((u64) tx_ring->cnsmr_idx_sh_reg_dma >> 32); | |
2635 | ||
2636 | ql_init_tx_ring(qdev, tx_ring); | |
2637 | ||
2638 | err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ, | |
2639 | (u16) tx_ring->wq_id); | |
2640 | if (err) { | |
2641 | QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n"); | |
2642 | return err; | |
2643 | } | |
2644 | QPRINTK(qdev, IFUP, INFO, "Successfully loaded WQICB.\n"); | |
2645 | return err; | |
2646 | } | |
2647 | ||
2648 | static void ql_disable_msix(struct ql_adapter *qdev) | |
2649 | { | |
2650 | if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { | |
2651 | pci_disable_msix(qdev->pdev); | |
2652 | clear_bit(QL_MSIX_ENABLED, &qdev->flags); | |
2653 | kfree(qdev->msi_x_entry); | |
2654 | qdev->msi_x_entry = NULL; | |
2655 | } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) { | |
2656 | pci_disable_msi(qdev->pdev); | |
2657 | clear_bit(QL_MSI_ENABLED, &qdev->flags); | |
2658 | } | |
2659 | } | |
2660 | ||
2661 | static void ql_enable_msix(struct ql_adapter *qdev) | |
2662 | { | |
2663 | int i; | |
2664 | ||
2665 | qdev->intr_count = 1; | |
2666 | /* Get the MSIX vectors. */ | |
2667 | if (irq_type == MSIX_IRQ) { | |
2668 | /* Try to alloc space for the msix struct; | |
2669 | * if it fails, fall back to MSI/legacy. | |
2670 | */ | |
2671 | qdev->msi_x_entry = kcalloc(qdev->rx_ring_count, | |
2672 | sizeof(struct msix_entry), | |
2673 | GFP_KERNEL); | |
2674 | if (!qdev->msi_x_entry) { | |
2675 | irq_type = MSI_IRQ; | |
2676 | goto msi; | |
2677 | } | |
2678 | ||
2679 | for (i = 0; i < qdev->rx_ring_count; i++) | |
2680 | qdev->msi_x_entry[i].entry = i; | |
2681 | ||
2682 | if (!pci_enable_msix | |
2683 | (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) { | |
2684 | set_bit(QL_MSIX_ENABLED, &qdev->flags); | |
2685 | qdev->intr_count = qdev->rx_ring_count; | |
2686 | QPRINTK(qdev, IFUP, INFO, | |
2687 | "MSI-X Enabled, got %d vectors.\n", | |
2688 | qdev->intr_count); | |
2689 | return; | |
2690 | } else { | |
2691 | kfree(qdev->msi_x_entry); | |
2692 | qdev->msi_x_entry = NULL; | |
2693 | QPRINTK(qdev, IFUP, WARNING, | |
2694 | "MSI-X Enable failed, trying MSI.\n"); | |
2695 | irq_type = MSI_IRQ; | |
2696 | } | |
2697 | } | |
2698 | msi: | |
2699 | if (irq_type == MSI_IRQ) { | |
2700 | if (!pci_enable_msi(qdev->pdev)) { | |
2701 | set_bit(QL_MSI_ENABLED, &qdev->flags); | |
2702 | QPRINTK(qdev, IFUP, INFO, | |
2703 | "Running with MSI interrupts.\n"); | |
2704 | return; | |
2705 | } | |
2706 | } | |
2707 | irq_type = LEG_IRQ; | |
c4e84bde RM |
2708 | QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n"); |
2709 | } | |
2710 | ||
2711 | /* | |
2712 | * Here we build the intr_context structures based on | |
2713 | * our rx_ring count and intr vector count. | |
2714 | * The intr_context structure is used to hook each vector | |
2715 | * to possibly different handlers. | |
2716 | */ | |
2717 | static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev) | |
2718 | { | |
2719 | int i = 0; | |
2720 | struct intr_context *intr_context = &qdev->intr_context[0]; | |
2721 | ||
2722 | ql_enable_msix(qdev); | |
2723 | ||
2724 | if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { | |
2725 | /* Each rx_ring has its | |
2726 | * own intr_context since we have separate | |
2727 | * vectors for each queue. | |
2728 | * This is only true when MSI-X is enabled. | |
2729 | */ | |
2730 | for (i = 0; i < qdev->intr_count; i++, intr_context++) { | |
2731 | qdev->rx_ring[i].irq = i; | |
2732 | intr_context->intr = i; | |
2733 | intr_context->qdev = qdev; | |
2734 | /* | |
2735 | * We set up each vector's enable/disable/read bits so | |
2736 | * there are no bit/mask calculations in the critical path. | |
2737 | */ | |
2738 | intr_context->intr_en_mask = | |
2739 | INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | | |
2740 | INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD | |
2741 | | i; | |
2742 | intr_context->intr_dis_mask = | |
2743 | INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | | |
2744 | INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK | | |
2745 | INTR_EN_IHD | i; | |
2746 | intr_context->intr_read_mask = | |
2747 | INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | | |
2748 | INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD | | |
2749 | i; | |
2750 | ||
2751 | if (i == 0) { | |
2752 | /* | |
2753 | * Default queue handles bcast/mcast plus | |
2754 | * async events. Needs buffers. | |
2755 | */ | |
2756 | intr_context->handler = qlge_isr; | |
2757 | sprintf(intr_context->name, "%s-default-queue", | |
2758 | qdev->ndev->name); | |
2759 | } else if (i < qdev->rss_ring_first_cq_id) { | |
2760 | /* | |
2761 | * Outbound queue is for outbound completions only. | |
2762 | */ | |
2763 | intr_context->handler = qlge_msix_tx_isr; | |
2764 | sprintf(intr_context->name, "%s-txq-%d", | |
2765 | qdev->ndev->name, i); | |
2766 | } else { | |
2767 | /* | |
2768 | * Inbound queues handle unicast frames only. | |
2769 | */ | |
2770 | intr_context->handler = qlge_msix_rx_isr; | |
2771 | sprintf(intr_context->name, "%s-rxq-%d", | |
2772 | qdev->ndev->name, i); | |
2773 | } | |
2774 | } | |
2775 | } else { | |
2776 | /* | |
2777 | * All rx_rings use the same intr_context since | |
2778 | * there is only one vector. | |
2779 | */ | |
2780 | intr_context->intr = 0; | |
2781 | intr_context->qdev = qdev; | |
2782 | /* | |
2783 | * We set up each vector's enable/disable/read bits so | |
2784 | * there are no bit/mask calculations in the critical path. | |
2785 | */ | |
2786 | intr_context->intr_en_mask = | |
2787 | INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE; | |
2788 | intr_context->intr_dis_mask = | |
2789 | INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | | |
2790 | INTR_EN_TYPE_DISABLE; | |
2791 | intr_context->intr_read_mask = | |
2792 | INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ; | |
2793 | /* | |
2794 | * Single interrupt means one handler for all rings. | |
2795 | */ | |
2796 | intr_context->handler = qlge_isr; | |
2797 | sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name); | |
2798 | for (i = 0; i < qdev->rx_ring_count; i++) | |
2799 | qdev->rx_ring[i].irq = 0; | |
2800 | } | |
2801 | } | |
2802 | ||
2803 | static void ql_free_irq(struct ql_adapter *qdev) | |
2804 | { | |
2805 | int i; | |
2806 | struct intr_context *intr_context = &qdev->intr_context[0]; | |
2807 | ||
2808 | for (i = 0; i < qdev->intr_count; i++, intr_context++) { | |
2809 | if (intr_context->hooked) { | |
2810 | if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { | |
2811 | free_irq(qdev->msi_x_entry[i].vector, | |
2812 | &qdev->rx_ring[i]); | |
2813 | QPRINTK(qdev, IFDOWN, ERR, | |
2814 | "freeing msix interrupt %d.\n", i); | |
2815 | } else { | |
2816 | free_irq(qdev->pdev->irq, &qdev->rx_ring[0]); | |
2817 | QPRINTK(qdev, IFDOWN, ERR, | |
2818 | "freeing msi interrupt %d.\n", i); | |
2819 | } | |
2820 | } | |
2821 | } | |
2822 | ql_disable_msix(qdev); | |
2823 | } | |
2824 | ||
2825 | static int ql_request_irq(struct ql_adapter *qdev) | |
2826 | { | |
2827 | int i; | |
2828 | int status = 0; | |
2829 | struct pci_dev *pdev = qdev->pdev; | |
2830 | struct intr_context *intr_context = &qdev->intr_context[0]; | |
2831 | ||
2832 | ql_resolve_queues_to_irqs(qdev); | |
2833 | ||
2834 | for (i = 0; i < qdev->intr_count; i++, intr_context++) { | |
2835 | atomic_set(&intr_context->irq_cnt, 0); | |
2836 | if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { | |
2837 | status = request_irq(qdev->msi_x_entry[i].vector, | |
2838 | intr_context->handler, | |
2839 | 0, | |
2840 | intr_context->name, | |
2841 | &qdev->rx_ring[i]); | |
2842 | if (status) { | |
2843 | QPRINTK(qdev, IFUP, ERR, | |
2844 | "Failed request for MSIX interrupt %d.\n", | |
2845 | i); | |
2846 | goto err_irq; | |
2847 | } else { | |
2848 | QPRINTK(qdev, IFUP, INFO, | |
2849 | "Hooked intr %d, queue type %s%s%s, with name %s.\n", | |
2850 | i, | |
2851 | qdev->rx_ring[i].type == | |
2852 | DEFAULT_Q ? "DEFAULT_Q" : "", | |
2853 | qdev->rx_ring[i].type == | |
2854 | TX_Q ? "TX_Q" : "", | |
2855 | qdev->rx_ring[i].type == | |
2856 | RX_Q ? "RX_Q" : "", intr_context->name); | |
2857 | } | |
2858 | } else { | |
2859 | QPRINTK(qdev, IFUP, DEBUG, | |
2860 | "trying msi or legacy interrupts.\n"); | |
2861 | QPRINTK(qdev, IFUP, DEBUG, | |
2862 | "%s: irq = %d.\n", __func__, pdev->irq); | |
2863 | QPRINTK(qdev, IFUP, DEBUG, | |
2864 | "%s: context->name = %s.\n", __func__, | |
2865 | intr_context->name); | |
2866 | QPRINTK(qdev, IFUP, DEBUG, | |
2867 | "%s: dev_id = 0x%p.\n", __func__, | |
2868 | &qdev->rx_ring[0]); | |
2869 | status = | |
2870 | request_irq(pdev->irq, qlge_isr, | |
2871 | test_bit(QL_MSI_ENABLED, | |
2872 | &qdev-> | |
2873 | flags) ? 0 : IRQF_SHARED, | |
2874 | intr_context->name, &qdev->rx_ring[0]); | |
2875 | if (status) | |
2876 | goto err_irq; | |
2877 | ||
2878 | QPRINTK(qdev, IFUP, ERR, | |
2879 | "Hooked intr %d, queue type %s%s%s, with name %s.\n", | |
2880 | i, | |
2881 | qdev->rx_ring[0].type == | |
2882 | DEFAULT_Q ? "DEFAULT_Q" : "", | |
2883 | qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "", | |
2884 | qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "", | |
2885 | intr_context->name); | |
2886 | } | |
2887 | intr_context->hooked = 1; | |
2888 | } | |
2889 | return status; | |
2890 | err_irq: | |
2891 | QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n"); | |
2892 | ql_free_irq(qdev); | |
2893 | return status; | |
2894 | } | |
2895 | ||
2896 | static int ql_start_rss(struct ql_adapter *qdev) | |
2897 | { | |
2898 | struct ricb *ricb = &qdev->ricb; | |
2899 | int status = 0; | |
2900 | int i; | |
2901 | u8 *hash_id = (u8 *) ricb->hash_cq_id; | |
2902 | ||
2903 | memset((void *)ricb, 0, sizeof(*ricb)); | |
2904 | ||
2905 | ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K; | |
2906 | ricb->flags = | |
2907 | (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 | | |
2908 | RSS_RT6); | |
2909 | ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1); | |
2910 | ||
2911 | /* | |
2912 | * Fill out the Indirection Table. | |
2913 | */ | |
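| /* The i & 1 pattern below alternates the 32 indirection | |
| * entries between the first two inbound (RSS) completion | |
| * queues. | |
| */ | |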
2914 | for (i = 0; i < 32; i++) | |
2915 | hash_id[i] = i & 1; | |
2916 | ||
2917 | /* | |
2918 | * Random values for the IPv6 and IPv4 Hash Keys. | |
2919 | */ | |
2920 | get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40); | |
2921 | get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16); | |
2922 | ||
2923 | QPRINTK(qdev, IFUP, INFO, "Initializing RSS.\n"); | |
2924 | ||
2925 | status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0); | |
2926 | if (status) { | |
2927 | QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n"); | |
2928 | return status; | |
2929 | } | |
2930 | QPRINTK(qdev, IFUP, INFO, "Successfully loaded RICB.\n"); | |
2931 | return status; | |
2932 | } | |
2933 | ||
2934 | /* Initialize the frame-to-queue routing. */ | |
2935 | static int ql_route_initialize(struct ql_adapter *qdev) | |
2936 | { | |
2937 | int status = 0; | |
2938 | int i; | |
2939 | ||
2940 | /* Clear all the entries in the routing table. */ | |
2941 | for (i = 0; i < 16; i++) { | |
2942 | status = ql_set_routing_reg(qdev, i, 0, 0); | |
2943 | if (status) { | |
2944 | QPRINTK(qdev, IFUP, ERR, | |
2945 | "Failed to init routing register for CAM packets.\n"); | |
2946 | return status; | |
2947 | } | |
2948 | } | |
2949 | ||
2950 | status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1); | |
2951 | if (status) { | |
2952 | QPRINTK(qdev, IFUP, ERR, | |
2953 | "Failed to init routing register for error packets.\n"); | |
2954 | return status; | |
2955 | } | |
2956 | status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1); | |
2957 | if (status) { | |
2958 | QPRINTK(qdev, IFUP, ERR, | |
2959 | "Failed to init routing register for broadcast packets.\n"); | |
2960 | return status; | |
2961 | } | |
2962 | /* If we have more than one inbound queue, then turn on RSS in the | |
2963 | * routing block. | |
2964 | */ | |
2965 | if (qdev->rss_ring_count > 1) { | |
2966 | status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT, | |
2967 | RT_IDX_RSS_MATCH, 1); | |
2968 | if (status) { | |
2969 | QPRINTK(qdev, IFUP, ERR, | |
2970 | "Failed to init routing register for MATCH RSS packets.\n"); | |
2971 | return status; | |
2972 | } | |
2973 | } | |
2974 | ||
2975 | status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, | |
2976 | RT_IDX_CAM_HIT, 1); | |
2977 | if (status) { | |
2978 | QPRINTK(qdev, IFUP, ERR, | |
2979 | "Failed to init routing register for CAM packets.\n"); | |
2980 | return status; | |
2981 | } | |
2982 | return status; | |
2983 | } | |
2984 | ||
2985 | static int ql_adapter_initialize(struct ql_adapter *qdev) | |
2986 | { | |
2987 | u32 value, mask; | |
2988 | int i; | |
2989 | int status = 0; | |
2990 | ||
2991 | /* | |
2992 | * Set up the System register to halt on errors. | |
2993 | */ | |
2994 | value = SYS_EFE | SYS_FAE; | |
2995 | mask = value << 16; | |
2996 | ql_write32(qdev, SYS, mask | value); | |
2997 | ||
2998 | /* Set the default queue. */ | |
2999 | value = NIC_RCV_CFG_DFQ; | |
3000 | mask = NIC_RCV_CFG_DFQ_MASK; | |
3001 | ql_write32(qdev, NIC_RCV_CFG, (mask | value)); | |
3002 | ||
3003 | /* Set the MPI interrupt to enabled. */ | |
3004 | ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); | |
3005 | ||
3006 | /* Enable the function, set pagesize, enable error checking. */ | |
3007 | value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND | | |
3008 | FSC_EC | FSC_VM_PAGE_4K | FSC_SH; | |
3009 | ||
3010 | /* Set/clear header splitting. */ | |
3011 | mask = FSC_VM_PAGESIZE_MASK | | |
3012 | FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16); | |
3013 | ql_write32(qdev, FSC, mask | value); | |
3014 | ||
3015 | ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP | | |
3016 | min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE)); | |
3017 | ||
3018 | /* Start up the rx queues. */ | |
3019 | for (i = 0; i < qdev->rx_ring_count; i++) { | |
3020 | status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); | |
3021 | if (status) { | |
3022 | QPRINTK(qdev, IFUP, ERR, | |
3023 | "Failed to start rx ring[%d].\n", i); | |
3024 | return status; | |
3025 | } | |
3026 | } | |
3027 | ||
3028 | /* If there is more than one inbound completion queue | |
3029 | * then download a RICB to configure RSS. | |
3030 | */ | |
3031 | if (qdev->rss_ring_count > 1) { | |
3032 | status = ql_start_rss(qdev); | |
3033 | if (status) { | |
3034 | QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n"); | |
3035 | return status; | |
3036 | } | |
3037 | } | |
3038 | ||
3039 | /* Start up the tx queues. */ | |
3040 | for (i = 0; i < qdev->tx_ring_count; i++) { | |
3041 | status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]); | |
3042 | if (status) { | |
3043 | QPRINTK(qdev, IFUP, ERR, | |
3044 | "Failed to start tx ring[%d].\n", i); | |
3045 | return status; | |
3046 | } | |
3047 | } | |
3048 | ||
3049 | status = ql_port_initialize(qdev); | |
3050 | if (status) { | |
3051 | QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n"); | |
3052 | return status; | |
3053 | } | |
3054 | ||
3055 | status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr, | |
3056 | MAC_ADDR_TYPE_CAM_MAC, qdev->func); | |
3057 | if (status) { | |
3058 | QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n"); | |
3059 | return status; | |
3060 | } | |
3061 | ||
3062 | status = ql_route_initialize(qdev); | |
3063 | if (status) { | |
3064 | QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n"); | |
3065 | return status; | |
3066 | } | |
3067 | ||
3068 | /* Start NAPI for the RSS queues. */ | |
3069 | for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) { | |
3070 | QPRINTK(qdev, IFUP, INFO, "Enabling NAPI for rx_ring[%d].\n", | |
3071 | i); | |
3072 | napi_enable(&qdev->rx_ring[i].napi); | |
3073 | } | |
3074 | ||
3075 | return status; | |
3076 | } | |
3077 | ||
3078 | /* Issue soft reset to chip. */ | |
3079 | static int ql_adapter_reset(struct ql_adapter *qdev) | |
3080 | { | |
3081 | u32 value; | |
3082 | int max_wait_time; | |
3083 | int status = 0; | |
3084 | int resetCnt = 0; | |
3085 | ||
3086 | #define MAX_RESET_CNT 1 | |
3087 | issueReset: | |
3088 | resetCnt++; | |
3089 | QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n"); | |
3090 | ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); | |
3091 | /* Wait for reset to complete. */ | |
3092 | max_wait_time = 3; | |
3093 | QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n", | |
3094 | max_wait_time); | |
3095 | do { | |
3096 | value = ql_read32(qdev, RST_FO); | |
3097 | if ((value & RST_FO_FR) == 0) | |
3098 | break; | |
3099 | ||
3100 | ssleep(1); | |
3101 | } while ((--max_wait_time)); | |
3102 | if (value & RST_FO_FR) { | |
3103 | QPRINTK(qdev, IFDOWN, ERR, | |
3104 | "Stuck in SoftReset: FSC_SR:0x%08x\n", value); | |
3105 | if (resetCnt < MAX_RESET_CNT) | |
3106 | goto issueReset; | |
3107 | } | |
3108 | if (max_wait_time == 0) { | |
3109 | status = -ETIMEDOUT; | |
3110 | QPRINTK(qdev, IFDOWN, ERR, | |
3111 | "ETIMEOUT!!! errored out of resetting the chip!\n"); | |
3112 | } | |
3113 | ||
3114 | return status; | |
3115 | } | |
3116 | ||
3117 | static void ql_display_dev_info(struct net_device *ndev) | |
3118 | { | |
3119 | struct ql_adapter *qdev = netdev_priv(ndev); | |
3120 | ||
3121 | QPRINTK(qdev, PROBE, INFO, | |
3122 | "Function #%d, NIC Roll %d, NIC Rev = %d, " | |
3123 | "XG Roll = %d, XG Rev = %d.\n", | |
3124 | qdev->func, | |
3125 | qdev->chip_rev_id & 0x0000000f, | |
3126 | qdev->chip_rev_id >> 4 & 0x0000000f, | |
3127 | qdev->chip_rev_id >> 8 & 0x0000000f, | |
3128 | qdev->chip_rev_id >> 12 & 0x0000000f); | |
7c510e4b | 3129 | QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr); |
c4e84bde RM |
3130 | } |
3131 | ||
3132 | static int ql_adapter_down(struct ql_adapter *qdev) | |
3133 | { | |
3134 | struct net_device *ndev = qdev->ndev; | |
3135 | int i, status = 0; | |
3136 | struct rx_ring *rx_ring; | |
3137 | ||
3138 | netif_stop_queue(ndev); | |
3139 | netif_carrier_off(ndev); | |
3140 | ||
3141 | cancel_delayed_work_sync(&qdev->asic_reset_work); | |
3142 | cancel_delayed_work_sync(&qdev->mpi_reset_work); | |
3143 | cancel_delayed_work_sync(&qdev->mpi_work); | |
3144 | ||
3145 | /* The default queue at index 0 is always processed in | |
3146 | * a workqueue. | |
3147 | */ | |
3148 | cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work); | |
3149 | ||
3150 | /* The rest of the rx_rings are processed in | |
3151 | * a workqueue only if it's a single interrupt | |
3152 | * environment (MSI/Legacy). | |
3153 | */ | |
3154 | for (i = 1; i < qdev->rx_ring_count; i++) { | |
3155 | rx_ring = &qdev->rx_ring[i]; | |
3156 | /* Only the RSS rings use NAPI on multi irq | |
3157 | * environment. Outbound completion processing | |
3158 | * is done in interrupt context. | |
3159 | */ | |
3160 | if (i >= qdev->rss_ring_first_cq_id) { | |
3161 | napi_disable(&rx_ring->napi); | |
3162 | } else { | |
3163 | cancel_delayed_work_sync(&rx_ring->rx_work); | |
3164 | } | |
3165 | } | |
3166 | ||
3167 | clear_bit(QL_ADAPTER_UP, &qdev->flags); | |
3168 | ||
3169 | ql_disable_interrupts(qdev); | |
3170 | ||
3171 | ql_tx_ring_clean(qdev); | |
3172 | ||
3173 | spin_lock(&qdev->hw_lock); | |
3174 | status = ql_adapter_reset(qdev); | |
3175 | if (status) | |
3176 | QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n", | |
3177 | qdev->func); | |
3178 | spin_unlock(&qdev->hw_lock); | |
3179 | return status; | |
3180 | } | |
3181 | ||
3182 | static int ql_adapter_up(struct ql_adapter *qdev) | |
3183 | { | |
3184 | int err = 0; | |
3185 | ||
3186 | spin_lock(&qdev->hw_lock); | |
3187 | err = ql_adapter_initialize(qdev); | |
3188 | if (err) { | |
3189 | QPRINTK(qdev, IFUP, ERR, "Unable to initialize adapter.\n"); | |
3190 | spin_unlock(&qdev->hw_lock); | |
3191 | goto err_init; | |
3192 | } | |
3193 | spin_unlock(&qdev->hw_lock); | |
3194 | set_bit(QL_ADAPTER_UP, &qdev->flags); | |
3195 | ql_enable_interrupts(qdev); | |
3196 | ql_enable_all_completion_interrupts(qdev); | |
3197 | if ((ql_read32(qdev, STS) & qdev->port_init)) { | |
3198 | netif_carrier_on(qdev->ndev); | |
3199 | netif_start_queue(qdev->ndev); | |
3200 | } | |
3201 | ||
3202 | return 0; | |
3203 | err_init: | |
3204 | ql_adapter_reset(qdev); | |
3205 | return err; | |
3206 | } | |
3207 | ||
3208 | static int ql_cycle_adapter(struct ql_adapter *qdev) | |
3209 | { | |
3210 | int status; | |
3211 | ||
3212 | status = ql_adapter_down(qdev); | |
3213 | if (status) | |
3214 | goto error; | |
3215 | ||
3216 | status = ql_adapter_up(qdev); | |
3217 | if (status) | |
3218 | goto error; | |
3219 | ||
3220 | return status; | |
3221 | error: | |
3222 | QPRINTK(qdev, IFUP, ALERT, | |
3223 | "Driver up/down cycle failed, closing device\n"); | |
3224 | rtnl_lock(); | |
3225 | dev_close(qdev->ndev); | |
3226 | rtnl_unlock(); | |
3227 | return status; | |
3228 | } | |
3229 | ||
3230 | static void ql_release_adapter_resources(struct ql_adapter *qdev) | |
3231 | { | |
3232 | ql_free_mem_resources(qdev); | |
3233 | ql_free_irq(qdev); | |
3234 | } | |
3235 | ||
3236 | static int ql_get_adapter_resources(struct ql_adapter *qdev) | |
3237 | { | |
3238 | int status = 0; | |
3239 | ||
3240 | if (ql_alloc_mem_resources(qdev)) { | |
3241 | QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n"); | |
3242 | return -ENOMEM; | |
3243 | } | |
3244 | status = ql_request_irq(qdev); | |
3245 | if (status) | |
3246 | goto err_irq; | |
3247 | return status; | |
3248 | err_irq: | |
3249 | ql_free_mem_resources(qdev); | |
3250 | return status; | |
3251 | } | |
3252 | ||
3253 | static int qlge_close(struct net_device *ndev) | |
3254 | { | |
3255 | struct ql_adapter *qdev = netdev_priv(ndev); | |
3256 | ||
3257 | /* | |
3258 | * Wait for device to recover from a reset. | |
3259 | * (Rarely happens, but possible.) | |
3260 | */ | |
3261 | while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) | |
3262 | msleep(1); | |
3263 | ql_adapter_down(qdev); | |
3264 | ql_release_adapter_resources(qdev); | |
3265 | ql_free_ring_cb(qdev); | |
3266 | return 0; | |
3267 | } | |
3268 | ||
3269 | static int ql_configure_rings(struct ql_adapter *qdev) | |
3270 | { | |
3271 | int i; | |
3272 | struct rx_ring *rx_ring; | |
3273 | struct tx_ring *tx_ring; | |
3274 | int cpu_cnt = num_online_cpus(); | |
3275 | ||
3276 | /* | |
3277 | * For each processor present we allocate one | |
3278 | * rx_ring for outbound completions, and one | |
3279 | * rx_ring for inbound completions. Plus there is | |
3280 | * always the one default queue. For the CPU | |
3281 | * counts we end up with the following rx_rings: | |
3282 | * rx_ring count = | |
3283 | * one default queue + | |
3284 | * (CPU count * outbound completion rx_ring) + | |
3285 | * (CPU count * inbound (RSS) completion rx_ring) | |
3286 | * To keep it simple we limit the total number of | |
3287 | * queues to < 32, so we cap the CPU count at 8. | |
3288 | * This limitation can be removed when requested. | |
3289 | */ | |
3290 | ||
3291 | if (cpu_cnt > 8) | |
3292 | cpu_cnt = 8; | |
3293 | ||
3294 | /* | |
3295 | * rx_ring[0] is always the default queue. | |
3296 | */ | |
3297 | /* Allocate outbound completion ring for each CPU. */ | |
3298 | qdev->tx_ring_count = cpu_cnt; | |
3299 | /* Allocate inbound completion (RSS) ring for each CPU. */ | |
3300 | qdev->rss_ring_count = cpu_cnt; | |
3301 | /* cq_id for the first inbound ring handler. */ | |
3302 | qdev->rss_ring_first_cq_id = cpu_cnt + 1; | |
3303 | /* | |
3304 | * qdev->rx_ring_count: | |
3305 | * Total number of rx_rings. This includes the one | |
3306 | * default queue, a number of outbound completion | |
3307 | * handler rx_rings, and the number of inbound | |
3308 | * completion handler rx_rings. | |
3309 | */ | |
3310 | qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1; | |
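 | /* For example, with the CPU count capped at 8 this | |
 | * yields 17 rx_rings: cq_id 0 is the default queue, | |
 | * cq_ids 1-8 service the 8 tx_rings, and cq_ids 9-16 | |
 | * are the inbound (RSS) rings, so | |
 | * rss_ring_first_cq_id == 9. | |
 | */ | |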
3311 | ||
3312 | if (ql_alloc_ring_cb(qdev)) | |
3313 | return -ENOMEM; | |
3314 | ||
3315 | for (i = 0; i < qdev->tx_ring_count; i++) { | |
3316 | tx_ring = &qdev->tx_ring[i]; | |
3317 | memset(tx_ring, 0, sizeof(*tx_ring)); | |
3318 | tx_ring->qdev = qdev; | |
3319 | tx_ring->wq_id = i; | |
3320 | tx_ring->wq_len = qdev->tx_ring_size; | |
3321 | tx_ring->wq_size = | |
3322 | tx_ring->wq_len * sizeof(struct ob_mac_iocb_req); | |
3323 | ||
3324 | /* | |
3325 | * The completion queue IDs for the tx rings start | |
3326 | * immediately after the default Q ID, which is zero. | |
3327 | */ | |
3328 | tx_ring->cq_id = i + 1; | |
3329 | } | |
3330 | ||
3331 | for (i = 0; i < qdev->rx_ring_count; i++) { | |
3332 | rx_ring = &qdev->rx_ring[i]; | |
3333 | memset(rx_ring, 0, sizeof(*rx_ring)); | |
3334 | rx_ring->qdev = qdev; | |
3335 | rx_ring->cq_id = i; | |
3336 | rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */ | |
3337 | if (i == 0) { /* Default queue at index 0. */ | |
3338 | /* | |
3339 | * Default queue handles bcast/mcast plus | |
3340 | * async events. Needs buffers. | |
3341 | */ | |
3342 | rx_ring->cq_len = qdev->rx_ring_size; | |
3343 | rx_ring->cq_size = | |
3344 | rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); | |
3345 | rx_ring->lbq_len = NUM_LARGE_BUFFERS; | |
3346 | rx_ring->lbq_size = | |
3347 | rx_ring->lbq_len * sizeof(struct bq_element); | |
3348 | rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE; | |
3349 | rx_ring->sbq_len = NUM_SMALL_BUFFERS; | |
3350 | rx_ring->sbq_size = | |
3351 | rx_ring->sbq_len * sizeof(struct bq_element); | |
3352 | rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2; | |
3353 | rx_ring->type = DEFAULT_Q; | |
3354 | } else if (i < qdev->rss_ring_first_cq_id) { | |
3355 | /* | |
3356 | * Outbound queue handles outbound completions only. | |
3357 | */ | |
3358 | /* outbound cq is same size as tx_ring it services. */ | |
3359 | rx_ring->cq_len = qdev->tx_ring_size; | |
3360 | rx_ring->cq_size = | |
3361 | rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); | |
3362 | rx_ring->lbq_len = 0; | |
3363 | rx_ring->lbq_size = 0; | |
3364 | rx_ring->lbq_buf_size = 0; | |
3365 | rx_ring->sbq_len = 0; | |
3366 | rx_ring->sbq_size = 0; | |
3367 | rx_ring->sbq_buf_size = 0; | |
3368 | rx_ring->type = TX_Q; | |
3369 | } else { /* Inbound completions (RSS) queues */ | |
3370 | /* | |
3371 | * Inbound queues handle unicast frames only. | |
3372 | */ | |
3373 | rx_ring->cq_len = qdev->rx_ring_size; | |
3374 | rx_ring->cq_size = | |
3375 | rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); | |
3376 | rx_ring->lbq_len = NUM_LARGE_BUFFERS; | |
3377 | rx_ring->lbq_size = | |
3378 | rx_ring->lbq_len * sizeof(struct bq_element); | |
3379 | rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE; | |
3380 | rx_ring->sbq_len = NUM_SMALL_BUFFERS; | |
3381 | rx_ring->sbq_size = | |
3382 | rx_ring->sbq_len * sizeof(struct bq_element); | |
3383 | rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2; | |
3384 | rx_ring->type = RX_Q; | |
3385 | } | |
3386 | } | |
3387 | return 0; | |
3388 | } | |
3389 | ||
3390 | static int qlge_open(struct net_device *ndev) | |
3391 | { | |
3392 | int err = 0; | |
3393 | struct ql_adapter *qdev = netdev_priv(ndev); | |
3394 | ||
3395 | err = ql_configure_rings(qdev); | |
3396 | if (err) | |
3397 | return err; | |
3398 | ||
3399 | err = ql_get_adapter_resources(qdev); | |
3400 | if (err) | |
3401 | goto error_up; | |
3402 | ||
3403 | err = ql_adapter_up(qdev); | |
3404 | if (err) | |
3405 | goto error_up; | |
3406 | ||
3407 | return err; | |
3408 | ||
3409 | error_up: | |
3410 | ql_release_adapter_resources(qdev); | |
3411 | ql_free_ring_cb(qdev); | |
3412 | return err; | |
3413 | } | |
3414 | ||
3415 | static int qlge_change_mtu(struct net_device *ndev, int new_mtu) | |
3416 | { | |
3417 | struct ql_adapter *qdev = netdev_priv(ndev); | |
3418 | ||
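 | /* The hardware supports only the standard (1500) and | |
 | * jumbo (9000) MTU sizes; any other change request is | |
 | * rejected below. | |
 | */ | |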
3419 | if (ndev->mtu == 1500 && new_mtu == 9000) { | |
3420 | QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n"); | |
3421 | } else if (ndev->mtu == 9000 && new_mtu == 1500) { | |
3422 | QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n"); | |
3423 | } else if ((ndev->mtu == 1500 && new_mtu == 1500) || | |
3424 | (ndev->mtu == 9000 && new_mtu == 9000)) { | |
3425 | return 0; | |
3426 | } else { | |
3427 | return -EINVAL; | |
 | } | |
3428 | ndev->mtu = new_mtu; | |
3429 | return 0; | |
3430 | } | |
3431 | ||
3432 | static struct net_device_stats *qlge_get_stats(struct net_device *ndev) | |
3434 | { | |
3435 | struct ql_adapter *qdev = netdev_priv(ndev); | |
3436 | return &qdev->stats; | |
3437 | } | |
3438 | ||
3439 | static void qlge_set_multicast_list(struct net_device *ndev) | |
3440 | { | |
3441 | struct ql_adapter *qdev = netdev_priv(ndev); | |
3442 | struct dev_mc_list *mc_ptr; | |
3443 | int i; | |
3444 | ||
3445 | spin_lock(&qdev->hw_lock); | |
3446 | /* | |
3447 | * Set or clear promiscuous mode if a | |
3448 | * transition is taking place. | |
3449 | */ | |
3450 | if (ndev->flags & IFF_PROMISC) { | |
3451 | if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) { | |
3452 | if (ql_set_routing_reg | |
3453 | (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) { | |
3454 | QPRINTK(qdev, HW, ERR, | |
3455 | "Failed to set promiscous mode.\n"); | |
3456 | } else { | |
3457 | set_bit(QL_PROMISCUOUS, &qdev->flags); | |
3458 | } | |
3459 | } | |
3460 | } else { | |
3461 | if (test_bit(QL_PROMISCUOUS, &qdev->flags)) { | |
3462 | if (ql_set_routing_reg | |
3463 | (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) { | |
3464 | QPRINTK(qdev, HW, ERR, | |
3465 | "Failed to clear promiscous mode.\n"); | |
3466 | } else { | |
3467 | clear_bit(QL_PROMISCUOUS, &qdev->flags); | |
3468 | } | |
3469 | } | |
3470 | } | |
3471 | ||
3472 | /* | |
3473 | * Set or clear all multicast mode if a | |
3474 | * transition is taking place. | |
3475 | */ | |
3476 | if ((ndev->flags & IFF_ALLMULTI) || | |
3477 | (ndev->mc_count > MAX_MULTICAST_ENTRIES)) { | |
3478 | if (!test_bit(QL_ALLMULTI, &qdev->flags)) { | |
3479 | if (ql_set_routing_reg | |
3480 | (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) { | |
3481 | QPRINTK(qdev, HW, ERR, | |
3482 | "Failed to set all-multi mode.\n"); | |
3483 | } else { | |
3484 | set_bit(QL_ALLMULTI, &qdev->flags); | |
3485 | } | |
3486 | } | |
3487 | } else { | |
3488 | if (test_bit(QL_ALLMULTI, &qdev->flags)) { | |
3489 | if (ql_set_routing_reg | |
3490 | (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) { | |
3491 | QPRINTK(qdev, HW, ERR, | |
3492 | "Failed to clear all-multi mode.\n"); | |
3493 | } else { | |
3494 | clear_bit(QL_ALLMULTI, &qdev->flags); | |
3495 | } | |
3496 | } | |
3497 | } | |
3498 | ||
3499 | if (ndev->mc_count) { | |
3500 | for (i = 0, mc_ptr = ndev->mc_list; mc_ptr; | |
3501 | i++, mc_ptr = mc_ptr->next) | |
3502 | if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr, | |
3503 | MAC_ADDR_TYPE_MULTI_MAC, i)) { | |
3504 | QPRINTK(qdev, HW, ERR, | |
3505 | "Failed to loadmulticast address.\n"); | |
3506 | goto exit; | |
3507 | } | |
3508 | if (ql_set_routing_reg | |
3509 | (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) { | |
3510 | QPRINTK(qdev, HW, ERR, | |
3511 | "Failed to set multicast match mode.\n"); | |
3512 | } else { | |
3513 | set_bit(QL_ALLMULTI, &qdev->flags); | |
3514 | } | |
3515 | } | |
3516 | exit: | |
3517 | spin_unlock(&qdev->hw_lock); | |
3518 | } | |
3519 | ||
3520 | static int qlge_set_mac_address(struct net_device *ndev, void *p) | |
3521 | { | |
3522 | struct ql_adapter *qdev = netdev_priv(ndev); | |
3523 | struct sockaddr *addr = p; | |
3524 | ||
3525 | if (netif_running(ndev)) | |
3526 | return -EBUSY; | |
3527 | ||
3528 | if (!is_valid_ether_addr(addr->sa_data)) | |
3529 | return -EADDRNOTAVAIL; | |
3530 | memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); | |
3531 | ||
3532 | spin_lock(&qdev->hw_lock); | |
3533 | if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, | |
3534 | MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */ | |
3535 | QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n"); | |
 | spin_unlock(&qdev->hw_lock); | |
3536 | return -1; | |
3537 | } | |
3538 | spin_unlock(&qdev->hw_lock); | |
3539 | ||
3540 | return 0; | |
3541 | } | |
3542 | ||
3543 | static void qlge_tx_timeout(struct net_device *ndev) | |
3544 | { | |
3545 | struct ql_adapter *qdev = netdev_priv(ndev); | |
3546 | queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0); | |
3547 | } | |
3548 | ||
3549 | static void ql_asic_reset_work(struct work_struct *work) | |
3550 | { | |
3551 | struct ql_adapter *qdev = | |
3552 | container_of(work, struct ql_adapter, asic_reset_work.work); | |
3553 | ql_cycle_adapter(qdev); | |
3554 | } | |
3555 | ||
3556 | static void ql_get_board_info(struct ql_adapter *qdev) | |
3557 | { | |
3558 | qdev->func = | |
3559 | (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT; | |
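 | /* Each of the two NIC functions uses its own XGMAC | |
 | * semaphore, link/init status bits, and MPI mailbox | |
 | * addresses. | |
 | */ | |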
3560 | if (qdev->func) { | |
3561 | qdev->xg_sem_mask = SEM_XGMAC1_MASK; | |
3562 | qdev->port_link_up = STS_PL1; | |
3563 | qdev->port_init = STS_PI1; | |
3564 | qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI; | |
3565 | qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO; | |
3566 | } else { | |
3567 | qdev->xg_sem_mask = SEM_XGMAC0_MASK; | |
3568 | qdev->port_link_up = STS_PL0; | |
3569 | qdev->port_init = STS_PI0; | |
3570 | qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI; | |
3571 | qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO; | |
3572 | } | |
3573 | qdev->chip_rev_id = ql_read32(qdev, REV_ID); | |
3574 | } | |
3575 | ||
3576 | static void ql_release_all(struct pci_dev *pdev) | |
3577 | { | |
3578 | struct net_device *ndev = pci_get_drvdata(pdev); | |
3579 | struct ql_adapter *qdev = netdev_priv(ndev); | |
3580 | ||
3581 | if (qdev->workqueue) { | |
3582 | destroy_workqueue(qdev->workqueue); | |
3583 | qdev->workqueue = NULL; | |
3584 | } | |
3585 | if (qdev->q_workqueue) { | |
3586 | destroy_workqueue(qdev->q_workqueue); | |
3587 | qdev->q_workqueue = NULL; | |
3588 | } | |
3589 | if (qdev->reg_base) | |
3590 | iounmap((void *)qdev->reg_base); | |
3591 | if (qdev->doorbell_area) | |
3592 | iounmap(qdev->doorbell_area); | |
3593 | pci_release_regions(pdev); | |
3594 | pci_set_drvdata(pdev, NULL); | |
3595 | } | |
3596 | ||
3597 | static int __devinit ql_init_device(struct pci_dev *pdev, | |
3598 | struct net_device *ndev, int cards_found) | |
3599 | { | |
3600 | struct ql_adapter *qdev = netdev_priv(ndev); | |
3601 | int pos, err = 0; | |
3602 | u16 val16; | |
3603 | ||
3604 | memset(qdev, 0, sizeof(*qdev)); | |
3605 | err = pci_enable_device(pdev); | |
3606 | if (err) { | |
3607 | dev_err(&pdev->dev, "PCI device enable failed.\n"); | |
3608 | return err; | |
3609 | } | |
3610 | ||
3611 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | |
3612 | if (pos <= 0) { | |
3613 | dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, " | |
3614 | "aborting.\n"); | |
 | err = -EIO; | |
3615 | goto err_out; | |
3616 | } else { | |
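 | /* Turn off the no-snoop option and enable reporting | |
 | * of correctable, non-fatal, fatal, and | |
 | * unsupported-request errors. | |
 | */ | |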
3617 | pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); | |
3618 | val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; | |
3619 | val16 |= (PCI_EXP_DEVCTL_CERE | | |
3620 | PCI_EXP_DEVCTL_NFERE | | |
3621 | PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE); | |
3622 | pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16); | |
3623 | } | |
3624 | ||
3625 | err = pci_request_regions(pdev, DRV_NAME); | |
3626 | if (err) { | |
3627 | dev_err(&pdev->dev, "PCI region request failed.\n"); | |
3628 | goto err_out; | |
3629 | } | |
3630 | ||
3631 | pci_set_master(pdev); | |
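 | /* Prefer a 64-bit DMA mask; fall back to 32-bit if | |
 | * the platform cannot support it. | |
 | */ | |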
3632 | if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { | |
3633 | set_bit(QL_DMA64, &qdev->flags); | |
3634 | err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); | |
3635 | } else { | |
3636 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | |
3637 | if (!err) | |
3638 | err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | |
3639 | } | |
3640 | ||
3641 | if (err) { | |
3642 | dev_err(&pdev->dev, "No usable DMA configuration.\n"); | |
3643 | goto err_out; | |
3644 | } | |
3645 | ||
3646 | pci_set_drvdata(pdev, ndev); | |
3647 | qdev->reg_base = | |
3648 | ioremap_nocache(pci_resource_start(pdev, 1), | |
3649 | pci_resource_len(pdev, 1)); | |
3650 | if (!qdev->reg_base) { | |
3651 | dev_err(&pdev->dev, "Register mapping failed.\n"); | |
3652 | err = -ENOMEM; | |
3653 | goto err_out; | |
3654 | } | |
3655 | ||
3656 | qdev->doorbell_area_size = pci_resource_len(pdev, 3); | |
3657 | qdev->doorbell_area = | |
3658 | ioremap_nocache(pci_resource_start(pdev, 3), | |
3659 | pci_resource_len(pdev, 3)); | |
3660 | if (!qdev->doorbell_area) { | |
3661 | dev_err(&pdev->dev, "Doorbell register mapping failed.\n"); | |
3662 | err = -ENOMEM; | |
3663 | goto err_out; | |
3664 | } | |
3665 | ||
3666 | ql_get_board_info(qdev); | |
3667 | qdev->ndev = ndev; | |
3668 | qdev->pdev = pdev; | |
3669 | qdev->msg_enable = netif_msg_init(debug, default_msg); | |
3670 | spin_lock_init(&qdev->hw_lock); | |
3671 | spin_lock_init(&qdev->stats_lock); | |
3672 | ||
3673 | /* make sure the EEPROM is good */ | |
3674 | err = ql_get_flash_params(qdev); | |
3675 | if (err) { | |
3676 | dev_err(&pdev->dev, "Invalid FLASH.\n"); | |
3677 | goto err_out; | |
3678 | } | |
3679 | ||
3680 | if (!is_valid_ether_addr(qdev->flash.mac_addr)) { | |
 | err = -EIO; | |
3681 | goto err_out; | |
 | } | |
3682 | ||
3683 | memcpy(ndev->dev_addr, qdev->flash.mac_addr, ndev->addr_len); | |
3684 | memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); | |
3685 | ||
3686 | /* Set up the default ring sizes. */ | |
3687 | qdev->tx_ring_size = NUM_TX_RING_ENTRIES; | |
3688 | qdev->rx_ring_size = NUM_RX_RING_ENTRIES; | |
3689 | ||
3690 | /* Set up the coalescing parameters. */ | |
3691 | qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT; | |
3692 | qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT; | |
3693 | qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; | |
3694 | qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; | |
3695 | ||
3696 | /* | |
3697 | * Set up the operating parameters. | |
3698 | */ | |
3699 | qdev->rx_csum = 1; | |
3700 | ||
3701 | qdev->q_workqueue = create_workqueue(ndev->name); | |
3702 | qdev->workqueue = create_singlethread_workqueue(ndev->name); | |
3703 | INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); | |
3704 | INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); | |
3705 | INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); | |
3706 | ||
3707 | if (!cards_found) { | |
3708 | dev_info(&pdev->dev, "%s\n", DRV_STRING); | |
3709 | dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n", | |
3710 | DRV_NAME, DRV_VERSION); | |
3711 | } | |
3712 | return 0; | |
3713 | err_out: | |
3714 | ql_release_all(pdev); | |
3715 | pci_disable_device(pdev); | |
3716 | return err; | |
3717 | } | |
3718 | ||
3719 | static int __devinit qlge_probe(struct pci_dev *pdev, | |
3720 | const struct pci_device_id *pci_entry) | |
3721 | { | |
3722 | struct net_device *ndev = NULL; | |
3723 | struct ql_adapter *qdev = NULL; | |
3724 | static int cards_found; | |
3725 | int err = 0; | |
3726 | ||
3727 | ndev = alloc_etherdev(sizeof(struct ql_adapter)); | |
3728 | if (!ndev) | |
3729 | return -ENOMEM; | |
3730 | ||
3731 | err = ql_init_device(pdev, ndev, cards_found); | |
3732 | if (err < 0) { | |
3733 | free_netdev(ndev); | |
3734 | return err; | |
3735 | } | |
3736 | ||
3737 | qdev = netdev_priv(ndev); | |
3738 | SET_NETDEV_DEV(ndev, &pdev->dev); | |
3739 | ndev->features = (0 | |
3740 | | NETIF_F_IP_CSUM | |
3741 | | NETIF_F_SG | |
3742 | | NETIF_F_TSO | |
3743 | | NETIF_F_TSO6 | |
3744 | | NETIF_F_TSO_ECN | |
3745 | | NETIF_F_HW_VLAN_TX | |
3746 | | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER); | |
3747 | ||
3748 | if (test_bit(QL_DMA64, &qdev->flags)) | |
3749 | ndev->features |= NETIF_F_HIGHDMA; | |
3750 | ||
3751 | /* | |
3752 | * Set up net_device structure. | |
3753 | */ | |
3754 | ndev->tx_queue_len = qdev->tx_ring_size; | |
3755 | ndev->irq = pdev->irq; | |
3756 | ndev->open = qlge_open; | |
3757 | ndev->stop = qlge_close; | |
3758 | ndev->hard_start_xmit = qlge_send; | |
3759 | SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops); | |
3760 | ndev->change_mtu = qlge_change_mtu; | |
3761 | ndev->get_stats = qlge_get_stats; | |
3762 | ndev->set_multicast_list = qlge_set_multicast_list; | |
3763 | ndev->set_mac_address = qlge_set_mac_address; | |
3764 | ndev->tx_timeout = qlge_tx_timeout; | |
3765 | ndev->watchdog_timeo = 10 * HZ; | |
3766 | ndev->vlan_rx_register = ql_vlan_rx_register; | |
3767 | ndev->vlan_rx_add_vid = ql_vlan_rx_add_vid; | |
3768 | ndev->vlan_rx_kill_vid = ql_vlan_rx_kill_vid; | |
3769 | err = register_netdev(ndev); | |
3770 | if (err) { | |
3771 | dev_err(&pdev->dev, "net device registration failed.\n"); | |
3772 | ql_release_all(pdev); | |
3773 | pci_disable_device(pdev); | |
3774 | return err; | |
3775 | } | |
3776 | netif_carrier_off(ndev); | |
3777 | netif_stop_queue(ndev); | |
3778 | ql_display_dev_info(ndev); | |
3779 | cards_found++; | |
3780 | return 0; | |
3781 | } | |
3782 | ||
3783 | static void __devexit qlge_remove(struct pci_dev *pdev) | |
3784 | { | |
3785 | struct net_device *ndev = pci_get_drvdata(pdev); | |
3786 | unregister_netdev(ndev); | |
3787 | ql_release_all(pdev); | |
3788 | pci_disable_device(pdev); | |
3789 | free_netdev(ndev); | |
3790 | } | |
3791 | ||
3792 | /* | |
3793 | * This callback is called by the PCI subsystem whenever | |
3794 | * a PCI bus error is detected. | |
3795 | */ | |
3796 | static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, | |
3797 | enum pci_channel_state state) | |
3798 | { | |
3799 | struct net_device *ndev = pci_get_drvdata(pdev); | |
3800 | struct ql_adapter *qdev = netdev_priv(ndev); | |
3801 | ||
3802 | if (netif_running(ndev)) | |
3803 | ql_adapter_down(qdev); | |
3804 | ||
3805 | pci_disable_device(pdev); | |
3806 | ||
3807 | /* Request a slot reset. */ | |
3808 | return PCI_ERS_RESULT_NEED_RESET; | |
3809 | } | |
3810 | ||
3811 | /* | |
3812 | * This callback is called after the PCI bus has been reset. | |
3813 | * Basically, this tries to restart the card from scratch. | |
3814 | * This is a shortened version of the device probe/discovery code; | |
3815 | * it resembles the first half of the probe routine. | |
3816 | */ | |
3817 | static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev) | |
3818 | { | |
3819 | struct net_device *ndev = pci_get_drvdata(pdev); | |
3820 | struct ql_adapter *qdev = netdev_priv(ndev); | |
3821 | ||
3822 | if (pci_enable_device(pdev)) { | |
3823 | QPRINTK(qdev, IFUP, ERR, | |
3824 | "Cannot re-enable PCI device after reset.\n"); | |
3825 | return PCI_ERS_RESULT_DISCONNECT; | |
3826 | } | |
3827 | ||
3828 | pci_set_master(pdev); | |
3829 | ||
3830 | netif_carrier_off(ndev); | |
3831 | netif_stop_queue(ndev); | |
3832 | ql_adapter_reset(qdev); | |
3833 | ||
3834 | /* Make sure the EEPROM is good */ | |
3835 | memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); | |
3836 | ||
3837 | if (!is_valid_ether_addr(ndev->perm_addr)) { | |
3838 | QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n"); | |
3839 | return PCI_ERS_RESULT_DISCONNECT; | |
3840 | } | |
3841 | ||
3842 | return PCI_ERS_RESULT_RECOVERED; | |
3843 | } | |
3844 | ||
3845 | static void qlge_io_resume(struct pci_dev *pdev) | |
3846 | { | |
3847 | struct net_device *ndev = pci_get_drvdata(pdev); | |
3848 | struct ql_adapter *qdev = netdev_priv(ndev); | |
3849 | ||
3850 | pci_set_master(pdev); | |
3851 | ||
3852 | if (netif_running(ndev)) { | |
3853 | if (ql_adapter_up(qdev)) { | |
3854 | QPRINTK(qdev, IFUP, ERR, | |
3855 | "Device initialization failed after reset.\n"); | |
3856 | return; | |
3857 | } | |
3858 | } | |
3859 | ||
3860 | netif_device_attach(ndev); | |
3861 | } | |
3862 | ||
3863 | static struct pci_error_handlers qlge_err_handler = { | |
3864 | .error_detected = qlge_io_error_detected, | |
3865 | .slot_reset = qlge_io_slot_reset, | |
3866 | .resume = qlge_io_resume, | |
3867 | }; | |
3868 | ||
3869 | static int qlge_suspend(struct pci_dev *pdev, pm_message_t state) | |
3870 | { | |
3871 | struct net_device *ndev = pci_get_drvdata(pdev); | |
3872 | struct ql_adapter *qdev = netdev_priv(ndev); | |
3873 | int err; | |
3874 | ||
3875 | netif_device_detach(ndev); | |
3876 | ||
3877 | if (netif_running(ndev)) { | |
3878 | err = ql_adapter_down(qdev); | |
3879 | if (err) | |
3880 | return err; | |
3881 | } | |
3882 | ||
3883 | err = pci_save_state(pdev); | |
3884 | if (err) | |
3885 | return err; | |
3886 | ||
3887 | pci_disable_device(pdev); | |
3888 | ||
3889 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | |
3890 | ||
3891 | return 0; | |
3892 | } | |
3893 | ||
04da2cf9 | 3894 | #ifdef CONFIG_PM |
c4e84bde RM |
3895 | static int qlge_resume(struct pci_dev *pdev) |
3896 | { | |
3897 | struct net_device *ndev = pci_get_drvdata(pdev); | |
3898 | struct ql_adapter *qdev = netdev_priv(ndev); | |
3899 | int err; | |
3900 | ||
3901 | pci_set_power_state(pdev, PCI_D0); | |
3902 | pci_restore_state(pdev); | |
3903 | err = pci_enable_device(pdev); | |
3904 | if (err) { | |
3905 | QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n"); | |
3906 | return err; | |
3907 | } | |
3908 | pci_set_master(pdev); | |
3909 | ||
3910 | pci_enable_wake(pdev, PCI_D3hot, 0); | |
3911 | pci_enable_wake(pdev, PCI_D3cold, 0); | |
3912 | ||
3913 | if (netif_running(ndev)) { | |
3914 | err = ql_adapter_up(qdev); | |
3915 | if (err) | |
3916 | return err; | |
3917 | } | |
3918 | ||
3919 | netif_device_attach(ndev); | |
3920 | ||
3921 | return 0; | |
3922 | } | |
04da2cf9 | 3923 | #endif /* CONFIG_PM */ |
c4e84bde RM |
3924 | |
3925 | static void qlge_shutdown(struct pci_dev *pdev) | |
3926 | { | |
3927 | qlge_suspend(pdev, PMSG_SUSPEND); | |
3928 | } | |
3929 | ||
3930 | static struct pci_driver qlge_driver = { | |
3931 | .name = DRV_NAME, | |
3932 | .id_table = qlge_pci_tbl, | |
3933 | .probe = qlge_probe, | |
3934 | .remove = __devexit_p(qlge_remove), | |
3935 | #ifdef CONFIG_PM | |
3936 | .suspend = qlge_suspend, | |
3937 | .resume = qlge_resume, | |
3938 | #endif | |
3939 | .shutdown = qlge_shutdown, | |
3940 | .err_handler = &qlge_err_handler | |
3941 | }; | |
3942 | ||
3943 | static int __init qlge_init_module(void) | |
3944 | { | |
3945 | return pci_register_driver(&qlge_driver); | |
3946 | } | |
3947 | ||
3948 | static void __exit qlge_exit(void) | |
3949 | { | |
3950 | pci_unregister_driver(&qlge_driver); | |
3951 | } | |
3952 | ||
3953 | module_init(qlge_init_module); | |
3954 | module_exit(qlge_exit); |