/*
 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __CHELSIO_COMMON_H
#define __CHELSIO_COMMON_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include "version.h"

#define CH_ERR(adap, fmt, ...)   dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
#define CH_WARN(adap, fmt, ...)  dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__)
#define CH_ALERT(adap, fmt, ...) \
	dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)

/*
 * More powerful macro that selectively prints messages based on msg_enable.
 * For info and debugging messages.
 */
#define CH_MSG(adapter, level, category, fmt, ...) do { \
	if ((adapter)->msg_enable & NETIF_MSG_##category) \
		dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \
			   ## __VA_ARGS__); \
} while (0)

#ifdef DEBUG
# define CH_DBG(adapter, category, fmt, ...) \
	CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
#else
# define CH_DBG(adapter, category, fmt, ...)
#endif
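
/*
 * Example usage of the message macros (illustrative only; these calls are
 * not part of this header).  A message is emitted only when the matching
 * NETIF_MSG_* bit is set in adapter->msg_enable:
 *
 *	CH_MSG(adapter, INFO, LINK, "port %d link changed\n", port_id);
 *	CH_DBG(adapter, MMIO, "read reg 0x%x -> 0x%x\n", addr, val);
 */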

/* Additional NETIF_MSG_* categories */
#define NETIF_MSG_MMIO 0x8000000

struct t3_rx_mode {
	struct net_device *dev;
	struct dev_mc_list *mclist;
	unsigned int idx;
};

static inline void init_rx_mode(struct t3_rx_mode *p, struct net_device *dev,
				struct dev_mc_list *mclist)
{
	p->dev = dev;
	p->mclist = mclist;
	p->idx = 0;
}

static inline u8 *t3_get_next_mcaddr(struct t3_rx_mode *rm)
{
	u8 *addr = NULL;

	if (rm->mclist && rm->idx < rm->dev->mc_count) {
		addr = rm->mclist->dmi_addr;
		rm->mclist = rm->mclist->next;
		rm->idx++;
	}
	return addr;
}
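
/*
 * Illustrative use of the rx-mode helpers above (sketch only, not part of
 * this header).  After init_rx_mode(), successive t3_get_next_mcaddr()
 * calls walk the device's multicast list and return NULL once it is
 * exhausted:
 *
 *	struct t3_rx_mode rm;
 *	u8 *mcaddr;
 *
 *	init_rx_mode(&rm, dev, dev->mc_list);
 *	while ((mcaddr = t3_get_next_mcaddr(&rm)) != NULL)
 *		program_mc_filter(mcaddr);	(hypothetical helper)
 */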

enum {
	MAX_NPORTS = 2,		/* max # of ports */
	MAX_FRAME_SIZE = 10240,	/* max MAC frame size, including header + FCS */
	EEPROMSIZE = 8192,	/* Serial EEPROM size */
	RSS_TABLE_SIZE = 64,	/* size of RSS lookup and mapping tables */
	TCB_SIZE = 128,		/* TCB size */
	NMTUS = 16,		/* size of MTU table */
	NCCTRL_WIN = 32,	/* # of congestion control windows */
	PROTO_SRAM_LINES = 128,	/* size of TP sram */
};

#define MAX_RX_COALESCING_LEN 16224U

enum {
	PAUSE_RX = 1 << 0,
	PAUSE_TX = 1 << 1,
	PAUSE_AUTONEG = 1 << 2
};

enum {
	SUPPORTED_IRQ = 1 << 24
};

enum {				/* adapter interrupt-maintained statistics */
	STAT_ULP_CH0_PBL_OOB,
	STAT_ULP_CH1_PBL_OOB,
	STAT_PCI_CORR_ECC,

	IRQ_NUM_STATS		/* keep last */
};

enum {
	TP_VERSION_MAJOR = 1,
	TP_VERSION_MINOR = 0,
	TP_VERSION_MICRO = 44
};

#define S_TP_VERSION_MAJOR 16
#define M_TP_VERSION_MAJOR 0xFF
#define V_TP_VERSION_MAJOR(x) ((x) << S_TP_VERSION_MAJOR)
#define G_TP_VERSION_MAJOR(x) \
	(((x) >> S_TP_VERSION_MAJOR) & M_TP_VERSION_MAJOR)

#define S_TP_VERSION_MINOR 8
#define M_TP_VERSION_MINOR 0xFF
#define V_TP_VERSION_MINOR(x) ((x) << S_TP_VERSION_MINOR)
#define G_TP_VERSION_MINOR(x) \
	(((x) >> S_TP_VERSION_MINOR) & M_TP_VERSION_MINOR)

#define S_TP_VERSION_MICRO 0
#define M_TP_VERSION_MICRO 0xFF
#define V_TP_VERSION_MICRO(x) ((x) << S_TP_VERSION_MICRO)
#define G_TP_VERSION_MICRO(x) \
	(((x) >> S_TP_VERSION_MICRO) & M_TP_VERSION_MICRO)
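
/*
 * Example of the S_/M_/V_/G_ shift-and-mask convention used above
 * (illustrative only).  Packing the expected TP firmware version into one
 * word:
 *
 *	u32 vers = V_TP_VERSION_MAJOR(TP_VERSION_MAJOR) |
 *		   V_TP_VERSION_MINOR(TP_VERSION_MINOR) |
 *		   V_TP_VERSION_MICRO(TP_VERSION_MICRO);
 *
 * yields 0x01002c for version 1.0.44, and G_TP_VERSION_MAJOR(vers),
 * G_TP_VERSION_MINOR(vers), G_TP_VERSION_MICRO(vers) recover the fields.
 */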

enum {
	SGE_QSETS = 8,		/* # of SGE Tx/Rx/RspQ sets */
	SGE_RXQ_PER_SET = 2,	/* # of Rx queues per set */
	SGE_TXQ_PER_SET = 3	/* # of Tx queues per set */
};

enum sge_context_type {		/* SGE egress context types */
	SGE_CNTXT_RDMA = 0,
	SGE_CNTXT_ETH = 2,
	SGE_CNTXT_OFLD = 4,
	SGE_CNTXT_CTRL = 5
};

enum {
	AN_PKT_SIZE = 32,	/* async notification packet size */
	IMMED_PKT_SIZE = 48	/* packet size for immediate data */
};

struct sg_ent {			/* SGE scatter/gather entry */
	u32 len[2];
	u64 addr[2];
};

#ifndef SGE_NUM_GENBITS
/* Must be 1 or 2 */
# define SGE_NUM_GENBITS 2
#endif

#define TX_DESC_FLITS 16U
#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS)
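
/*
 * Note (added for clarity, not in the original header): a "flit" is an
 * 8-byte unit of a descriptor, so a Tx descriptor is 16 flits (128 bytes).
 * With the default SGE_NUM_GENBITS of 2, WR_FLITS evaluates to 15, the
 * flit budget the SGE code assumes for a single work request.
 */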

struct cphy;
struct adapter;

struct mdio_ops {
	int (*read)(struct adapter *adapter, int phy_addr, int mmd_addr,
		    int reg_addr, unsigned int *val);
	int (*write)(struct adapter *adapter, int phy_addr, int mmd_addr,
		     int reg_addr, unsigned int val);
};

struct adapter_info {
	unsigned char nports;		/* # of ports */
	unsigned char phy_base_addr;	/* MDIO PHY base address */
	unsigned char mdien;
	unsigned char mdiinv;
	unsigned int gpio_out;		/* GPIO output settings */
	unsigned int gpio_intr;		/* GPIO IRQ enable mask */
	unsigned long caps;		/* adapter capabilities */
	const struct mdio_ops *mdio_ops;	/* MDIO operations */
	const char *desc;		/* product description */
};

struct port_type_info {
	void (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			 int phy_addr, const struct mdio_ops *ops);
	unsigned int caps;
	const char *desc;
};

struct mc5_stats {
	unsigned long parity_err;
	unsigned long active_rgn_full;
	unsigned long nfa_srch_err;
	unsigned long unknown_cmd;
	unsigned long reqq_parity_err;
	unsigned long dispq_parity_err;
	unsigned long del_act_empty;
};

struct mc7_stats {
	unsigned long corr_err;
	unsigned long uncorr_err;
	unsigned long parity_err;
	unsigned long addr_err;
};

struct mac_stats {
	u64 tx_octets;		/* total # of octets in good frames */
	u64 tx_octets_bad;	/* total # of octets in error frames */
	u64 tx_frames;		/* all good frames */
	u64 tx_mcast_frames;	/* good multicast frames */
	u64 tx_bcast_frames;	/* good broadcast frames */
	u64 tx_pause;		/* # of transmitted pause frames */
	u64 tx_deferred;	/* frames with deferred transmissions */
	u64 tx_late_collisions;	/* # of late collisions */
	u64 tx_total_collisions;	/* # of total collisions */
	u64 tx_excess_collisions;	/* frame errors from excessive collisions */
	u64 tx_underrun;	/* # of Tx FIFO underruns */
	u64 tx_len_errs;	/* # of Tx length errors */
	u64 tx_mac_internal_errs;	/* # of internal MAC errors on Tx */
	u64 tx_excess_deferral;	/* # of frames with excessive deferral */
	u64 tx_fcs_errs;	/* # of frames with bad FCS */

	u64 tx_frames_64;	/* # of Tx frames in a particular range */
	u64 tx_frames_65_127;
	u64 tx_frames_128_255;
	u64 tx_frames_256_511;
	u64 tx_frames_512_1023;
	u64 tx_frames_1024_1518;
	u64 tx_frames_1519_max;

	u64 rx_octets;		/* total # of octets in good frames */
	u64 rx_octets_bad;	/* total # of octets in error frames */
	u64 rx_frames;		/* all good frames */
	u64 rx_mcast_frames;	/* good multicast frames */
	u64 rx_bcast_frames;	/* good broadcast frames */
	u64 rx_pause;		/* # of received pause frames */
	u64 rx_fcs_errs;	/* # of received frames with bad FCS */
	u64 rx_align_errs;	/* alignment errors */
	u64 rx_symbol_errs;	/* symbol errors */
	u64 rx_data_errs;	/* data errors */
	u64 rx_sequence_errs;	/* sequence errors */
	u64 rx_runt;		/* # of runt frames */
	u64 rx_jabber;		/* # of jabber frames */
	u64 rx_short;		/* # of short frames */
	u64 rx_too_long;	/* # of oversized frames */
	u64 rx_mac_internal_errs;	/* # of internal MAC errors on Rx */

	u64 rx_frames_64;	/* # of Rx frames in a particular range */
	u64 rx_frames_65_127;
	u64 rx_frames_128_255;
	u64 rx_frames_256_511;
	u64 rx_frames_512_1023;
	u64 rx_frames_1024_1518;
	u64 rx_frames_1519_max;

	u64 rx_cong_drops;	/* # of Rx drops due to SGE congestion */

	unsigned long tx_fifo_parity_err;
	unsigned long rx_fifo_parity_err;
	unsigned long tx_fifo_urun;
	unsigned long rx_fifo_ovfl;
	unsigned long serdes_signal_loss;
	unsigned long xaui_pcs_ctc_err;
	unsigned long xaui_pcs_align_change;

	unsigned long num_toggled;	/* # times toggled TxEn due to stuck TX */
	unsigned long num_resets;	/* # times reset due to stuck TX */

};

struct tp_mib_stats {
	u32 ipInReceive_hi;
	u32 ipInReceive_lo;
	u32 ipInHdrErrors_hi;
	u32 ipInHdrErrors_lo;
	u32 ipInAddrErrors_hi;
	u32 ipInAddrErrors_lo;
	u32 ipInUnknownProtos_hi;
	u32 ipInUnknownProtos_lo;
	u32 ipInDiscards_hi;
	u32 ipInDiscards_lo;
	u32 ipInDelivers_hi;
	u32 ipInDelivers_lo;
	u32 ipOutRequests_hi;
	u32 ipOutRequests_lo;
	u32 ipOutDiscards_hi;
	u32 ipOutDiscards_lo;
	u32 ipOutNoRoutes_hi;
	u32 ipOutNoRoutes_lo;
	u32 ipReasmTimeout;
	u32 ipReasmReqds;
	u32 ipReasmOKs;
	u32 ipReasmFails;

	u32 reserved[8];

	u32 tcpActiveOpens;
	u32 tcpPassiveOpens;
	u32 tcpAttemptFails;
	u32 tcpEstabResets;
	u32 tcpOutRsts;
	u32 tcpCurrEstab;
	u32 tcpInSegs_hi;
	u32 tcpInSegs_lo;
	u32 tcpOutSegs_hi;
	u32 tcpOutSegs_lo;
	u32 tcpRetransSeg_hi;
	u32 tcpRetransSeg_lo;
	u32 tcpInErrs_hi;
	u32 tcpInErrs_lo;
	u32 tcpRtoMin;
	u32 tcpRtoMax;
};

struct tp_params {
	unsigned int nchan;		/* # of channels */
	unsigned int pmrx_size;		/* total PMRX capacity */
	unsigned int pmtx_size;		/* total PMTX capacity */
	unsigned int cm_size;		/* total CM capacity */
	unsigned int chan_rx_size;	/* per channel Rx size */
	unsigned int chan_tx_size;	/* per channel Tx size */
	unsigned int rx_pg_size;	/* Rx page size */
	unsigned int tx_pg_size;	/* Tx page size */
	unsigned int rx_num_pgs;	/* # of Rx pages */
	unsigned int tx_num_pgs;	/* # of Tx pages */
	unsigned int ntimer_qs;		/* # of timer queues */
};

struct qset_params {			/* SGE queue set parameters */
	unsigned int polling;		/* polling/interrupt service for rspq */
	unsigned int coalesce_usecs;	/* irq coalescing timer */
	unsigned int rspq_size;		/* # of entries in response queue */
	unsigned int fl_size;		/* # of entries in regular free list */
	unsigned int jumbo_size;	/* # of entries in jumbo free list */
	unsigned int txq_size[SGE_TXQ_PER_SET];	/* Tx queue sizes */
	unsigned int cong_thres;	/* FL congestion threshold */
};

struct sge_params {
	unsigned int max_pkt_size;	/* max offload pkt size */
	struct qset_params qset[SGE_QSETS];
};

struct mc5_params {
	unsigned int mode;	/* selects MC5 width */
	unsigned int nservers;	/* size of server region */
	unsigned int nfilters;	/* size of filter region */
	unsigned int nroutes;	/* size of routing region */
};

/* Default MC5 region sizes */
enum {
	DEFAULT_NSERVERS = 512,
	DEFAULT_NFILTERS = 128
};

/* MC5 modes, these must be non-0 */
enum {
	MC5_MODE_144_BIT = 1,
	MC5_MODE_72_BIT = 2
};

/* MC5 min active region size */
enum { MC5_MIN_TIDS = 16 };

struct vpd_params {
	unsigned int cclk;
	unsigned int mclk;
	unsigned int uclk;
	unsigned int mdc;
	unsigned int mem_timing;
	u8 eth_base[6];
	u8 port_type[MAX_NPORTS];
	unsigned short xauicfg[2];
};

struct pci_params {
	unsigned int vpd_cap_addr;
	unsigned int pcie_cap_addr;
	unsigned short speed;
	unsigned char width;
	unsigned char variant;
};

enum {
	PCI_VARIANT_PCI,
	PCI_VARIANT_PCIX_MODE1_PARITY,
	PCI_VARIANT_PCIX_MODE1_ECC,
	PCI_VARIANT_PCIX_266_MODE2,
	PCI_VARIANT_PCIE
};

struct adapter_params {
	struct sge_params sge;
	struct mc5_params mc5;
	struct tp_params tp;
	struct vpd_params vpd;
	struct pci_params pci;

	const struct adapter_info *info;

	unsigned short mtus[NMTUS];
	unsigned short a_wnd[NCCTRL_WIN];
	unsigned short b_wnd[NCCTRL_WIN];

	unsigned int nports;	/* # of ethernet ports */
	unsigned int stats_update_period;	/* MAC stats accumulation period */
	unsigned int linkpoll_period;	/* link poll period in 0.1s */
	unsigned int rev;	/* chip revision */
	unsigned int offload;
};

enum {				/* chip revisions */
	T3_REV_A = 0,
	T3_REV_B = 2,
	T3_REV_B2 = 3,
};

struct trace_params {
	u32 sip;
	u32 sip_mask;
	u32 dip;
	u32 dip_mask;
	u16 sport;
	u16 sport_mask;
	u16 dport;
	u16 dport_mask;
	u32 vlan:12;
	u32 vlan_mask:12;
	u32 intf:4;
	u32 intf_mask:4;
	u8 proto;
	u8 proto_mask;
};

struct link_config {
	unsigned int supported;		/* link capabilities */
	unsigned int advertising;	/* advertised capabilities */
	unsigned short requested_speed;	/* speed user has requested */
	unsigned short speed;		/* actual link speed */
	unsigned char requested_duplex;	/* duplex user has requested */
	unsigned char duplex;		/* actual link duplex */
	unsigned char requested_fc;	/* flow control user has requested */
	unsigned char fc;		/* actual link flow control */
	unsigned char autoneg;		/* autonegotiating? */
	unsigned int link_ok;		/* link up? */
};

#define SPEED_INVALID 0xffff
#define DUPLEX_INVALID 0xff

struct mc5 {
	struct adapter *adapter;
	unsigned int tcam_size;
	unsigned char part_type;
	unsigned char parity_enabled;
	unsigned char mode;
	struct mc5_stats stats;
};

static inline unsigned int t3_mc5_size(const struct mc5 *p)
{
	return p->tcam_size;
}

struct mc7 {
	struct adapter *adapter;	/* backpointer to adapter */
	unsigned int size;		/* memory size in bytes */
	unsigned int width;		/* MC7 interface width */
	unsigned int offset;		/* register address offset for MC7 instance */
	const char *name;		/* name of MC7 instance */
	struct mc7_stats stats;		/* MC7 statistics */
};

static inline unsigned int t3_mc7_size(const struct mc7 *p)
{
	return p->size;
}

struct cmac {
	struct adapter *adapter;
	unsigned int offset;
	unsigned int nucast;	/* # of address filters for unicast MACs */
	unsigned int tx_tcnt;
	unsigned int tx_xcnt;
	u64 tx_mcnt;
	unsigned int rx_xcnt;
	u64 rx_mcnt;
	unsigned int toggle_cnt;
	unsigned int txen;
	struct mac_stats stats;
};

enum {
	MAC_DIRECTION_RX = 1,
	MAC_DIRECTION_TX = 2,
	MAC_RXFIFO_SIZE = 32768
};

/* IEEE 802.3ae specified MDIO devices */
enum {
	MDIO_DEV_PMA_PMD = 1,
	MDIO_DEV_WIS = 2,
	MDIO_DEV_PCS = 3,
	MDIO_DEV_XGXS = 4
};

/* PHY loopback direction */
enum {
	PHY_LOOPBACK_TX = 1,
	PHY_LOOPBACK_RX = 2
};

/* PHY interrupt types */
enum {
	cphy_cause_link_change = 1,
	cphy_cause_fifo_error = 2
};

/* PHY operations */
struct cphy_ops {
	void (*destroy)(struct cphy *phy);
	int (*reset)(struct cphy *phy, int wait);

	int (*intr_enable)(struct cphy *phy);
	int (*intr_disable)(struct cphy *phy);
	int (*intr_clear)(struct cphy *phy);
	int (*intr_handler)(struct cphy *phy);

	int (*autoneg_enable)(struct cphy *phy);
	int (*autoneg_restart)(struct cphy *phy);

	int (*advertise)(struct cphy *phy, unsigned int advertise_map);
	int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable);
	int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
	int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
			       int *duplex, int *fc);
	int (*power_down)(struct cphy *phy, int enable);
};

/* A PHY instance */
struct cphy {
	int addr;			/* PHY address */
	struct adapter *adapter;	/* associated adapter */
	unsigned long fifo_errors;	/* FIFO over/under-flows */
	const struct cphy_ops *ops;	/* PHY operations */
	int (*mdio_read)(struct adapter *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int *val);
	int (*mdio_write)(struct adapter *adapter, int phy_addr, int mmd_addr,
			  int reg_addr, unsigned int val);
};

/* Convenience MDIO read/write wrappers */
static inline int mdio_read(struct cphy *phy, int mmd, int reg,
			    unsigned int *valp)
{
	return phy->mdio_read(phy->adapter, phy->addr, mmd, reg, valp);
}

static inline int mdio_write(struct cphy *phy, int mmd, int reg,
			     unsigned int val)
{
	return phy->mdio_write(phy->adapter, phy->addr, mmd, reg, val);
}
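
/*
 * Example (illustrative only): reading a PMA/PMD register through the
 * wrappers above, addressing the MDIO device with one of the MDIO_DEV_*
 * constants; the register number 0 here is arbitrary:
 *
 *	unsigned int val;
 *
 *	if (!mdio_read(phy, MDIO_DEV_PMA_PMD, 0, &val))
 *		... use val ...
 */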

/* Convenience initializer */
static inline void cphy_init(struct cphy *phy, struct adapter *adapter,
			     int phy_addr, struct cphy_ops *phy_ops,
			     const struct mdio_ops *mdio_ops)
{
	phy->adapter = adapter;
	phy->addr = phy_addr;
	phy->ops = phy_ops;
	if (mdio_ops) {
		phy->mdio_read = mdio_ops->read;
		phy->mdio_write = mdio_ops->write;
	}
}
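
/*
 * Sketch of how a PHY "prep" routine is expected to use cphy_init() (the
 * names below are hypothetical; see the t3_*_phy_prep() prototypes at the
 * end of this header for the real entry points):
 *
 *	static struct cphy_ops my_phy_ops = { ... };
 *
 *	void my_phy_prep(struct cphy *phy, struct adapter *adapter,
 *			 int phy_addr, const struct mdio_ops *mdio_ops)
 *	{
 *		cphy_init(phy, adapter, phy_addr, &my_phy_ops, mdio_ops);
 *	}
 */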

/* Accumulate MAC statistics every 180 seconds.  For 1G we multiply by 10. */
#define MAC_STATS_ACCUM_SECS 180

#define XGM_REG(reg_addr, idx) \
	((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR))
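
/*
 * XGM_REG() maps a MAC-instance-0 register offset to the corresponding
 * register of MAC instance 'idx' by adding the per-instance stride.
 * Illustrative example (register and accessor names assumed from the
 * driver's regs.h and adapter.h, not defined here):
 *
 *	t3_read_reg(adap, XGM_REG(A_XGM_RX_CFG, 1));
 *
 * would read the Rx configuration register of the second MAC.
 */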

struct addr_val_pair {
	unsigned int reg_addr;
	unsigned int val;
};

#include "adapter.h"

#ifndef PCI_VENDOR_ID_CHELSIO
# define PCI_VENDOR_ID_CHELSIO 0x1425
#endif

#define for_each_port(adapter, iter) \
	for (iter = 0; iter < (adapter)->params.nports; ++iter)
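
/*
 * Typical use of for_each_port() (illustrative only):
 *
 *	int i;
 *
 *	for_each_port(adapter, i)
 *		t3_port_intr_enable(adapter, i);
 */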

#define adapter_info(adap) ((adap)->params.info)

static inline int uses_xaui(const struct adapter *adap)
{
	return adapter_info(adap)->caps & SUPPORTED_AUI;
}

static inline int is_10G(const struct adapter *adap)
{
	return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full;
}

static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}

static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
	return adap->params.vpd.cclk / 1000;
}
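
/*
 * core_ticks_per_usec() turns the VPD core clock (kHz, as stored in
 * vpd_params.cclk) into ticks per microsecond.  Illustrative example of
 * converting a timeout in microseconds into core clock ticks:
 *
 *	unsigned int ticks = timeout_us * core_ticks_per_usec(adap);
 */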

static inline unsigned int is_pcie(const struct adapter *adap)
{
	return adap->params.pci.variant == PCI_VARIANT_PCIE;
}

void t3_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
		      u32 val);
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
		   int n, unsigned int offset);
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp);
static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
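
/*
 * Example of the wait-for-completion helpers (illustrative): poll a register
 * until the bits in 'mask' deassert (polarity 0), giving up after 'attempts'
 * tries separated by 'delay':
 *
 *	if (t3_wait_op_done(adapter, reg, mask, 0, attempts, delay))
 *		... handle timeout ...
 */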
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set);
int t3_phy_reset(struct cphy *phy, int mmd, int wait);
int t3_phy_advertise(struct cphy *phy, unsigned int advert);
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);

void t3_intr_enable(struct adapter *adapter);
void t3_intr_disable(struct adapter *adapter);
void t3_intr_clear(struct adapter *adapter);
void t3_port_intr_enable(struct adapter *adapter, int idx);
void t3_port_intr_disable(struct adapter *adapter, int idx);
void t3_port_intr_clear(struct adapter *adapter, int idx);
int t3_slow_intr_handler(struct adapter *adapter);
int t3_phy_intr_handler(struct adapter *adapter);

void t3_link_changed(struct adapter *adapter, int port_id);
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
int t3_seeprom_wp(struct adapter *adapter, int enable);
int t3_get_tp_version(struct adapter *adapter, u32 *vers);
int t3_check_tpsram_version(struct adapter *adapter, int *must_load);
int t3_check_tpsram(struct adapter *adapter, u8 *tp_ram, unsigned int size);
int t3_set_proto_sram(struct adapter *adap, u8 *data);
int t3_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented);
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
int t3_get_fw_version(struct adapter *adapter, u32 *vers);
int t3_check_fw_version(struct adapter *adapter);
int t3_init_hw(struct adapter *adapter, u32 fw_params);
void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
		    int reset);
void t3_led_ready(struct adapter *adapter);
void t3_fatal_err(struct adapter *adapter);
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
		   const u8 *cpus, const u16 *rspq);
int t3_read_rss(struct adapter *adapter, u8 *lkup, u16 *map);
int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask);
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp);
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf);

int t3_mac_reset(struct cmac *mac);
void t3b_pcs_reset(struct cmac *mac);
int t3_mac_enable(struct cmac *mac, int which);
int t3_mac_disable(struct cmac *mac, int which);
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm);
int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
int t3_mac_set_num_ucast(struct cmac *mac, int n);
const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
int t3b2_mac_watchdog_task(struct cmac *mac);

void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
		unsigned int nroutes);
void t3_mc5_intr_handler(struct mc5 *mc5);
int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start, unsigned int n,
		      u32 *buf);

int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh);
void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size);
void t3_tp_set_offload_mode(struct adapter *adap, int enable);
void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps);
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
		  unsigned short alpha[NCCTRL_WIN],
		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap);
void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS]);
void t3_get_cong_cntl_tab(struct adapter *adap,
			  unsigned short incr[NMTUS][NCCTRL_WIN]);
void t3_config_trace_filter(struct adapter *adapter,
			    const struct trace_params *tp, int filter_index,
			    int invert, int enable);
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);

void t3_sge_prep(struct adapter *adap, struct sge_params *p);
void t3_sge_init(struct adapter *adap, struct sge_params *p);
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
		       enum sge_context_type type, int respq, u64 base_addr,
		       unsigned int size, unsigned int token, int gen,
		       unsigned int cidx);
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
			int gts_enable, u64 base_addr, unsigned int size,
			unsigned int esize, unsigned int cong_thres, int gen,
			unsigned int cidx);
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
			 int irq_vec_idx, u64 base_addr, unsigned int size,
			 unsigned int fl_thres, int gen, unsigned int cidx);
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
			unsigned int size, int rspq, int ovfl_mode,
			unsigned int credits, unsigned int credit_thres);
int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable);
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id);
int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id);
int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id);
int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4]);
int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4]);
int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4]);
int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4]);
int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
		      unsigned int credits);

void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
			 int phy_addr, const struct mdio_ops *mdio_ops);
void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
			 int phy_addr, const struct mdio_ops *mdio_ops);
void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
			 int phy_addr, const struct mdio_ops *mdio_ops);
void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
			const struct mdio_ops *mdio_ops);
void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
			     int phy_addr, const struct mdio_ops *mdio_ops);
#endif				/* __CHELSIO_COMMON_H */