/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __CXGB4_H__
#define __CXGB4_H__

#include "t4_hw.h"

#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include "cxgb4_uld.h"

#define T4FW_VERSION_MAJOR 0x01
#define T4FW_VERSION_MINOR 0x0B
#define T4FW_VERSION_MICRO 0x1B
#define T4FW_VERSION_BUILD 0x00

#define T5FW_VERSION_MAJOR 0x01
#define T5FW_VERSION_MINOR 0x0B
#define T5FW_VERSION_MICRO 0x1B
#define T5FW_VERSION_BUILD 0x00

#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)

enum {
	MAX_NPORTS = 4,		/* max # of ports */
	SERNUM_LEN = 24,	/* Serial # length */
	EC_LEN     = 16,	/* E/C length */
	ID_LEN     = 16,	/* ID length */
	PN_LEN     = 16,	/* Part Number length */
};

enum {
	MEM_EDC0,
	MEM_EDC1,
	MEM_MC,
	MEM_MC0 = MEM_MC,
	MEM_MC1
};

enum {
	MEMWIN0_APERTURE = 2048,
	MEMWIN0_BASE     = 0x1b800,
	MEMWIN1_APERTURE = 32768,
	MEMWIN1_BASE     = 0x28000,
	MEMWIN1_BASE_T5  = 0x52000,
	MEMWIN2_APERTURE = 65536,
	MEMWIN2_BASE     = 0x30000,
	MEMWIN2_APERTURE_T5 = 131072,
	MEMWIN2_BASE_T5     = 0x60000,
};

enum dev_master {
	MASTER_CANT,
	MASTER_MAY,
	MASTER_MUST
};

enum dev_state {
	DEV_STATE_UNINIT,
	DEV_STATE_INIT,
	DEV_STATE_ERR
};

enum {
	PAUSE_RX      = 1 << 0,
	PAUSE_TX      = 1 << 1,
	PAUSE_AUTONEG = 1 << 2
};

struct port_stats {
	u64 tx_octets;		/* total # of octets in good frames */
	u64 tx_frames;		/* all good frames */
	u64 tx_bcast_frames;	/* all broadcast frames */
	u64 tx_mcast_frames;	/* all multicast frames */
	u64 tx_ucast_frames;	/* all unicast frames */
	u64 tx_error_frames;	/* all error frames */

	u64 tx_frames_64;	/* # of Tx frames in a particular range */
	u64 tx_frames_65_127;
	u64 tx_frames_128_255;
	u64 tx_frames_256_511;
	u64 tx_frames_512_1023;
	u64 tx_frames_1024_1518;
	u64 tx_frames_1519_max;

	u64 tx_drop;		/* # of dropped Tx frames */
	u64 tx_pause;		/* # of transmitted pause frames */
	u64 tx_ppp0;		/* # of transmitted PPP prio 0 frames */
	u64 tx_ppp1;		/* # of transmitted PPP prio 1 frames */
	u64 tx_ppp2;		/* # of transmitted PPP prio 2 frames */
	u64 tx_ppp3;		/* # of transmitted PPP prio 3 frames */
	u64 tx_ppp4;		/* # of transmitted PPP prio 4 frames */
	u64 tx_ppp5;		/* # of transmitted PPP prio 5 frames */
	u64 tx_ppp6;		/* # of transmitted PPP prio 6 frames */
	u64 tx_ppp7;		/* # of transmitted PPP prio 7 frames */

	u64 rx_octets;		/* total # of octets in good frames */
	u64 rx_frames;		/* all good frames */
	u64 rx_bcast_frames;	/* all broadcast frames */
	u64 rx_mcast_frames;	/* all multicast frames */
	u64 rx_ucast_frames;	/* all unicast frames */
	u64 rx_too_long;	/* # of frames exceeding MTU */
	u64 rx_jabber;		/* # of jabber frames */
	u64 rx_fcs_err;		/* # of received frames with bad FCS */
	u64 rx_len_err;		/* # of received frames with length error */
	u64 rx_symbol_err;	/* symbol errors */
	u64 rx_runt;		/* # of short frames */

	u64 rx_frames_64;	/* # of Rx frames in a particular range */
	u64 rx_frames_65_127;
	u64 rx_frames_128_255;
	u64 rx_frames_256_511;
	u64 rx_frames_512_1023;
	u64 rx_frames_1024_1518;
	u64 rx_frames_1519_max;

	u64 rx_pause;		/* # of received pause frames */
	u64 rx_ppp0;		/* # of received PPP prio 0 frames */
	u64 rx_ppp1;		/* # of received PPP prio 1 frames */
	u64 rx_ppp2;		/* # of received PPP prio 2 frames */
	u64 rx_ppp3;		/* # of received PPP prio 3 frames */
	u64 rx_ppp4;		/* # of received PPP prio 4 frames */
	u64 rx_ppp5;		/* # of received PPP prio 5 frames */
	u64 rx_ppp6;		/* # of received PPP prio 6 frames */
	u64 rx_ppp7;		/* # of received PPP prio 7 frames */

	u64 rx_ovflow0;		/* drops due to buffer-group 0 overflows */
	u64 rx_ovflow1;		/* drops due to buffer-group 1 overflows */
	u64 rx_ovflow2;		/* drops due to buffer-group 2 overflows */
	u64 rx_ovflow3;		/* drops due to buffer-group 3 overflows */
	u64 rx_trunc0;		/* buffer-group 0 truncated packets */
	u64 rx_trunc1;		/* buffer-group 1 truncated packets */
	u64 rx_trunc2;		/* buffer-group 2 truncated packets */
	u64 rx_trunc3;		/* buffer-group 3 truncated packets */
};

struct lb_port_stats {
	u64 octets;
	u64 frames;
	u64 bcast_frames;
	u64 mcast_frames;
	u64 ucast_frames;
	u64 error_frames;

	u64 frames_64;
	u64 frames_65_127;
	u64 frames_128_255;
	u64 frames_256_511;
	u64 frames_512_1023;
	u64 frames_1024_1518;
	u64 frames_1519_max;

	u64 drop;

	u64 ovflow0;
	u64 ovflow1;
	u64 ovflow2;
	u64 ovflow3;
	u64 trunc0;
	u64 trunc1;
	u64 trunc2;
	u64 trunc3;
};

struct tp_tcp_stats {
	u32 tcpOutRsts;
	u64 tcpInSegs;
	u64 tcpOutSegs;
	u64 tcpRetransSegs;
};

struct tp_err_stats {
	u32 macInErrs[4];
	u32 hdrInErrs[4];
	u32 tcpInErrs[4];
	u32 tnlCongDrops[4];
	u32 ofldChanDrops[4];
	u32 tnlTxDrops[4];
	u32 ofldVlanDrops[4];
	u32 tcp6InErrs[4];
	u32 ofldNoNeigh;
	u32 ofldCongDefer;
};

struct tp_params {
	unsigned int ntxchan;		/* # of Tx channels */
	unsigned int tre;		/* log2 of core clocks per TP tick */
	unsigned short tx_modq_map;	/* TX modulation scheduler queue to */
					/* channel map */

	uint32_t dack_re;		/* DACK timer resolution */
	unsigned short tx_modq[NCHAN];	/* channel to modulation queue map */

	u32 vlan_pri_map;		/* cached TP_VLAN_PRI_MAP */
	u32 ingress_config;		/* cached TP_INGRESS_CONFIG */

	/* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets.  This is a
	 * subset of the set of fields which may be present in the Compressed
	 * Filter Tuple portion of filters and TCP TCB connections.  The
	 * fields which are present are controlled by the TP_VLAN_PRI_MAP.
	 * Since a variable number of fields may or may not be present, their
	 * shifted field positions within the Compressed Filter Tuple may
	 * vary, or not even be present if the field isn't selected in
	 * TP_VLAN_PRI_MAP.  Since some of these fields are needed in various
	 * places we store their offsets here, or a -1 if the field isn't
	 * present.
	 */
	int vlan_shift;
	int vnic_shift;
	int port_shift;
	int protocol_shift;
};

struct vpd_params {
	unsigned int cclk;
	u8 ec[EC_LEN + 1];
	u8 sn[SERNUM_LEN + 1];
	u8 id[ID_LEN + 1];
	u8 pn[PN_LEN + 1];
};

struct pci_params {
	unsigned char speed;
	unsigned char width;
};

#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_FPGA          0x100
#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)

#define CHELSIO_T4		0x4
#define CHELSIO_T5		0x5

enum chip_type {
	T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
	T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
	T4_FIRST_REV	= T4_A1,
	T4_LAST_REV	= T4_A2,

	T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
	T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
	T5_FIRST_REV	= T5_A0,
	T5_LAST_REV	= T5_A1,
};
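
/* Worked example of the encoding above: the chip version lives in the high
 * nibble of the chip code and the revision in the low nibble, so
 * T5_A1 == CHELSIO_CHIP_CODE(0x5, 1) == 0x51, and conversely
 * CHELSIO_CHIP_VERSION(T5_A1) == 0x5 while CHELSIO_CHIP_RELEASE(T5_A1) == 1.
 */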

struct adapter_params {
	struct tp_params  tp;
	struct vpd_params vpd;
	struct pci_params pci;

	unsigned int sf_size;		/* serial flash size in bytes */
	unsigned int sf_nsec;		/* # of flash sectors */
	unsigned int sf_fw_start;	/* start of FW image in flash */

	unsigned int fw_vers;
	unsigned int tp_vers;
	u8 api_vers[7];

	unsigned short mtus[NMTUS];
	unsigned short a_wnd[NCCTRL_WIN];
	unsigned short b_wnd[NCCTRL_WIN];

	unsigned char nports;		/* # of ethernet ports */
	unsigned char portvec;
	enum chip_type chip;		/* chip code */
	unsigned char offload;

	unsigned char bypass;

	unsigned int ofldq_wr_cred;
	bool ulptx_memwrite_dsgl;	/* use of T5 DSGL allowed */

	unsigned int max_ordird_qp;	/* Max read depth per RDMA QP */
	unsigned int max_ird_adapter;	/* Max read depth per adapter */
};

#include "t4fw_api.h"

#define FW_VERSION(chip) ( \
		FW_HDR_FW_VER_MAJOR_GET(chip##FW_VERSION_MAJOR) | \
		FW_HDR_FW_VER_MINOR_GET(chip##FW_VERSION_MINOR) | \
		FW_HDR_FW_VER_MICRO_GET(chip##FW_VERSION_MICRO) | \
		FW_HDR_FW_VER_BUILD_GET(chip##FW_VERSION_BUILD))
#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)

struct fw_info {
	u8 chip;
	char *fs_name;
	char *fw_mod_name;
	struct fw_hdr fw_hdr;
};

struct trace_params {
	u32 data[TRACE_LEN / 4];
	u32 mask[TRACE_LEN / 4];
	unsigned short snap_len;
	unsigned short min_len;
	unsigned char skip_ofst;
	unsigned char skip_len;
	unsigned char invert;
	unsigned char port;
};

struct link_config {
	unsigned short supported;	/* link capabilities */
	unsigned short advertising;	/* advertised capabilities */
	unsigned short requested_speed;	/* speed user has requested */
	unsigned short speed;		/* actual link speed */
	unsigned char  requested_fc;	/* flow control user has requested */
	unsigned char  fc;		/* actual link flow control */
	unsigned char  autoneg;		/* autonegotiating? */
	unsigned char  link_ok;		/* link up? */
};

#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)

enum {
	MAX_ETH_QSETS = 32,		/* # of Ethernet Tx/Rx queue sets */
	MAX_OFLD_QSETS = 16,		/* # of offload Tx/Rx queue sets */
	MAX_CTRL_QUEUES = NCHAN,	/* # of control Tx queues */
	MAX_RDMA_QUEUES = NCHAN,	/* # of streaming RDMA Rx queues */
	MAX_RDMA_CIQS = NCHAN,		/* # of RDMA concentrator IQs */
	MAX_ISCSI_QUEUES = NCHAN,	/* # of streaming iSCSI Rx queues */
};

enum {
	INGQ_EXTRAS = 2,	/* firmware event queue and */
				/* forwarded interrupts */
	MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
		   + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
	MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
		   + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
};

struct adapter;
struct sge_rspq;

#include "cxgb4_dcb.h"

struct port_info {
	struct adapter *adapter;
	u16    viid;
	s16    xact_addr_filt;		/* index of exact MAC address filter */
	u16    rss_size;		/* size of VI's RSS table slice */
	s8     mdio_addr;
	u8     port_type;
	u8     mod_type;
	u8     port_id;
	u8     tx_chan;
	u8     lport;			/* associated offload logical port */
	u8     nqsets;			/* # of qsets */
	u8     first_qset;		/* index of first qset */
	u8     rss_mode;
	struct link_config link_cfg;
	u16   *rss;
#ifdef CONFIG_CHELSIO_T4_DCB
	struct port_dcb_info dcb;	/* Data Center Bridging support */
#endif
};

struct dentry;
struct work_struct;

enum {				/* adapter flags */
	FULL_INIT_DONE     = (1 << 0),
	DEV_ENABLED        = (1 << 1),
	USING_MSI          = (1 << 2),
	USING_MSIX         = (1 << 3),
	FW_OK              = (1 << 4),
	RSS_TNLALLLOOKUP   = (1 << 5),
	USING_SOFT_PARAMS  = (1 << 6),
	MASTER_PF          = (1 << 7),
	FW_OFLD_CONN       = (1 << 9),
};

struct rx_sw_desc;

struct sge_fl {				/* SGE free-buffer queue state */
	unsigned int avail;		/* # of available Rx buffers */
	unsigned int pend_cred;		/* new buffers since last FL DB ring */
	unsigned int cidx;		/* consumer index */
	unsigned int pidx;		/* producer index */
	unsigned long alloc_failed;	/* # of times buffer allocation failed */
	unsigned long large_alloc_failed;
	unsigned long starving;
	/* RO fields */
	unsigned int cntxt_id;		/* SGE context id for the free list */
	unsigned int size;		/* capacity of free list */
	struct rx_sw_desc *sdesc;	/* address of SW Rx descriptor ring */
	__be64 *desc;			/* address of HW Rx descriptor ring */
	dma_addr_t addr;		/* bus address of HW ring start */
};

/* A packet gather list */
struct pkt_gl {
	struct page_frag frags[MAX_SKB_FRAGS];
	void *va;			/* virtual address of first byte */
	unsigned int nfrags;		/* # of fragments */
	unsigned int tot_len;		/* total length of fragments */
};

typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
			      const struct pkt_gl *gl);

struct sge_rspq {			/* state for an SGE response queue */
	struct napi_struct napi;
	const __be64 *cur_desc;		/* current descriptor in queue */
	unsigned int cidx;		/* consumer index */
	u8 gen;				/* current generation bit */
	u8 intr_params;			/* interrupt holdoff parameters */
	u8 next_intr_params;		/* holdoff params for next interrupt */
	u8 pktcnt_idx;			/* interrupt packet threshold */
	u8 uld;				/* ULD handling this queue */
	u8 idx;				/* queue index within its group */
	int offset;			/* offset into current Rx buffer */
	u16 cntxt_id;			/* SGE context id for the response q */
	u16 abs_id;			/* absolute SGE id for the response q */
	__be64 *desc;			/* address of HW response ring */
	dma_addr_t phys_addr;		/* physical address of the ring */
	unsigned int iqe_len;		/* entry size */
	unsigned int size;		/* capacity of response queue */
	struct adapter *adap;
	struct net_device *netdev;	/* associated net device */
	rspq_handler_t handler;
};

struct sge_eth_stats {			/* Ethernet queue statistics */
	unsigned long pkts;		/* # of ethernet packets */
	unsigned long lro_pkts;		/* # of LRO super packets */
	unsigned long lro_merged;	/* # of wire packets merged by LRO */
	unsigned long rx_cso;		/* # of Rx checksum offloads */
	unsigned long vlan_ex;		/* # of Rx VLAN extractions */
	unsigned long rx_drops;		/* # of packets dropped due to no mem */
};

struct sge_eth_rxq {			/* SW Ethernet Rx queue */
	struct sge_rspq rspq;
	struct sge_fl fl;
	struct sge_eth_stats stats;
} ____cacheline_aligned_in_smp;

struct sge_ofld_stats {			/* offload queue statistics */
	unsigned long pkts;		/* # of packets */
	unsigned long imm;		/* # of immediate-data packets */
	unsigned long an;		/* # of asynchronous notifications */
	unsigned long nomem;		/* # of responses deferred due to no mem */
};

struct sge_ofld_rxq {			/* SW offload Rx queue */
	struct sge_rspq rspq;
	struct sge_fl fl;
	struct sge_ofld_stats stats;
} ____cacheline_aligned_in_smp;

struct tx_desc {
	__be64 flit[8];
};

struct tx_sw_desc;

struct sge_txq {
	unsigned int  in_use;		/* # of in-use Tx descriptors */
	unsigned int  size;		/* # of descriptors */
	unsigned int  cidx;		/* SW consumer index */
	unsigned int  pidx;		/* producer index */
	unsigned long stops;		/* # of times q has been stopped */
	unsigned long restarts;		/* # of queue restarts */
	unsigned int  cntxt_id;		/* SGE context id for the Tx q */
	struct tx_desc *desc;		/* address of HW Tx descriptor ring */
	struct tx_sw_desc *sdesc;	/* address of SW Tx descriptor ring */
	struct sge_qstat *stat;		/* queue status entry */
	dma_addr_t    phys_addr;	/* physical address of the ring */
	spinlock_t db_lock;
	int db_disabled;
	unsigned short db_pidx;
	unsigned short db_pidx_inc;
	u64 udb;
};

struct sge_eth_txq {			/* state for an SGE Ethernet Tx queue */
	struct sge_txq q;
	struct netdev_queue *txq;	/* associated netdev TX queue */
	unsigned long tso;		/* # of TSO requests */
	unsigned long tx_cso;		/* # of Tx checksum offloads */
	unsigned long vlan_ins;		/* # of Tx VLAN insertions */
	unsigned long mapping_err;	/* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ofld_txq {			/* state for an SGE offload Tx queue */
	struct sge_txq q;
	struct adapter *adap;
	struct sk_buff_head sendq;	/* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	u8 full;			/* the Tx ring is full */
	unsigned long mapping_err;	/* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ctrl_txq {			/* state for an SGE control Tx queue */
	struct sge_txq q;
	struct adapter *adap;
	struct sk_buff_head sendq;	/* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	u8 full;			/* the Tx ring is full */
} ____cacheline_aligned_in_smp;

struct sge {
	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
	struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
	struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];

	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
	struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
	struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
	struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;

	struct sge_rspq intrq ____cacheline_aligned_in_smp;
	spinlock_t intrq_lock;

	u16 max_ethqsets;		/* # of available Ethernet queue sets */
	u16 ethqsets;			/* # of active Ethernet queue sets */
	u16 ethtxq_rover;		/* Tx queue to clean up next */
	u16 ofldqsets;			/* # of active offload queue sets */
	u16 rdmaqs;			/* # of available RDMA Rx queues */
	u16 rdmaciqs;			/* # of available RDMA concentrator IQs */
	u16 ofld_rxq[MAX_OFLD_QSETS];
	u16 rdma_rxq[NCHAN];
	u16 rdma_ciq[NCHAN];
	u16 timer_val[SGE_NTIMERS];
	u8 counter_val[SGE_NCOUNTERS];
	u32 fl_pg_order;		/* large page allocation size */
	u32 stat_len;			/* length of status page at ring end */
	u32 pktshift;			/* padding between CPL & packet data */
	u32 fl_align;			/* response queue message alignment */
	u32 fl_starve_thres;		/* Free List starvation threshold */

	/* State variables for detecting an SGE Ingress DMA hang */
	unsigned int idma_1s_thresh;	/* SGE same State Counter 1s threshold */
	unsigned int idma_stalled[2];	/* SGE synthesized stalled timers in HZ */
	unsigned int idma_state[2];	/* SGE IDMA Hang detect state */
	unsigned int idma_qid[2];	/* SGE IDMA Hung Ingress Queue ID */

	unsigned int egr_start;
	unsigned int ingr_start;
	void *egr_map[MAX_EGRQ];	/* qid->queue egress queue map */
	struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
	DECLARE_BITMAP(starving_fl, MAX_EGRQ);
	DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
	struct timer_list rx_timer;	/* refills starving FLs */
	struct timer_list tx_timer;	/* checks Tx queues */
};

#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)

struct l2t_data;

#ifdef CONFIG_PCI_IOV

/* T4 supports SRIOV on PF0-3 and T5 on PF0-7.  However, the Serial
 * Configuration initialization for T5 only has SR-IOV functionality enabled
 * on PF0-3 in order to simplify everything.
 */
#define NUM_OF_PF_WITH_SRIOV 4

#endif

struct adapter {
	void __iomem *regs;
	void __iomem *bar2;
	u32 t4_bar0;
	struct pci_dev *pdev;
	struct device *pdev_dev;
	unsigned int mbox;
	unsigned int fn;
	unsigned int flags;
	enum chip_type chip;

	int msg_enable;

	struct adapter_params params;
	struct cxgb4_virt_res vres;
	unsigned int swintr;

	unsigned int wol;

	struct {
		unsigned short vec;
		char desc[IFNAMSIZ + 10];
	} msix_info[MAX_INGQ + 1];

	struct sge sge;

	struct net_device *port[MAX_NPORTS];
	u8 chan_map[NCHAN];		/* channel -> port map */

	u32 filter_mode;
	unsigned int l2t_start;
	unsigned int l2t_end;
	struct l2t_data *l2t;
	void *uld_handle[CXGB4_ULD_MAX];
	struct list_head list_node;
	struct list_head rcu_node;

	struct tid_info tids;
	void **tid_release_head;
	spinlock_t tid_release_lock;
	struct work_struct tid_release_task;
	struct work_struct db_full_task;
	struct work_struct db_drop_task;
	bool tid_release_task_busy;

	struct dentry *debugfs_root;

	spinlock_t stats_lock;
	spinlock_t win0_lock ____cacheline_aligned_in_smp;
};

/* Defined bit width of user definable filter tuples
 */
#define ETHTYPE_BITWIDTH 16
#define FRAG_BITWIDTH 1
#define MACIDX_BITWIDTH 9
#define FCOE_BITWIDTH 1
#define IPORT_BITWIDTH 3
#define MATCHTYPE_BITWIDTH 3
#define PROTO_BITWIDTH 8
#define TOS_BITWIDTH 8
#define PF_BITWIDTH 8
#define VF_BITWIDTH 8
#define IVLAN_BITWIDTH 16
#define OVLAN_BITWIDTH 16

/* Filter matching rules.  These consist of a set of ingress packet field
 * (value, mask) tuples.  The associated ingress packet field matches the
 * tuple when ((field & mask) == value).  (Thus a wildcard "don't care" field
 * rule can be constructed by specifying a tuple of (0, 0).)  A filter rule
 * matches an ingress packet when all of the individual field matching rules
 * are true.
 *
 * Partial field masks are always valid; however, while it may be easy to
 * understand their meanings for some fields (e.g. IP address to match a
 * subnet), for others making sensible partial masks is less intuitive (e.g.
 * MPS match type) ...
 *
 * Most of the following data structures are modeled on T4 capabilities.
 * Drivers for earlier chips use the subsets which make sense for those chips.
 * We really need to come up with a hardware-independent mechanism to
 * represent hardware filter capabilities ...
 */
struct ch_filter_tuple {
	/* Compressed header matching field rules.  The TP_VLAN_PRI_MAP
	 * register selects which of these fields will participate in the
	 * filter match rules -- up to a maximum of 36 bits.  Because
	 * TP_VLAN_PRI_MAP is a global register, all filters must use the same
	 * set of fields.
	 */
	uint32_t ethtype:ETHTYPE_BITWIDTH;	/* Ethernet type */
	uint32_t frag:FRAG_BITWIDTH;		/* IP fragmentation header */
	uint32_t ivlan_vld:1;			/* inner VLAN valid */
	uint32_t ovlan_vld:1;			/* outer VLAN valid */
	uint32_t pfvf_vld:1;			/* PF/VF valid */
	uint32_t macidx:MACIDX_BITWIDTH;	/* exact match MAC index */
	uint32_t fcoe:FCOE_BITWIDTH;		/* FCoE packet */
	uint32_t iport:IPORT_BITWIDTH;		/* ingress port */
	uint32_t matchtype:MATCHTYPE_BITWIDTH;	/* MPS match type */
	uint32_t proto:PROTO_BITWIDTH;		/* protocol type */
	uint32_t tos:TOS_BITWIDTH;		/* TOS/Traffic Type */
	uint32_t pf:PF_BITWIDTH;		/* PCI-E PF ID */
	uint32_t vf:VF_BITWIDTH;		/* PCI-E VF ID */
	uint32_t ivlan:IVLAN_BITWIDTH;		/* inner VLAN */
	uint32_t ovlan:OVLAN_BITWIDTH;		/* outer VLAN */

	/* Uncompressed header matching field rules.  These are always
	 * available for field rules.
	 */
	uint8_t lip[16];	/* local IP address (IPv4 in [3:0]) */
	uint8_t fip[16];	/* foreign IP address (IPv4 in [3:0]) */
	uint16_t lport;		/* local port */
	uint16_t fport;		/* foreign port */
};

/* A filter ioctl command.
 */
struct ch_filter_specification {
	/* Administrative fields for filter.
	 */
	uint32_t hitcnts:1;	/* count filter hits in TCB */
	uint32_t prio:1;	/* filter has priority over active/server */

	/* Fundamental filter typing.  This is the one element of filter
	 * matching that doesn't exist as a (value, mask) tuple.
	 */
	uint32_t type:1;	/* 0 => IPv4, 1 => IPv6 */

	/* Packet dispatch information.  Ingress packets which match the
	 * filter rules will be dropped, passed to the host or switched back
	 * out as egress packets.
	 */
	uint32_t action:2;	/* drop, pass, switch */

	uint32_t rpttid:1;	/* report TID in RSS hash field */

	uint32_t dirsteer:1;	/* 0 => RSS, 1 => steer to iq */
	uint32_t iq:10;		/* ingress queue */

	uint32_t maskhash:1;	/* dirsteer=0: store RSS hash in TCB */
	uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */
				/*             1 => TCB contains IQ ID */

	/* Switch proxy/rewrite fields.  An ingress packet which matches a
	 * filter with "switch" set will be looped back out as an egress
	 * packet -- potentially with some Ethernet header rewriting.
	 */
	uint32_t eport:2;	/* egress port to switch packet out */
	uint32_t newdmac:1;	/* rewrite destination MAC address */
	uint32_t newsmac:1;	/* rewrite source MAC address */
	uint32_t newvlan:2;	/* rewrite VLAN Tag */
	uint8_t dmac[ETH_ALEN];	/* new destination MAC address */
	uint8_t smac[ETH_ALEN];	/* new source MAC address */
	uint16_t vlan;		/* VLAN Tag to insert */

	/* Filter rule value/mask pairs.
	 */
	struct ch_filter_tuple val;
	struct ch_filter_tuple mask;
};

enum {
	FILTER_PASS = 0,	/* default */
	FILTER_DROP,
	FILTER_SWITCH
};

enum {
	VLAN_NOCHANGE = 0,	/* default */
	VLAN_REMOVE,
	VLAN_INSERT,
	VLAN_REWRITE
};
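
/* Illustrative sketch only, not part of the driver: how the (value, mask)
 * convention described above maps onto a filter specification.  This fills
 * in a rule that would drop IPv4 TCP packets with foreign (remote) port 80;
 * actually installing the rule in hardware (via a filter work request) is
 * done elsewhere.  The helper name is an assumption made for illustration.
 */
static inline void example_fill_drop_filter(struct ch_filter_specification *fs)
{
	*fs = (struct ch_filter_specification) { 0 };	/* all-wildcard rule */
	fs->type = 0;			/* 0 => IPv4 */
	fs->action = FILTER_DROP;	/* drop matching ingress packets */
	fs->hitcnts = 1;		/* count filter hits in the TCB */
	fs->val.proto = 6;		/* IP protocol 6 == TCP (compressed field) */
	fs->mask.proto = 0xff;		/* ... matched exactly */
	fs->val.fport = 80;		/* foreign port (always available) */
	fs->mask.fport = 0xffff;	/* full 16-bit match */
}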

static inline int is_t5(enum chip_type chip)
{
	return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5;
}

static inline int is_t4(enum chip_type chip)
{
	return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
}

static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
	return readl(adap->regs + reg_addr);
}

static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
{
	writel(val, adap->regs + reg_addr);
}

#ifndef readq
static inline u64 readq(const volatile void __iomem *addr)
{
	return readl(addr) + ((u64)readl(addr + 4) << 32);
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr + 4);
}
#endif
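
/* Note that this fallback builds the 64-bit value out of two 32-bit MMIO
 * accesses, so a readq()/writeq() emulated this way is not a single atomic
 * 64-bit bus transaction.
 */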

static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
{
	return readq(adap->regs + reg_addr);
}

static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
{
	writeq(val, adap->regs + reg_addr);
}

/**
 * netdev2pinfo - return the port_info structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct port_info associated with a net_device
 */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
	return netdev_priv(dev);
}

/**
 * adap2pinfo - return the port_info of a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Return the port_info structure for the port of the given index.
 */
static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
{
	return netdev_priv(adap->port[idx]);
}

/**
 * netdev2adap - return the adapter structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct adapter associated with a net_device
 */
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
	return netdev2pinfo(dev)->adapter;
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);

void *t4_alloc_mem(size_t size);

void t4_free_sge_resources(struct adapter *adap);
void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
irq_handler_t t4_intr_handler(struct adapter *adap);
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
		     const struct pkt_gl *gl);
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
		     struct net_device *dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t hnd);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
			 struct net_device *dev, struct netdev_queue *netdevq,
			 unsigned int iqid);
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
			  struct net_device *dev, unsigned int iqid,
			  unsigned int cmplqid);
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
			  struct net_device *dev, unsigned int iqid);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
extern int dbfifo_int_thresh;

#define for_each_port(adapter, iter) \
	for (iter = 0; iter < (adapter)->params.nports; ++iter)

static inline int is_bypass(struct adapter *adap)
{
	return adap->params.bypass;
}

static inline int is_bypass_device(int device)
{
	/* this should be set based upon device capabilities */
	switch (device) {
	case 0x440b:
	case 0x440c:
		return 1;
	default:
		return 0;
	}
}

static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
	return adap->params.vpd.cclk / 1000;
}

static inline unsigned int us_to_core_ticks(const struct adapter *adap,
					    unsigned int us)
{
	return (us * adap->params.vpd.cclk) / 1000;
}

static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
					    unsigned int ticks)
{
	/* add Core Clock / 2 to round ticks to nearest uS */
	return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
		adapter->params.vpd.cclk);
}
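
/* Worked example of the conversions above (illustrative): these helpers
 * imply params.vpd.cclk is kept in kHz, so a 250000 kHz (250 MHz) core
 * clock gives core_ticks_per_usec() == 250, us_to_core_ticks() of 4 us
 * == 1000 ticks, and core_ticks_to_us() of 1000 ticks rounds back to 4 us.
 */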

void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
		      u32 val);

int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok);

static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
			     int size, void *rpl)
{
	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
}

static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
				int size, void *rpl)
{
	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
}

void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx);
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx);
void t4_hw_pci_read_cfg4(struct adapter *adapter, int reg, u32 *val);

struct fw_filter_wr;

void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);

int t4_wait_dev_ready(struct adapter *adap);
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);

#define T4_MEMORY_WRITE	0
#define T4_MEMORY_READ	1
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
		 __be32 *buf, int dir);
static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
				  u32 len, __be32 *buf)
{
	return t4_memory_rw(adap, 0, mtype, addr, len, buf, T4_MEMORY_WRITE);
}
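
/* The read direction is the mirror image of the helper above, e.g. (sketch):
 *	t4_memory_rw(adap, 0, mtype, addr, len, buf, T4_MEMORY_READ);
 */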

int t4_seeprom_wp(struct adapter *adapter, bool enable);
int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
int t4_get_tp_version(struct adapter *adapter, u32 *vers);
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state, int *reset);
int t4_prep_adapter(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags);
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
	       u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
		u64 *parity);
const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val);
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta);

void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);

void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
			 const u8 *addr);
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable);

int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state);
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_early_init(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size);
int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val);
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val);
int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
			  unsigned int pf, unsigned int vf,
			  unsigned int nparams, const u32 *params,
			  const u32 *val);
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok);
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt);
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok);
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en);
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en);
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks);
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 *valp);
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 val);
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id);
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid);
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid);
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val);
void t4_sge_decode_idma_state(struct adapter *adapter, int state);
#endif /* __CXGB4_H__ */