/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __CXGB4_H__
#define __CXGB4_H__

#include "t4_hw.h"

#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include "cxgb4_uld.h"

#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)

enum {
        MAX_NPORTS = 4,     /* max # of ports */
        SERNUM_LEN = 24,    /* Serial # length */
        EC_LEN     = 16,    /* E/C length */
        ID_LEN     = 16,    /* ID length */
        PN_LEN     = 16,    /* Part Number length */
};

enum {
        T4_REGMAP_SIZE = (160 * 1024),
        T5_REGMAP_SIZE = (332 * 1024),
};

enum {
        MEM_EDC0,
        MEM_EDC1,
        MEM_MC,
        MEM_MC0 = MEM_MC,
        MEM_MC1
};

enum {
        MEMWIN0_APERTURE = 2048,
        MEMWIN0_BASE     = 0x1b800,
        MEMWIN1_APERTURE = 32768,
        MEMWIN1_BASE     = 0x28000,
        MEMWIN1_BASE_T5  = 0x52000,
        MEMWIN2_APERTURE = 65536,
        MEMWIN2_BASE     = 0x30000,
        MEMWIN2_APERTURE_T5 = 131072,
        MEMWIN2_BASE_T5  = 0x60000,
};

enum dev_master {
        MASTER_CANT,
        MASTER_MAY,
        MASTER_MUST
};

enum dev_state {
        DEV_STATE_UNINIT,
        DEV_STATE_INIT,
        DEV_STATE_ERR
};

enum {
        PAUSE_RX      = 1 << 0,
        PAUSE_TX      = 1 << 1,
        PAUSE_AUTONEG = 1 << 2
};

struct port_stats {
        u64 tx_octets;            /* total # of octets in good frames */
        u64 tx_frames;            /* all good frames */
        u64 tx_bcast_frames;      /* all broadcast frames */
        u64 tx_mcast_frames;      /* all multicast frames */
        u64 tx_ucast_frames;      /* all unicast frames */
        u64 tx_error_frames;      /* all error frames */

        u64 tx_frames_64;         /* # of Tx frames in a particular range */
        u64 tx_frames_65_127;
        u64 tx_frames_128_255;
        u64 tx_frames_256_511;
        u64 tx_frames_512_1023;
        u64 tx_frames_1024_1518;
        u64 tx_frames_1519_max;

        u64 tx_drop;              /* # of dropped Tx frames */
        u64 tx_pause;             /* # of transmitted pause frames */
        u64 tx_ppp0;              /* # of transmitted PPP prio 0 frames */
        u64 tx_ppp1;              /* # of transmitted PPP prio 1 frames */
        u64 tx_ppp2;              /* # of transmitted PPP prio 2 frames */
        u64 tx_ppp3;              /* # of transmitted PPP prio 3 frames */
        u64 tx_ppp4;              /* # of transmitted PPP prio 4 frames */
        u64 tx_ppp5;              /* # of transmitted PPP prio 5 frames */
        u64 tx_ppp6;              /* # of transmitted PPP prio 6 frames */
        u64 tx_ppp7;              /* # of transmitted PPP prio 7 frames */

        u64 rx_octets;            /* total # of octets in good frames */
        u64 rx_frames;            /* all good frames */
        u64 rx_bcast_frames;      /* all broadcast frames */
        u64 rx_mcast_frames;      /* all multicast frames */
        u64 rx_ucast_frames;      /* all unicast frames */
        u64 rx_too_long;          /* # of frames exceeding MTU */
        u64 rx_jabber;            /* # of jabber frames */
        u64 rx_fcs_err;           /* # of received frames with bad FCS */
        u64 rx_len_err;           /* # of received frames with length error */
        u64 rx_symbol_err;        /* symbol errors */
        u64 rx_runt;              /* # of short frames */

        u64 rx_frames_64;         /* # of Rx frames in a particular range */
        u64 rx_frames_65_127;
        u64 rx_frames_128_255;
        u64 rx_frames_256_511;
        u64 rx_frames_512_1023;
        u64 rx_frames_1024_1518;
        u64 rx_frames_1519_max;

        u64 rx_pause;             /* # of received pause frames */
        u64 rx_ppp0;              /* # of received PPP prio 0 frames */
        u64 rx_ppp1;              /* # of received PPP prio 1 frames */
        u64 rx_ppp2;              /* # of received PPP prio 2 frames */
        u64 rx_ppp3;              /* # of received PPP prio 3 frames */
        u64 rx_ppp4;              /* # of received PPP prio 4 frames */
        u64 rx_ppp5;              /* # of received PPP prio 5 frames */
        u64 rx_ppp6;              /* # of received PPP prio 6 frames */
        u64 rx_ppp7;              /* # of received PPP prio 7 frames */

        u64 rx_ovflow0;           /* drops due to buffer-group 0 overflows */
        u64 rx_ovflow1;           /* drops due to buffer-group 1 overflows */
        u64 rx_ovflow2;           /* drops due to buffer-group 2 overflows */
        u64 rx_ovflow3;           /* drops due to buffer-group 3 overflows */
        u64 rx_trunc0;            /* buffer-group 0 truncated packets */
        u64 rx_trunc1;            /* buffer-group 1 truncated packets */
        u64 rx_trunc2;            /* buffer-group 2 truncated packets */
        u64 rx_trunc3;            /* buffer-group 3 truncated packets */
};

struct lb_port_stats {
        u64 octets;
        u64 frames;
        u64 bcast_frames;
        u64 mcast_frames;
        u64 ucast_frames;
        u64 error_frames;

        u64 frames_64;
        u64 frames_65_127;
        u64 frames_128_255;
        u64 frames_256_511;
        u64 frames_512_1023;
        u64 frames_1024_1518;
        u64 frames_1519_max;

        u64 drop;

        u64 ovflow0;
        u64 ovflow1;
        u64 ovflow2;
        u64 ovflow3;
        u64 trunc0;
        u64 trunc1;
        u64 trunc2;
        u64 trunc3;
};

struct tp_tcp_stats {
        u32 tcp_out_rsts;
        u64 tcp_in_segs;
        u64 tcp_out_segs;
        u64 tcp_retrans_segs;
};

struct tp_usm_stats {
        u32 frames;
        u32 drops;
        u64 octets;
};

struct tp_fcoe_stats {
        u32 frames_ddp;
        u32 frames_drop;
        u64 octets_ddp;
};

struct tp_err_stats {
        u32 mac_in_errs[4];
        u32 hdr_in_errs[4];
        u32 tcp_in_errs[4];
        u32 tnl_cong_drops[4];
        u32 ofld_chan_drops[4];
        u32 tnl_tx_drops[4];
        u32 ofld_vlan_drops[4];
        u32 tcp6_in_errs[4];
        u32 ofld_no_neigh;
        u32 ofld_cong_defer;
};

struct tp_cpl_stats {
        u32 req[4];
        u32 rsp[4];
};

struct tp_rdma_stats {
        u32 rqe_dfr_pkt;
        u32 rqe_dfr_mod;
};

struct sge_params {
        u32 hps;                  /* host page size for our PF/VF */
        u32 eq_qpp;               /* egress queues/page for our PF/VF */
        u32 iq_qpp;               /* ingress queues/page for our PF/VF */
};

struct tp_params {
        unsigned int tre;            /* log2 of core clocks per TP tick */
        unsigned int la_mask;        /* what events are recorded by TP LA */
        unsigned short tx_modq_map;  /* TX modulation scheduler queue to */
                                     /* channel map */

        uint32_t dack_re;            /* DACK timer resolution */
        unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */

        u32 vlan_pri_map;            /* cached TP_VLAN_PRI_MAP */
        u32 ingress_config;          /* cached TP_INGRESS_CONFIG */

        /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets.  This is a
         * subset of the set of fields which may be present in the Compressed
         * Filter Tuple portion of filters and TCP TCB connections.  The
         * fields which are present are controlled by the TP_VLAN_PRI_MAP.
         * Since a variable number of fields may or may not be present, their
         * shifted field positions within the Compressed Filter Tuple may
         * vary, or not even be present if the field isn't selected in
         * TP_VLAN_PRI_MAP.  Since some of these fields are needed in various
         * places we store their offsets here, or a -1 if the field isn't
         * present.
         */
        int vlan_shift;
        int vnic_shift;
        int port_shift;
        int protocol_shift;
};

struct vpd_params {
        unsigned int cclk;
        u8 ec[EC_LEN + 1];
        u8 sn[SERNUM_LEN + 1];
        u8 id[ID_LEN + 1];
        u8 pn[PN_LEN + 1];
};

struct pci_params {
        unsigned char speed;
        unsigned char width;
};

#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_FPGA          0x100
#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)

#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
#define CHELSIO_T6 0x6

enum chip_type {
        T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
        T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
        T4_FIRST_REV = T4_A1,
        T4_LAST_REV  = T4_A2,

        T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
        T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
        T5_FIRST_REV = T5_A0,
        T5_LAST_REV  = T5_A1,

        T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0),
        T6_FIRST_REV = T6_A0,
        T6_LAST_REV  = T6_A0,
};

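/* Illustrative only (not used by the driver): a chip code packs the chip
 * version into bits [7:4] and the revision into bits [3:0].  For example,
 * T5_A1 == CHELSIO_CHIP_CODE(CHELSIO_T5, 1) == 0x51, so
 * CHELSIO_CHIP_VERSION(T5_A1) == 0x5 and CHELSIO_CHIP_RELEASE(T5_A1) == 0x1.
 */
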
struct devlog_params {
        u32 memtype;              /* which memory (EDC0, EDC1, MC) */
        u32 start;                /* start of log in firmware memory */
        u32 size;                 /* size of log */
};

/* Stores chip specific parameters */
struct arch_specific_params {
        u8 nchan;
        u16 mps_rplc_size;
        u16 vfcount;
        u32 sge_fl_db;
        u16 mps_tcam_size;
};

struct adapter_params {
        struct sge_params sge;
        struct tp_params tp;
        struct vpd_params vpd;
        struct pci_params pci;
        struct devlog_params devlog;
        enum pcie_memwin drv_memwin;

        unsigned int cim_la_size;

        unsigned int sf_size;             /* serial flash size in bytes */
        unsigned int sf_nsec;             /* # of flash sectors */
        unsigned int sf_fw_start;         /* start of FW image in flash */

        unsigned int fw_vers;
        unsigned int tp_vers;
        u8 api_vers[7];

        unsigned short mtus[NMTUS];
        unsigned short a_wnd[NCCTRL_WIN];
        unsigned short b_wnd[NCCTRL_WIN];

        unsigned char nports;             /* # of ethernet ports */
        unsigned char portvec;
        enum chip_type chip;              /* chip code */
        struct arch_specific_params arch; /* chip specific params */
        unsigned char offload;

        unsigned char bypass;

        unsigned int ofldq_wr_cred;
        bool ulptx_memwrite_dsgl;         /* use of T5 DSGL allowed */

        unsigned int max_ordird_qp;       /* Max read depth per RDMA QP */
        unsigned int max_ird_adapter;     /* Max read depth per adapter */
};

/* State needed to monitor the forward progress of SGE Ingress DMA activities
 * and possible hangs.
 */
struct sge_idma_monitor_state {
        unsigned int idma_1s_thresh;  /* 1s threshold in Core Clock ticks */
        unsigned int idma_stalled[2]; /* synthesized stalled timers in HZ */
        unsigned int idma_state[2];   /* IDMA Hang detect state */
        unsigned int idma_qid[2];     /* IDMA Hung Ingress Queue ID */
        unsigned int idma_warn[2];    /* time to warning in HZ */
};

#include "t4fw_api.h"

#define FW_VERSION(chip) ( \
                FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
                FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \
                FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \
                FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD))
#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)

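/* FW_VERSION() relies on token pasting: FW_VERSION(T4) packs the per-chip
 * constants T4FW_VERSION_MAJOR/MINOR/MICRO/BUILD into a single 32-bit word
 * via the FW_HDR_FW_VER_* field macros (likewise for T5 and T6).
 */
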
struct fw_info {
        u8 chip;
        char *fs_name;
        char *fw_mod_name;
        struct fw_hdr fw_hdr;
};


struct trace_params {
        u32 data[TRACE_LEN / 4];
        u32 mask[TRACE_LEN / 4];
        unsigned short snap_len;
        unsigned short min_len;
        unsigned char skip_ofst;
        unsigned char skip_len;
        unsigned char invert;
        unsigned char port;
};

struct link_config {
        unsigned short supported;        /* link capabilities */
        unsigned short advertising;      /* advertised capabilities */
        unsigned short requested_speed;  /* speed user has requested */
        unsigned short speed;            /* actual link speed */
        unsigned char requested_fc;      /* flow control user has requested */
        unsigned char fc;                /* actual link flow control */
        unsigned char autoneg;           /* autonegotiating? */
        unsigned char link_ok;           /* link up? */
};

#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)

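/* Firmware command lengths are expressed in 16-byte units, so for any
 * firmware command structure FW_LEN16(struct fw_xyz_cmd) yields the value
 * for that command's LEN16 field (fw_xyz_cmd is just a stand-in name here).
 */
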
enum {
        MAX_ETH_QSETS = 32,           /* # of Ethernet Tx/Rx queue sets */
        MAX_OFLD_QSETS = 16,          /* # of offload Tx/Rx queue sets */
        MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
        MAX_RDMA_QUEUES = NCHAN,      /* # of streaming RDMA Rx queues */
        MAX_RDMA_CIQS = 32,           /* # of RDMA concentrator IQs */
        MAX_ISCSI_QUEUES = NCHAN,     /* # of streaming iSCSI Rx queues */
};

enum {
        MAX_TXQ_ENTRIES      = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES     = 16384,
        MAX_RX_BUFFERS       = 16384,
        MIN_TXQ_ENTRIES      = 32,
        MIN_CTRL_TXQ_ENTRIES = 32,
        MIN_RSPQ_ENTRIES     = 128,
        MIN_FL_ENTRIES       = 16
};

enum {
        INGQ_EXTRAS = 2,              /* firmware event queue and */
                                      /*   forwarded interrupts */
        MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
                   + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
};

struct adapter;
struct sge_rspq;

#include "cxgb4_dcb.h"

#ifdef CONFIG_CHELSIO_T4_FCOE
#include "cxgb4_fcoe.h"
#endif /* CONFIG_CHELSIO_T4_FCOE */

struct port_info {
        struct adapter *adapter;
        u16 viid;
        s16 xact_addr_filt;           /* index of exact MAC address filter */
        u16 rss_size;                 /* size of VI's RSS table slice */
        s8 mdio_addr;
        enum fw_port_type port_type;
        u8 mod_type;
        u8 port_id;
        u8 tx_chan;
        u8 lport;                     /* associated offload logical port */
        u8 nqsets;                    /* # of qsets */
        u8 first_qset;                /* index of first qset */
        u8 rss_mode;
        struct link_config link_cfg;
        u16 *rss;
        struct port_stats stats_base;
#ifdef CONFIG_CHELSIO_T4_DCB
        struct port_dcb_info dcb;     /* Data Center Bridging support */
#endif
#ifdef CONFIG_CHELSIO_T4_FCOE
        struct cxgb_fcoe fcoe;
#endif /* CONFIG_CHELSIO_T4_FCOE */
};

struct dentry;
struct work_struct;

enum {                                /* adapter flags */
        FULL_INIT_DONE     = (1 << 0),
        DEV_ENABLED        = (1 << 1),
        USING_MSI          = (1 << 2),
        USING_MSIX         = (1 << 3),
        FW_OK              = (1 << 4),
        RSS_TNLALLLOOKUP   = (1 << 5),
        USING_SOFT_PARAMS  = (1 << 6),
        MASTER_PF          = (1 << 7),
        FW_OFLD_CONN       = (1 << 9),
};

struct rx_sw_desc;

struct sge_fl {                     /* SGE free-buffer queue state */
        unsigned int avail;         /* # of available Rx buffers */
        unsigned int pend_cred;     /* new buffers since last FL DB ring */
        unsigned int cidx;          /* consumer index */
        unsigned int pidx;          /* producer index */
        unsigned long alloc_failed; /* # of times buffer allocation failed */
        unsigned long large_alloc_failed;
        unsigned long starving;
        /* RO fields */
        unsigned int cntxt_id;      /* SGE context id for the free list */
        unsigned int size;          /* capacity of free list */
        struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */
        __be64 *desc;               /* address of HW Rx descriptor ring */
        dma_addr_t addr;            /* bus address of HW ring start */
        void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
        unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */
};

/* A packet gather list */
struct pkt_gl {
        struct page_frag frags[MAX_SKB_FRAGS];
        void *va;                   /* virtual address of first byte */
        unsigned int nfrags;        /* # of fragments */
        unsigned int tot_len;       /* total length of fragments */
};

typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
                              const struct pkt_gl *gl);

struct sge_rspq {                   /* state for an SGE response queue */
        struct napi_struct napi;
        const __be64 *cur_desc;     /* current descriptor in queue */
        unsigned int cidx;          /* consumer index */
        u8 gen;                     /* current generation bit */
        u8 intr_params;             /* interrupt holdoff parameters */
        u8 next_intr_params;        /* holdoff params for next interrupt */
        u8 adaptive_rx;
        u8 pktcnt_idx;              /* interrupt packet threshold */
        u8 uld;                     /* ULD handling this queue */
        u8 idx;                     /* queue index within its group */
        int offset;                 /* offset into current Rx buffer */
        u16 cntxt_id;               /* SGE context id for the response q */
        u16 abs_id;                 /* absolute SGE id for the response q */
        __be64 *desc;               /* address of HW response ring */
        dma_addr_t phys_addr;       /* physical address of the ring */
        void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
        unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */
        unsigned int iqe_len;       /* entry size */
        unsigned int size;          /* capacity of response queue */
        struct adapter *adap;
        struct net_device *netdev;  /* associated net device */
        rspq_handler_t handler;
#ifdef CONFIG_NET_RX_BUSY_POLL
#define CXGB_POLL_STATE_IDLE            0
#define CXGB_POLL_STATE_NAPI            BIT(0) /* NAPI owns this poll */
#define CXGB_POLL_STATE_POLL            BIT(1) /* poll owns this poll */
#define CXGB_POLL_STATE_NAPI_YIELD      BIT(2) /* NAPI yielded this poll */
#define CXGB_POLL_STATE_POLL_YIELD      BIT(3) /* poll yielded this poll */
#define CXGB_POLL_YIELD                 (CXGB_POLL_STATE_NAPI_YIELD | \
                                         CXGB_POLL_STATE_POLL_YIELD)
#define CXGB_POLL_LOCKED                (CXGB_POLL_STATE_NAPI | \
                                         CXGB_POLL_STATE_POLL)
#define CXGB_POLL_USER_PEND             (CXGB_POLL_STATE_POLL | \
                                         CXGB_POLL_STATE_POLL_YIELD)
        unsigned int bpoll_state;
        spinlock_t bpoll_lock;      /* lock for busy poll */
#endif /* CONFIG_NET_RX_BUSY_POLL */

};

struct sge_eth_stats {              /* Ethernet queue statistics */
        unsigned long pkts;         /* # of ethernet packets */
        unsigned long lro_pkts;     /* # of LRO super packets */
        unsigned long lro_merged;   /* # of wire packets merged by LRO */
        unsigned long rx_cso;       /* # of Rx checksum offloads */
        unsigned long vlan_ex;      /* # of Rx VLAN extractions */
        unsigned long rx_drops;     /* # of packets dropped due to no mem */
};

struct sge_eth_rxq {                /* SW Ethernet Rx queue */
        struct sge_rspq rspq;
        struct sge_fl fl;
        struct sge_eth_stats stats;
} ____cacheline_aligned_in_smp;

struct sge_ofld_stats {             /* offload queue statistics */
        unsigned long pkts;         /* # of packets */
        unsigned long imm;          /* # of immediate-data packets */
        unsigned long an;           /* # of asynchronous notifications */
        unsigned long nomem;        /* # of responses deferred due to no mem */
};

struct sge_ofld_rxq {               /* SW offload Rx queue */
        struct sge_rspq rspq;
        struct sge_fl fl;
        struct sge_ofld_stats stats;
} ____cacheline_aligned_in_smp;

struct tx_desc {
        __be64 flit[8];
};

struct tx_sw_desc;

struct sge_txq {
        unsigned int in_use;        /* # of in-use Tx descriptors */
        unsigned int size;          /* # of descriptors */
        unsigned int cidx;          /* SW consumer index */
        unsigned int pidx;          /* producer index */
        unsigned long stops;        /* # of times q has been stopped */
        unsigned long restarts;     /* # of queue restarts */
        unsigned int cntxt_id;      /* SGE context id for the Tx q */
        struct tx_desc *desc;       /* address of HW Tx descriptor ring */
        struct tx_sw_desc *sdesc;   /* address of SW Tx descriptor ring */
        struct sge_qstat *stat;     /* queue status entry */
        dma_addr_t phys_addr;       /* physical address of the ring */
        spinlock_t db_lock;
        int db_disabled;
        unsigned short db_pidx;
        unsigned short db_pidx_inc;
        void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
        unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */
};

struct sge_eth_txq {                /* state for an SGE Ethernet Tx queue */
        struct sge_txq q;
        struct netdev_queue *txq;   /* associated netdev TX queue */
#ifdef CONFIG_CHELSIO_T4_DCB
        u8 dcb_prio;                /* DCB Priority bound to queue */
#endif
        unsigned long tso;          /* # of TSO requests */
        unsigned long tx_cso;       /* # of Tx checksum offloads */
        unsigned long vlan_ins;     /* # of Tx VLAN insertions */
        unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ofld_txq {               /* state for an SGE offload Tx queue */
        struct sge_txq q;
        struct adapter *adap;
        struct sk_buff_head sendq;  /* list of backpressured packets */
        struct tasklet_struct qresume_tsk; /* restarts the queue */
        u8 full;                    /* the Tx ring is full */
        unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ctrl_txq {               /* state for an SGE control Tx queue */
        struct sge_txq q;
        struct adapter *adap;
        struct sk_buff_head sendq;  /* list of backpressured packets */
        struct tasklet_struct qresume_tsk; /* restarts the queue */
        u8 full;                    /* the Tx ring is full */
} ____cacheline_aligned_in_smp;

struct sge {
        struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
        struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
        struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];

        struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
        struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
        struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
        struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
        struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;

        struct sge_rspq intrq ____cacheline_aligned_in_smp;
        spinlock_t intrq_lock;

        u16 max_ethqsets;           /* # of available Ethernet queue sets */
        u16 ethqsets;               /* # of active Ethernet queue sets */
        u16 ethtxq_rover;           /* Tx queue to clean up next */
        u16 ofldqsets;              /* # of active offload queue sets */
        u16 rdmaqs;                 /* # of available RDMA Rx queues */
        u16 rdmaciqs;               /* # of available RDMA concentrator IQs */
        u16 ofld_rxq[MAX_OFLD_QSETS];
        u16 rdma_rxq[MAX_RDMA_QUEUES];
        u16 rdma_ciq[MAX_RDMA_CIQS];
        u16 timer_val[SGE_NTIMERS];
        u8 counter_val[SGE_NCOUNTERS];
        u32 fl_pg_order;            /* large page allocation size */
        u32 stat_len;               /* length of status page at ring end */
        u32 pktshift;               /* padding between CPL & packet data */
        u32 fl_align;               /* response queue message alignment */
        u32 fl_starve_thres;        /* Free List starvation threshold */

        struct sge_idma_monitor_state idma_monitor;
        unsigned int egr_start;
        unsigned int egr_sz;
        unsigned int ingr_start;
        unsigned int ingr_sz;
        void **egr_map;             /* qid->queue egress queue map */
        struct sge_rspq **ingr_map; /* qid->queue ingress queue map */
        unsigned long *starving_fl;
        unsigned long *txq_maperr;
        unsigned long *blocked_fl;
        struct timer_list rx_timer; /* refills starving FLs */
        struct timer_list tx_timer; /* checks Tx queues */
};

#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)

struct l2t_data;

#ifdef CONFIG_PCI_IOV

/* T4 supports SRIOV on PF0-3 and T5 on PF0-7.  However, the Serial
 * Configuration initialization for T5 only has SR-IOV functionality enabled
 * on PF0-3 in order to simplify everything.
 */
#define NUM_OF_PF_WITH_SRIOV 4

#endif

struct doorbell_stats {
        u32 db_drop;
        u32 db_empty;
        u32 db_full;
};

struct adapter {
        void __iomem *regs;
        void __iomem *bar2;
        u32 t4_bar0;
        struct pci_dev *pdev;
        struct device *pdev_dev;
        unsigned int mbox;
        unsigned int pf;
        unsigned int flags;
        enum chip_type chip;

        int msg_enable;

        struct adapter_params params;
        struct cxgb4_virt_res vres;
        unsigned int swintr;

        struct {
                unsigned short vec;
                char desc[IFNAMSIZ + 10];
        } msix_info[MAX_INGQ + 1];

        struct doorbell_stats db_stats;
        struct sge sge;

        struct net_device *port[MAX_NPORTS];
        u8 chan_map[NCHAN];         /* channel -> port map */

        u32 filter_mode;
        unsigned int l2t_start;
        unsigned int l2t_end;
        struct l2t_data *l2t;
        unsigned int clipt_start;
        unsigned int clipt_end;
        struct clip_tbl *clipt;
        void *uld_handle[CXGB4_ULD_MAX];
        struct list_head list_node;
        struct list_head rcu_node;

        struct tid_info tids;
        void **tid_release_head;
        spinlock_t tid_release_lock;
        struct workqueue_struct *workq;
        struct work_struct tid_release_task;
        struct work_struct db_full_task;
        struct work_struct db_drop_task;
        bool tid_release_task_busy;

        struct dentry *debugfs_root;

        spinlock_t stats_lock;
        spinlock_t win0_lock ____cacheline_aligned_in_smp;
};

/* Defined bit width of user definable filter tuples
 */
#define ETHTYPE_BITWIDTH 16
#define FRAG_BITWIDTH 1
#define MACIDX_BITWIDTH 9
#define FCOE_BITWIDTH 1
#define IPORT_BITWIDTH 3
#define MATCHTYPE_BITWIDTH 3
#define PROTO_BITWIDTH 8
#define TOS_BITWIDTH 8
#define PF_BITWIDTH 8
#define VF_BITWIDTH 8
#define IVLAN_BITWIDTH 16
#define OVLAN_BITWIDTH 16

/* Filter matching rules.  These consist of a set of ingress packet field
 * (value, mask) tuples.  The associated ingress packet field matches the
 * tuple when ((field & mask) == value).  (Thus a wildcard "don't care" field
 * rule can be constructed by specifying a tuple of (0, 0).)  A filter rule
 * matches an ingress packet when all of the individual field
 * matching rules are true.
 *
 * Partial field masks are always valid, however, while it may be easy to
 * understand their meanings for some fields (e.g. IP address to match a
 * subnet), for others making sensible partial masks is less intuitive (e.g.
 * MPS match type) ...
 *
 * Most of the following data structures are modeled on T4 capabilities.
 * Drivers for earlier chips use the subsets which make sense for those chips.
 * We really need to come up with a hardware-independent mechanism to
 * represent hardware filter capabilities ...
 */
struct ch_filter_tuple {
        /* Compressed header matching field rules.  The TP_VLAN_PRI_MAP
         * register selects which of these fields will participate in the
         * filter match rules -- up to a maximum of 36 bits.  Because
         * TP_VLAN_PRI_MAP is a global register, all filters must use the same
         * set of fields.
         */
        uint32_t ethtype:ETHTYPE_BITWIDTH;      /* Ethernet type */
        uint32_t frag:FRAG_BITWIDTH;            /* IP fragmentation header */
        uint32_t ivlan_vld:1;                   /* inner VLAN valid */
        uint32_t ovlan_vld:1;                   /* outer VLAN valid */
        uint32_t pfvf_vld:1;                    /* PF/VF valid */
        uint32_t macidx:MACIDX_BITWIDTH;        /* exact match MAC index */
        uint32_t fcoe:FCOE_BITWIDTH;            /* FCoE packet */
        uint32_t iport:IPORT_BITWIDTH;          /* ingress port */
        uint32_t matchtype:MATCHTYPE_BITWIDTH;  /* MPS match type */
        uint32_t proto:PROTO_BITWIDTH;          /* protocol type */
        uint32_t tos:TOS_BITWIDTH;              /* TOS/Traffic Type */
        uint32_t pf:PF_BITWIDTH;                /* PCI-E PF ID */
        uint32_t vf:VF_BITWIDTH;                /* PCI-E VF ID */
        uint32_t ivlan:IVLAN_BITWIDTH;          /* inner VLAN */
        uint32_t ovlan:OVLAN_BITWIDTH;          /* outer VLAN */

        /* Uncompressed header matching field rules.  These are always
         * available for field rules.
         */
        uint8_t lip[16];        /* local IP address (IPv4 in [3:0]) */
        uint8_t fip[16];        /* foreign IP address (IPv4 in [3:0]) */
        uint16_t lport;         /* local port */
        uint16_t fport;         /* foreign port */
};

/* A filter ioctl command.
 */
struct ch_filter_specification {
        /* Administrative fields for filter.
         */
        uint32_t hitcnts:1;     /* count filter hits in TCB */
        uint32_t prio:1;        /* filter has priority over active/server */

        /* Fundamental filter typing.  This is the one element of filter
         * matching that doesn't exist as a (value, mask) tuple.
         */
        uint32_t type:1;        /* 0 => IPv4, 1 => IPv6 */

        /* Packet dispatch information.  Ingress packets which match the
         * filter rules will be dropped, passed to the host or switched back
         * out as egress packets.
         */
        uint32_t action:2;      /* drop, pass, switch */

        uint32_t rpttid:1;      /* report TID in RSS hash field */

        uint32_t dirsteer:1;    /* 0 => RSS, 1 => steer to iq */
        uint32_t iq:10;         /* ingress queue */

        uint32_t maskhash:1;    /* dirsteer=0: store RSS hash in TCB */
        uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */
                                /*             1 => TCB contains IQ ID */

        /* Switch proxy/rewrite fields.  An ingress packet which matches a
         * filter with "switch" set will be looped back out as an egress
         * packet -- potentially with some Ethernet header rewriting.
         */
        uint32_t eport:2;       /* egress port to switch packet out */
        uint32_t newdmac:1;     /* rewrite destination MAC address */
        uint32_t newsmac:1;     /* rewrite source MAC address */
        uint32_t newvlan:2;     /* rewrite VLAN Tag */
        uint8_t dmac[ETH_ALEN]; /* new destination MAC address */
        uint8_t smac[ETH_ALEN]; /* new source MAC address */
        uint16_t vlan;          /* VLAN Tag to insert */

        /* Filter rule value/mask pairs.
         */
        struct ch_filter_tuple val;
        struct ch_filter_tuple mask;
};

enum {
        FILTER_PASS = 0,        /* default */
        FILTER_DROP,
        FILTER_SWITCH
};

enum {
        VLAN_NOCHANGE = 0,      /* default */
        VLAN_REMOVE,
        VLAN_INSERT,
        VLAN_REWRITE
};

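/* Illustrative sketch (hypothetical values): assuming TP_VLAN_PRI_MAP has
 * the protocol and iport fields enabled, a filter that drops all IPv4 TCP
 * traffic arriving on ingress port 0 could be described as:
 *
 *      struct ch_filter_specification fs = { 0 };
 *
 *      fs.type = 0;                                    // IPv4
 *      fs.action = FILTER_DROP;
 *      fs.val.iport = 0;
 *      fs.mask.iport = (1 << IPORT_BITWIDTH) - 1;      // 0x7, exact match
 *      fs.val.proto = IPPROTO_TCP;
 *      fs.mask.proto = (1 << PROTO_BITWIDTH) - 1;      // 0xff, exact match
 */
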
static inline int is_offload(const struct adapter *adap)
{
        return adap->params.offload;
}

static inline int is_t6(enum chip_type chip)
{
        return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T6;
}

static inline int is_t5(enum chip_type chip)
{
        return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5;
}

static inline int is_t4(enum chip_type chip)
{
        return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
}

static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
        return readl(adap->regs + reg_addr);
}

static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
{
        writel(val, adap->regs + reg_addr);
}

#ifndef readq
static inline u64 readq(const volatile void __iomem *addr)
{
        return readl(addr) + ((u64)readl(addr + 4) << 32);
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
        writel(val, addr);
        writel(val >> 32, addr + 4);
}
#endif

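/* Note that the readq()/writeq() fallbacks above are built from two 32-bit
 * accesses (low word first) and are therefore not atomic; this only matters
 * on 32-bit platforms that lack native 64-bit MMIO.
 */
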
static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
{
        return readq(adap->regs + reg_addr);
}

static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
{
        writeq(val, adap->regs + reg_addr);
}

/**
 * netdev2pinfo - return the port_info structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct port_info associated with a net_device
 */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
        return netdev_priv(dev);
}

/**
 * adap2pinfo - return the port_info of a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Return the port_info structure for the port of the given index.
 */
static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
{
        return netdev_priv(adap->port[idx]);
}

/**
 * netdev2adap - return the adapter structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct adapter associated with a net_device
 */
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
        return netdev2pinfo(dev)->adapter;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
{
        spin_lock_init(&q->bpoll_lock);
        q->bpoll_state = CXGB_POLL_STATE_IDLE;
}

static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
{
        bool rc = true;

        spin_lock(&q->bpoll_lock);
        if (q->bpoll_state & CXGB_POLL_LOCKED) {
                q->bpoll_state |= CXGB_POLL_STATE_NAPI_YIELD;
                rc = false;
        } else {
                q->bpoll_state = CXGB_POLL_STATE_NAPI;
        }
        spin_unlock(&q->bpoll_lock);
        return rc;
}

static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
{
        bool rc = false;

        spin_lock(&q->bpoll_lock);
        if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
                rc = true;
        q->bpoll_state = CXGB_POLL_STATE_IDLE;
        spin_unlock(&q->bpoll_lock);
        return rc;
}

static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
{
        bool rc = true;

        spin_lock_bh(&q->bpoll_lock);
        if (q->bpoll_state & CXGB_POLL_LOCKED) {
                q->bpoll_state |= CXGB_POLL_STATE_POLL_YIELD;
                rc = false;
        } else {
                q->bpoll_state |= CXGB_POLL_STATE_POLL;
        }
        spin_unlock_bh(&q->bpoll_lock);
        return rc;
}

static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
{
        bool rc = false;

        spin_lock_bh(&q->bpoll_lock);
        if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
                rc = true;
        q->bpoll_state = CXGB_POLL_STATE_IDLE;
        spin_unlock_bh(&q->bpoll_lock);
        return rc;
}

static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
{
        return q->bpoll_state & CXGB_POLL_USER_PEND;
}
#else
static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
{
}

static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
{
        return true;
}

static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
{
        return false;
}

static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
{
        return false;
}

static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
{
        return false;
}

static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
{
        return false;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

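/* The bpoll helpers above implement a small ownership protocol between the
 * NAPI handler and busy-polling sockets: whichever path takes the queue
 * first marks it NAPI- or POLL-owned, and a contender records a *_YIELD
 * flag so that the unlock side can report, via its return value, that
 * someone was turned away while the queue was held.
 */
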
/* Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(struct adapter *ap)
{
        return CHELSIO_CHIP_VERSION(ap->params.chip) |
                (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
}

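/* Worked example: a T4 rev A2 adapter (chip code T4_A2 == 0x42) yields
 * mk_adap_vers() == 0x4 | (0x2 << 10) | (1 << 16) == 0x10804.
 */
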
/* Return a queue's interrupt hold-off time in us.  0 means no timer. */
static inline unsigned int qtimer_val(const struct adapter *adap,
                                      const struct sge_rspq *q)
{
        unsigned int idx = q->intr_params >> 1;

        return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}

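/* intr_params stores the holdoff timer index in its upper bits (hence the
 * ">> 1" above); bit 0 acts as the packet-count threshold enable, with the
 * threshold itself tracked separately in pktcnt_idx.
 */
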
/* driver version & name used for ethtool_drvinfo */
extern char cxgb4_driver_name[];
extern const char cxgb4_driver_version[];

void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);

void *t4_alloc_mem(size_t size);

void t4_free_sge_resources(struct adapter *adap);
void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
irq_handler_t t4_intr_handler(struct adapter *adap);
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
                     const struct pkt_gl *gl);
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                     struct net_device *dev, int intr_idx,
                     struct sge_fl *fl, rspq_handler_t hnd, int cong);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
                         struct net_device *dev, struct netdev_queue *netdevq,
                         unsigned int iqid);
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
                          struct net_device *dev, unsigned int iqid,
                          unsigned int cmplqid);
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
                          struct net_device *dev, unsigned int iqid);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
int cxgb_busy_poll(struct napi_struct *napi);
int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
                               unsigned int cnt);
void cxgb4_set_ethtool_ops(struct net_device *netdev);
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
extern int dbfifo_int_thresh;

#define for_each_port(adapter, iter) \
        for (iter = 0; iter < (adapter)->params.nports; ++iter)

static inline int is_bypass(struct adapter *adap)
{
        return adap->params.bypass;
}

static inline int is_bypass_device(int device)
{
        /* this should be set based upon device capabilities */
        switch (device) {
        case 0x440b:
        case 0x440c:
                return 1;
        default:
                return 0;
        }
}

static inline int is_10gbt_device(int device)
{
        /* this should be set based upon device capabilities */
        switch (device) {
        case 0x4409:
        case 0x4486:
                return 1;

        default:
                return 0;
        }
}

static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
        return adap->params.vpd.cclk / 1000;
}

static inline unsigned int us_to_core_ticks(const struct adapter *adap,
                                            unsigned int us)
{
        return (us * adap->params.vpd.cclk) / 1000;
}

static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
                                            unsigned int ticks)
{
        /* add Core Clock / 2 to round ticks to nearest uS */
        return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
                adapter->params.vpd.cclk);
}

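/* Worked example (assuming vpd.cclk holds the core clock in kHz): with a
 * 250 MHz core clock, cclk == 250000, so core_ticks_per_usec() == 250,
 * us_to_core_ticks(adap, 4) == 1000 and core_ticks_to_us(adap, 1000) == 4.
 */
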
void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
                      u32 val);

int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
                            int size, void *rpl, bool sleep_ok, int timeout);
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
                    void *rpl, bool sleep_ok);

static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox,
                                     const void *cmd, int size, void *rpl,
                                     int timeout)
{
        return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true,
                                       timeout);
}

static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
                             int size, void *rpl)
{
        return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
}

static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
                                int size, void *rpl)
{
        return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
}

void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
                       unsigned int data_reg, const u32 *vals,
                       unsigned int nregs, unsigned int start_idx);
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
                      unsigned int data_reg, u32 *vals, unsigned int nregs,
                      unsigned int start_idx);
void t4_hw_pci_read_cfg4(struct adapter *adapter, int reg, u32 *val);

struct fw_filter_wr;

void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);

int t4_wait_dev_ready(void __iomem *regs);
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
                  struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);

u32 t4_read_pcie_cfg4(struct adapter *adap, int reg);
u32 t4_get_util_window(struct adapter *adap);
void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window);

#define T4_MEMORY_WRITE 0
#define T4_MEMORY_READ  1
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
                 void *buf, int dir);
static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
                                  u32 len, __be32 *buf)
{
        return t4_memory_rw(adap, 0, mtype, addr, len, buf, 0);
}

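/* In t4_memory_write() above, the bare "0" arguments select memory window 0
 * and T4_MEMORY_WRITE respectively, i.e. it is shorthand for
 * t4_memory_rw(adap, 0, mtype, addr, len, buf, T4_MEMORY_WRITE).
 */
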
unsigned int t4_get_regs_len(struct adapter *adapter);
void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);

int t4_seeprom_wp(struct adapter *adapter, bool enable);
int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
                  unsigned int nwords, u32 *data, int byte_oriented);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
int t4_load_phy_fw(struct adapter *adap,
                   int win, spinlock_t *lock,
                   int (*phy_fw_version)(const u8 *, size_t),
                   const u8 *phy_fw_data, size_t phy_fw_size);
int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver);
int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
                  const u8 *fw_data, unsigned int size, int force);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
int t4_get_tp_version(struct adapter *adapter, u32 *vers);
int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
               const u8 *fw_data, unsigned int fw_size,
               struct fw_hdr *card_fw, enum dev_state state, int *reset);
int t4_prep_adapter(struct adapter *adapter);

enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
int t4_bar2_sge_qregs(struct adapter *adapter,
                      unsigned int qid,
                      enum t4_bar2_qtype qtype,
                      u64 *pbar2_qoffset,
                      unsigned int *pbar2_qid);

unsigned int qtimer_val(const struct adapter *adap,
                        const struct sge_rspq *q);

int t4_init_devlog_params(struct adapter *adapter);
int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_init_rss_mode(struct adapter *adap, int mbox);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
                        int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
                       unsigned int flags);
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
                     unsigned int flags, unsigned int defq);
int t4_read_rss(struct adapter *adapter, u16 *entries);
void t4_read_rss_key(struct adapter *adapter, u32 *key);
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx);
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
                           u32 *valp);
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
                           u32 *vfl, u32 *vfh);
u32 t4_read_rss_pf_map(struct adapter *adapter);
u32 t4_read_rss_pf_mask(struct adapter *adapter);

unsigned int t4_get_mps_bg_map(struct adapter *adapter, int idx);
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data,
                    size_t n);
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data,
                    size_t n);
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
                unsigned int *valp);
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
                 const unsigned int *valp);
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_get_port_stats_offset(struct adapter *adap, int idx,
                              struct port_stats *stats,
                              struct port_stats *offset);
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
                            unsigned int mask, unsigned int val);
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st);
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st);
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st);
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
                         struct tp_tcp_stats *v6);
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
                       struct tp_fcoe_stats *st);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
                  const unsigned short *alpha, const unsigned short *beta);

void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);

void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);

void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
                         const u8 *addr);
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
                      u64 mask0, u64 mask1, unsigned int crc, bool enable);

int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
                enum dev_master master, enum dev_state *state);
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_early_init(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
                         unsigned int cache_line_size);
int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int nparams, const u32 *params,
                    u32 *val);
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
                       unsigned int vf, unsigned int nparams, const u32 *params,
                       u32 *val, int rw);
int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
                          unsigned int pf, unsigned int vf,
                          unsigned int nparams, const u32 *params,
                          const u32 *val, int timeout);
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
                  unsigned int vf, unsigned int nparams, const u32 *params,
                  const u32 *val);
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
                unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
                unsigned int rxqi, unsigned int rxq, unsigned int tc,
                unsigned int vi, unsigned int cmask, unsigned int pmask,
                unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
                unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
                unsigned int *rss_size);
int t4_free_vi(struct adapter *adap, unsigned int mbox,
               unsigned int pf, unsigned int vf,
               unsigned int viid);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
                  int mtu, int promisc, int all_multi, int bcast, int vlanex,
                  bool sleep_ok);
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
                      unsigned int viid, bool free, unsigned int naddr,
                      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
                  int idx, const u8 *addr, bool persist, bool add_smt);
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
                     bool ucast, u64 vec, bool sleep_ok);
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
                        unsigned int viid, bool rx_en, bool tx_en, bool dcb_en);
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
                 bool rx_en, bool tx_en);
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
                     unsigned int nblinks);
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
               unsigned int mmd, unsigned int reg, u16 *valp);
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
               unsigned int mmd, unsigned int reg, u16 val);
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
               unsigned int vf, unsigned int iqtype, unsigned int iqid,
               unsigned int fl0id, unsigned int fl1id);
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                   unsigned int vf, unsigned int eqid);
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid);
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid);
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
                         u32 addr, u32 val);
void t4_sge_decode_idma_state(struct adapter *adapter, int state);
void t4_free_mem(void *addr);
void t4_idma_monitor_init(struct adapter *adapter,
                          struct sge_idma_monitor_state *idma);
void t4_idma_monitor(struct adapter *adapter,
                     struct sge_idma_monitor_state *idma,
                     int hz, int ticks);
#endif /* __CXGB4_H__ */
1292 | int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, |
1293 | int start, int n, const u16 *rspq, unsigned int nrspq); | |
1294 | int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, | |
1295 | unsigned int flags); | |
c035e183 HS |
1296 | int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, |
1297 | unsigned int flags, unsigned int defq); | |
688ea5fe HS |
1298 | int t4_read_rss(struct adapter *adapter, u16 *entries); |
1299 | void t4_read_rss_key(struct adapter *adapter, u32 *key); | |
1300 | void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx); | |
1301 | void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, | |
1302 | u32 *valp); | |
1303 | void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, | |
1304 | u32 *vfl, u32 *vfh); | |
1305 | u32 t4_read_rss_pf_map(struct adapter *adapter); | |
1306 | u32 t4_read_rss_pf_mask(struct adapter *adapter); | |
1307 | ||
145ef8a5 | 1308 | unsigned int t4_get_mps_bg_map(struct adapter *adapter, int idx); |
b3bbe36a HS |
1309 | void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]); |
1310 | void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]); | |
e5f0e43b HS |
1311 | int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, |
1312 | size_t n); | |
c778af7d HS |
1313 | int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, |
1314 | size_t n); | |
f1ff24aa HS |
1315 | int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n, |
1316 | unsigned int *valp); | |
1317 | int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n, | |
1318 | const unsigned int *valp); | |
1319 | int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr); | |
74b3092c | 1320 | void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres); |
72aca4bf | 1321 | const char *t4_get_port_type_description(enum fw_port_type port_type); |
625ba2c2 | 1322 | void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); |
a4cfd929 HS |
1323 | void t4_get_port_stats_offset(struct adapter *adap, int idx, |
1324 | struct port_stats *stats, | |
1325 | struct port_stats *offset); | |
65046e84 | 1326 | void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p); |
625ba2c2 | 1327 | void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); |
bad43792 | 1328 | void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]); |
636f9d37 VP |
1329 | void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, |
1330 | unsigned int mask, unsigned int val); | |
2d277b3b | 1331 | void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr); |
a4cfd929 | 1332 | void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st); |
a6222975 | 1333 | void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st); |
a4cfd929 HS |
1334 | void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st); |
1335 | void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st); | |
625ba2c2 DM |
1336 | void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, |
1337 | struct tp_tcp_stats *v6); | |
a6222975 HS |
1338 | void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx, |
1339 | struct tp_fcoe_stats *st); | |
625ba2c2 DM |
1340 | void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, |
1341 | const unsigned short *alpha, const unsigned short *beta); | |
1342 | ||
797ff0f5 HS |
1343 | void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf); |
1344 | ||
f2b7e78d VP |
1345 | void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid); |
1346 | ||
625ba2c2 DM |
1347 | void t4_wol_magic_enable(struct adapter *adap, unsigned int port, |
1348 | const u8 *addr); | |
1349 | int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, | |
1350 | u64 mask0, u64 mask1, unsigned int crc, bool enable); | |
1351 | ||
1352 | int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, | |
1353 | enum dev_master master, enum dev_state *state); | |
1354 | int t4_fw_bye(struct adapter *adap, unsigned int mbox); | |
1355 | int t4_early_init(struct adapter *adap, unsigned int mbox); | |
1356 | int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset); | |
636f9d37 VP |
1357 | int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, |
1358 | unsigned int cache_line_size); | |
1359 | int t4_fw_initialize(struct adapter *adap, unsigned int mbox); | |
625ba2c2 DM |
1360 | int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, |
1361 | unsigned int vf, unsigned int nparams, const u32 *params, | |
1362 | u32 *val); | |
01b69614 HS |
1363 | int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf, |
1364 | unsigned int vf, unsigned int nparams, const u32 *params, | |
1365 | u32 *val, int rw); | |
1366 | int t4_set_params_timeout(struct adapter *adap, unsigned int mbox, | |
1367 | unsigned int pf, unsigned int vf, | |
1368 | unsigned int nparams, const u32 *params, | |
1369 | const u32 *val, int timeout); | |
625ba2c2 DM |
1370 | int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, |
1371 | unsigned int vf, unsigned int nparams, const u32 *params, | |
1372 | const u32 *val); | |
1373 | int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, | |
1374 | unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, | |
1375 | unsigned int rxqi, unsigned int rxq, unsigned int tc, | |
1376 | unsigned int vi, unsigned int cmask, unsigned int pmask, | |
1377 | unsigned int nexact, unsigned int rcaps, unsigned int wxcaps); | |
1378 | int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, | |
1379 | unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, | |
1380 | unsigned int *rss_size); | |
4f3a0fcf HS |
1381 | int t4_free_vi(struct adapter *adap, unsigned int mbox, |
1382 | unsigned int pf, unsigned int vf, | |
1383 | unsigned int viid); | |
625ba2c2 | 1384 | int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, |
f8f5aafa DM |
1385 | int mtu, int promisc, int all_multi, int bcast, int vlanex, |
1386 | bool sleep_ok); | |
625ba2c2 DM |
1387 | int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, |
1388 | unsigned int viid, bool free, unsigned int naddr, | |
1389 | const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok); | |
1390 | int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, | |
1391 | int idx, const u8 *addr, bool persist, bool add_smt); | |
1392 | int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, | |
1393 | bool ucast, u64 vec, bool sleep_ok); | |
688848b1 AB |
1394 | int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, |
1395 | unsigned int viid, bool rx_en, bool tx_en, bool dcb_en); | |
625ba2c2 DM |
1396 | int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, |
1397 | bool rx_en, bool tx_en); | |
1398 | int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, | |
1399 | unsigned int nblinks); | |
1400 | int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, | |
1401 | unsigned int mmd, unsigned int reg, u16 *valp); | |
1402 | int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, | |
1403 | unsigned int mmd, unsigned int reg, u16 val); | |
625ba2c2 DM |
1404 | int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, |
1405 | unsigned int vf, unsigned int iqtype, unsigned int iqid, | |
1406 | unsigned int fl0id, unsigned int fl1id); | |
1407 | int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, | |
1408 | unsigned int vf, unsigned int eqid); | |
1409 | int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, | |
1410 | unsigned int vf, unsigned int eqid); | |
1411 | int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, | |
1412 | unsigned int vf, unsigned int eqid); | |
5d700ecb | 1413 | int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox); |
625ba2c2 | 1414 | int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); |
881806bc VP |
1415 | void t4_db_full(struct adapter *adapter); |
1416 | void t4_db_dropped(struct adapter *adapter); | |
8caa1e84 VP |
1417 | int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, |
1418 | u32 addr, u32 val); | |
68bce192 | 1419 | void t4_sge_decode_idma_state(struct adapter *adapter, int state); |
fd88b31a | 1420 | void t4_free_mem(void *addr); |
a3bfb617 HS |
1421 | void t4_idma_monitor_init(struct adapter *adapter, |
1422 | struct sge_idma_monitor_state *idma); | |
1423 | void t4_idma_monitor(struct adapter *adapter, | |
1424 | struct sge_idma_monitor_state *idma, | |
1425 | int hz, int ticks); | |
625ba2c2 | 1426 | #endif /* __CXGB4_H__ */ |