/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __CXGB4_H__
#define __CXGB4_H__

#include "t4_hw.h"

#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include "cxgb4_uld.h"

#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)

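/* Illustrative usage sketch (editor's note, not from the driver itself):
 * CH_WARN() simply forwards a printf-style message to dev_warn() on the
 * adapter's underlying PCI device, e.g.
 *
 *	CH_WARN(adap, "SGE queue %u appears stalled\n", qid);
 *
 * where "qid" is a hypothetical local variable.
 */
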
enum {
	MAX_NPORTS = 4,     /* max # of ports */
	SERNUM_LEN = 24,    /* Serial # length */
	EC_LEN     = 16,    /* E/C length */
	ID_LEN     = 16,    /* ID length */
	PN_LEN     = 16,    /* Part Number length */
};

enum {
	MEM_EDC0,
	MEM_EDC1,
	MEM_MC,
	MEM_MC0 = MEM_MC,
	MEM_MC1
};

enum {
	MEMWIN0_APERTURE = 2048,
	MEMWIN0_BASE     = 0x1b800,
	MEMWIN1_APERTURE = 32768,
	MEMWIN1_BASE     = 0x28000,
	MEMWIN1_BASE_T5  = 0x52000,
	MEMWIN2_APERTURE = 65536,
	MEMWIN2_BASE     = 0x30000,
	MEMWIN2_APERTURE_T5 = 131072,
	MEMWIN2_BASE_T5  = 0x60000,
};

enum dev_master {
	MASTER_CANT,
	MASTER_MAY,
	MASTER_MUST
};

enum dev_state {
	DEV_STATE_UNINIT,
	DEV_STATE_INIT,
	DEV_STATE_ERR
};

enum {
	PAUSE_RX      = 1 << 0,
	PAUSE_TX      = 1 << 1,
	PAUSE_AUTONEG = 1 << 2
};

struct port_stats {
	u64 tx_octets;            /* total # of octets in good frames */
	u64 tx_frames;            /* all good frames */
	u64 tx_bcast_frames;      /* all broadcast frames */
	u64 tx_mcast_frames;      /* all multicast frames */
	u64 tx_ucast_frames;      /* all unicast frames */
	u64 tx_error_frames;      /* all error frames */

	u64 tx_frames_64;         /* # of Tx frames in a particular range */
	u64 tx_frames_65_127;
	u64 tx_frames_128_255;
	u64 tx_frames_256_511;
	u64 tx_frames_512_1023;
	u64 tx_frames_1024_1518;
	u64 tx_frames_1519_max;

	u64 tx_drop;              /* # of dropped Tx frames */
	u64 tx_pause;             /* # of transmitted pause frames */
	u64 tx_ppp0;              /* # of transmitted PPP prio 0 frames */
	u64 tx_ppp1;              /* # of transmitted PPP prio 1 frames */
	u64 tx_ppp2;              /* # of transmitted PPP prio 2 frames */
	u64 tx_ppp3;              /* # of transmitted PPP prio 3 frames */
	u64 tx_ppp4;              /* # of transmitted PPP prio 4 frames */
	u64 tx_ppp5;              /* # of transmitted PPP prio 5 frames */
	u64 tx_ppp6;              /* # of transmitted PPP prio 6 frames */
	u64 tx_ppp7;              /* # of transmitted PPP prio 7 frames */

	u64 rx_octets;            /* total # of octets in good frames */
	u64 rx_frames;            /* all good frames */
	u64 rx_bcast_frames;      /* all broadcast frames */
	u64 rx_mcast_frames;      /* all multicast frames */
	u64 rx_ucast_frames;      /* all unicast frames */
	u64 rx_too_long;          /* # of frames exceeding MTU */
	u64 rx_jabber;            /* # of jabber frames */
	u64 rx_fcs_err;           /* # of received frames with bad FCS */
	u64 rx_len_err;           /* # of received frames with length error */
	u64 rx_symbol_err;        /* symbol errors */
	u64 rx_runt;              /* # of short frames */

	u64 rx_frames_64;         /* # of Rx frames in a particular range */
	u64 rx_frames_65_127;
	u64 rx_frames_128_255;
	u64 rx_frames_256_511;
	u64 rx_frames_512_1023;
	u64 rx_frames_1024_1518;
	u64 rx_frames_1519_max;

	u64 rx_pause;             /* # of received pause frames */
	u64 rx_ppp0;              /* # of received PPP prio 0 frames */
	u64 rx_ppp1;              /* # of received PPP prio 1 frames */
	u64 rx_ppp2;              /* # of received PPP prio 2 frames */
	u64 rx_ppp3;              /* # of received PPP prio 3 frames */
	u64 rx_ppp4;              /* # of received PPP prio 4 frames */
	u64 rx_ppp5;              /* # of received PPP prio 5 frames */
	u64 rx_ppp6;              /* # of received PPP prio 6 frames */
	u64 rx_ppp7;              /* # of received PPP prio 7 frames */

	u64 rx_ovflow0;           /* drops due to buffer-group 0 overflows */
	u64 rx_ovflow1;           /* drops due to buffer-group 1 overflows */
	u64 rx_ovflow2;           /* drops due to buffer-group 2 overflows */
	u64 rx_ovflow3;           /* drops due to buffer-group 3 overflows */
	u64 rx_trunc0;            /* buffer-group 0 truncated packets */
	u64 rx_trunc1;            /* buffer-group 1 truncated packets */
	u64 rx_trunc2;            /* buffer-group 2 truncated packets */
	u64 rx_trunc3;            /* buffer-group 3 truncated packets */
};

struct lb_port_stats {
	u64 octets;
	u64 frames;
	u64 bcast_frames;
	u64 mcast_frames;
	u64 ucast_frames;
	u64 error_frames;

	u64 frames_64;
	u64 frames_65_127;
	u64 frames_128_255;
	u64 frames_256_511;
	u64 frames_512_1023;
	u64 frames_1024_1518;
	u64 frames_1519_max;

	u64 drop;

	u64 ovflow0;
	u64 ovflow1;
	u64 ovflow2;
	u64 ovflow3;
	u64 trunc0;
	u64 trunc1;
	u64 trunc2;
	u64 trunc3;
};

struct tp_tcp_stats {
	u32 tcpOutRsts;
	u64 tcpInSegs;
	u64 tcpOutSegs;
	u64 tcpRetransSegs;
};

struct tp_err_stats {
	u32 macInErrs[4];
	u32 hdrInErrs[4];
	u32 tcpInErrs[4];
	u32 tnlCongDrops[4];
	u32 ofldChanDrops[4];
	u32 tnlTxDrops[4];
	u32 ofldVlanDrops[4];
	u32 tcp6InErrs[4];
	u32 ofldNoNeigh;
	u32 ofldCongDefer;
};

struct sge_params {
	u32 hps;    /* host page size for our PF/VF */
	u32 eq_qpp; /* egress queues/page for our PF/VF */
	u32 iq_qpp; /* ingress queues/page for our PF/VF */
};

struct tp_params {
	unsigned int ntxchan;          /* # of Tx channels */
	unsigned int tre;              /* log2 of core clocks per TP tick */
	unsigned int la_mask;          /* what events are recorded by TP LA */
	unsigned short tx_modq_map;    /* TX modulation scheduler queue to */
				       /* channel map */

	uint32_t dack_re;              /* DACK timer resolution */
	unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */

	u32 vlan_pri_map;              /* cached TP_VLAN_PRI_MAP */
	u32 ingress_config;            /* cached TP_INGRESS_CONFIG */

	/* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets.  This is a
	 * subset of the set of fields which may be present in the Compressed
	 * Filter Tuple portion of filters and TCP TCB connections.  The
	 * fields which are present are controlled by the TP_VLAN_PRI_MAP.
	 * Since a variable number of fields may or may not be present, their
	 * shifted field positions within the Compressed Filter Tuple may
	 * vary, or not even be present if the field isn't selected in
	 * TP_VLAN_PRI_MAP.  Since some of these fields are needed in various
	 * places we store their offsets here, or a -1 if the field isn't
	 * present.
	 */
	int vlan_shift;
	int vnic_shift;
	int port_shift;
	int protocol_shift;
};

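/* Illustrative only (editor's sketch, not part of the driver): because the
 * compressed filter tuple layout depends on TP_VLAN_PRI_MAP, code that needs
 * one of these fields is expected to test the cached shift for -1 before
 * using it.  A minimal sketch, assuming "adap" points to a fully initialized
 * struct adapter (defined later in this header) and "ftuple" is a
 * hypothetical u64 holding the compressed tuple being built:
 *
 *	const struct tp_params *tp = &adap->params.tp;
 *
 *	if (tp->protocol_shift >= 0)
 *		ftuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
 *	// else: the protocol field is not part of the compressed tuple
 */
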
struct vpd_params {
	unsigned int cclk;
	u8 ec[EC_LEN + 1];
	u8 sn[SERNUM_LEN + 1];
	u8 id[ID_LEN + 1];
	u8 pn[PN_LEN + 1];
};

struct pci_params {
	unsigned char speed;
	unsigned char width;
};

#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_FPGA          0x100
#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)

#define CHELSIO_T4		0x4
#define CHELSIO_T5		0x5

enum chip_type {
	T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
	T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
	T4_FIRST_REV = T4_A1,
	T4_LAST_REV  = T4_A2,

	T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
	T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
	T5_FIRST_REV = T5_A0,
	T5_LAST_REV  = T5_A1,
};

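/* Worked example (editor's note, not from the source): the chip code packs
 * the chip version in the high nibble and the revision in the low nibble, so
 *
 *	T5_A0 == CHELSIO_CHIP_CODE(CHELSIO_T5, 0) == (0x5 << 4) | 0 == 0x50
 *	CHELSIO_CHIP_VERSION(0x50) == 0x5   (i.e. CHELSIO_T5)
 *	CHELSIO_CHIP_RELEASE(0x50) == 0x0
 *
 * which is what is_t4()/is_t5() further down in this header rely on.
 */
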
struct devlog_params {
	u32 memtype;             /* which memory (EDC0, EDC1, MC) */
	u32 start;               /* start of log in firmware memory */
	u32 size;                /* size of log */
};

struct adapter_params {
	struct sge_params sge;
	struct tp_params tp;
	struct vpd_params vpd;
	struct pci_params pci;
	struct devlog_params devlog;
	enum pcie_memwin drv_memwin;

	unsigned int cim_la_size;

	unsigned int sf_size;     /* serial flash size in bytes */
	unsigned int sf_nsec;     /* # of flash sectors */
	unsigned int sf_fw_start; /* start of FW image in flash */

	unsigned int fw_vers;
	unsigned int tp_vers;
	u8 api_vers[7];

	unsigned short mtus[NMTUS];
	unsigned short a_wnd[NCCTRL_WIN];
	unsigned short b_wnd[NCCTRL_WIN];

	unsigned char nports;     /* # of ethernet ports */
	unsigned char portvec;
	enum chip_type chip;      /* chip code */
	unsigned char offload;

	unsigned char bypass;

	unsigned int ofldq_wr_cred;
	bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */

	unsigned int max_ordird_qp;   /* Max read depth per RDMA QP */
	unsigned int max_ird_adapter; /* Max read depth per adapter */
};

#include "t4fw_api.h"

#define FW_VERSION(chip) ( \
		FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
		FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \
		FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \
		FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD))
#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)

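/* Editor's example (hedged, not from the source): FW_VERSION() relies on
 * token pasting with a chip prefix, so a use such as FW_VERSION(T4) expands to
 *
 *	FW_HDR_FW_VER_MAJOR_G(T4FW_VERSION_MAJOR) |
 *	FW_HDR_FW_VER_MINOR_G(T4FW_VERSION_MINOR) |
 *	FW_HDR_FW_VER_MICRO_G(T4FW_VERSION_MICRO) |
 *	FW_HDR_FW_VER_BUILD_G(T4FW_VERSION_BUILD)
 *
 * where the T4FW_VERSION_* constants and the _G() field builders are assumed
 * to come from the firmware API headers rather than this file.
 */
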
struct fw_info {
	u8 chip;
	char *fs_name;
	char *fw_mod_name;
	struct fw_hdr fw_hdr;
};


struct trace_params {
	u32 data[TRACE_LEN / 4];
	u32 mask[TRACE_LEN / 4];
	unsigned short snap_len;
	unsigned short min_len;
	unsigned char skip_ofst;
	unsigned char skip_len;
	unsigned char invert;
	unsigned char port;
};

struct link_config {
	unsigned short supported;       /* link capabilities */
	unsigned short advertising;     /* advertised capabilities */
	unsigned short requested_speed; /* speed user has requested */
	unsigned short speed;           /* actual link speed */
	unsigned char requested_fc;     /* flow control user has requested */
	unsigned char fc;               /* actual link flow control */
	unsigned char autoneg;          /* autonegotiating? */
	unsigned char link_ok;          /* link up? */
};

#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)

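/* Editor's note (hedged example): FW_LEN16() converts the byte size of a
 * firmware command structure into the 16-byte units the firmware expects,
 * wrapped in the FW_CMD_LEN16_V() field builder from t4fw_api.h.  For a
 * hypothetical 32-byte command structure it would expand to FW_CMD_LEN16_V(2).
 */
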
enum {
	MAX_ETH_QSETS = 32,       /* # of Ethernet Tx/Rx queue sets */
	MAX_OFLD_QSETS = 16,      /* # of offload Tx/Rx queue sets */
	MAX_CTRL_QUEUES = NCHAN,  /* # of control Tx queues */
	MAX_RDMA_QUEUES = NCHAN,  /* # of streaming RDMA Rx queues */
	MAX_RDMA_CIQS = 32,       /* # of RDMA concentrator IQs */
	MAX_ISCSI_QUEUES = NCHAN, /* # of streaming iSCSI Rx queues */
};

enum {
	INGQ_EXTRAS = 2,          /* firmware event queue and */
				  /*   forwarded interrupts */
	MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
		   + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
	MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
		   + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
};

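/* Worked arithmetic (editor's note; assumes NCHAN, from t4_hw.h, is 4):
 *
 *	MAX_EGRQ = 32*2 + 16*2 + 4 + 4 + 4      = 108 egress queues
 *	MAX_INGQ = 32 + 16 + 4 + 32 + 4 + 2     =  90 ingress queues
 *
 * These bounds size the egr_map[]/ingr_map[] tables in struct sge below.
 */
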
struct adapter;
struct sge_rspq;

#include "cxgb4_dcb.h"

struct port_info {
	struct adapter *adapter;
	u16 viid;
	s16 xact_addr_filt;         /* index of exact MAC address filter */
	u16 rss_size;               /* size of VI's RSS table slice */
	s8 mdio_addr;
	enum fw_port_type port_type;
	u8 mod_type;
	u8 port_id;
	u8 tx_chan;
	u8 lport;                   /* associated offload logical port */
	u8 nqsets;                  /* # of qsets */
	u8 first_qset;              /* index of first qset */
	u8 rss_mode;
	struct link_config link_cfg;
	u16 *rss;
#ifdef CONFIG_CHELSIO_T4_DCB
	struct port_dcb_info dcb;   /* Data Center Bridging support */
#endif
};

struct dentry;
struct work_struct;

enum {                              /* adapter flags */
	FULL_INIT_DONE     = (1 << 0),
	DEV_ENABLED        = (1 << 1),
	USING_MSI          = (1 << 2),
	USING_MSIX         = (1 << 3),
	FW_OK              = (1 << 4),
	RSS_TNLALLLOOKUP   = (1 << 5),
	USING_SOFT_PARAMS  = (1 << 6),
	MASTER_PF          = (1 << 7),
	FW_OFLD_CONN       = (1 << 9),
};

struct rx_sw_desc;

struct sge_fl {                     /* SGE free-buffer queue state */
	unsigned int avail;         /* # of available Rx buffers */
	unsigned int pend_cred;     /* new buffers since last FL DB ring */
	unsigned int cidx;          /* consumer index */
	unsigned int pidx;          /* producer index */
	unsigned long alloc_failed; /* # of times buffer allocation failed */
	unsigned long large_alloc_failed;
	unsigned long starving;
	/* RO fields */
	unsigned int cntxt_id;      /* SGE context id for the free list */
	unsigned int size;          /* capacity of free list */
	struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */
	__be64 *desc;               /* address of HW Rx descriptor ring */
	dma_addr_t addr;            /* bus address of HW ring start */
	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */
};

/* A packet gather list */
struct pkt_gl {
	struct page_frag frags[MAX_SKB_FRAGS];
	void *va;                   /* virtual address of first byte */
	unsigned int nfrags;        /* # of fragments */
	unsigned int tot_len;       /* total length of fragments */
};

typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
			      const struct pkt_gl *gl);

struct sge_rspq {                   /* state for an SGE response queue */
	struct napi_struct napi;
	const __be64 *cur_desc;     /* current descriptor in queue */
	unsigned int cidx;          /* consumer index */
	u8 gen;                     /* current generation bit */
	u8 intr_params;             /* interrupt holdoff parameters */
	u8 next_intr_params;        /* holdoff params for next interrupt */
	u8 adaptive_rx;
	u8 pktcnt_idx;              /* interrupt packet threshold */
	u8 uld;                     /* ULD handling this queue */
	u8 idx;                     /* queue index within its group */
	int offset;                 /* offset into current Rx buffer */
	u16 cntxt_id;               /* SGE context id for the response q */
	u16 abs_id;                 /* absolute SGE id for the response q */
	__be64 *desc;               /* address of HW response ring */
	dma_addr_t phys_addr;       /* physical address of the ring */
	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */
	unsigned int iqe_len;       /* entry size */
	unsigned int size;          /* capacity of response queue */
	struct adapter *adap;
	struct net_device *netdev;  /* associated net device */
	rspq_handler_t handler;
#ifdef CONFIG_NET_RX_BUSY_POLL
#define CXGB_POLL_STATE_IDLE		0
#define CXGB_POLL_STATE_NAPI		BIT(0) /* NAPI owns this poll */
#define CXGB_POLL_STATE_POLL		BIT(1) /* poll owns this poll */
#define CXGB_POLL_STATE_NAPI_YIELD	BIT(2) /* NAPI yielded this poll */
#define CXGB_POLL_STATE_POLL_YIELD	BIT(3) /* poll yielded this poll */
#define CXGB_POLL_YIELD			(CXGB_POLL_STATE_NAPI_YIELD | \
					 CXGB_POLL_STATE_POLL_YIELD)
#define CXGB_POLL_LOCKED		(CXGB_POLL_STATE_NAPI | \
					 CXGB_POLL_STATE_POLL)
#define CXGB_POLL_USER_PEND		(CXGB_POLL_STATE_POLL | \
					 CXGB_POLL_STATE_POLL_YIELD)
	unsigned int bpoll_state;
	spinlock_t bpoll_lock;      /* lock for busy poll */
#endif /* CONFIG_NET_RX_BUSY_POLL */

};

struct sge_eth_stats {              /* Ethernet queue statistics */
	unsigned long pkts;         /* # of ethernet packets */
	unsigned long lro_pkts;     /* # of LRO super packets */
	unsigned long lro_merged;   /* # of wire packets merged by LRO */
	unsigned long rx_cso;       /* # of Rx checksum offloads */
	unsigned long vlan_ex;      /* # of Rx VLAN extractions */
	unsigned long rx_drops;     /* # of packets dropped due to no mem */
};

struct sge_eth_rxq {                /* SW Ethernet Rx queue */
	struct sge_rspq rspq;
	struct sge_fl fl;
	struct sge_eth_stats stats;
} ____cacheline_aligned_in_smp;

struct sge_ofld_stats {             /* offload queue statistics */
	unsigned long pkts;         /* # of packets */
	unsigned long imm;          /* # of immediate-data packets */
	unsigned long an;           /* # of asynchronous notifications */
	unsigned long nomem;        /* # of responses deferred due to no mem */
};

struct sge_ofld_rxq {               /* SW offload Rx queue */
	struct sge_rspq rspq;
	struct sge_fl fl;
	struct sge_ofld_stats stats;
} ____cacheline_aligned_in_smp;

struct tx_desc {
	__be64 flit[8];
};

struct tx_sw_desc;

struct sge_txq {
	unsigned int in_use;        /* # of in-use Tx descriptors */
	unsigned int size;          /* # of descriptors */
	unsigned int cidx;          /* SW consumer index */
	unsigned int pidx;          /* producer index */
	unsigned long stops;        /* # of times q has been stopped */
	unsigned long restarts;     /* # of queue restarts */
	unsigned int cntxt_id;      /* SGE context id for the Tx q */
	struct tx_desc *desc;       /* address of HW Tx descriptor ring */
	struct tx_sw_desc *sdesc;   /* address of SW Tx descriptor ring */
	struct sge_qstat *stat;     /* queue status entry */
	dma_addr_t phys_addr;       /* physical address of the ring */
	spinlock_t db_lock;
	int db_disabled;
	unsigned short db_pidx;
	unsigned short db_pidx_inc;
	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */
};

struct sge_eth_txq {                /* state for an SGE Ethernet Tx queue */
	struct sge_txq q;
	struct netdev_queue *txq;   /* associated netdev TX queue */
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 dcb_prio;                /* DCB Priority bound to queue */
#endif
	unsigned long tso;          /* # of TSO requests */
	unsigned long tx_cso;       /* # of Tx checksum offloads */
	unsigned long vlan_ins;     /* # of Tx VLAN insertions */
	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ofld_txq {               /* state for an SGE offload Tx queue */
	struct sge_txq q;
	struct adapter *adap;
	struct sk_buff_head sendq;  /* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	u8 full;                    /* the Tx ring is full */
	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ctrl_txq {               /* state for an SGE control Tx queue */
	struct sge_txq q;
	struct adapter *adap;
	struct sk_buff_head sendq;  /* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	u8 full;                    /* the Tx ring is full */
} ____cacheline_aligned_in_smp;

struct sge {
	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
	struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
	struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];

	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
	struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
	struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
	struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;

	struct sge_rspq intrq ____cacheline_aligned_in_smp;
	spinlock_t intrq_lock;

	u16 max_ethqsets;           /* # of available Ethernet queue sets */
	u16 ethqsets;               /* # of active Ethernet queue sets */
	u16 ethtxq_rover;           /* Tx queue to clean up next */
	u16 ofldqsets;              /* # of active offload queue sets */
	u16 rdmaqs;                 /* # of available RDMA Rx queues */
	u16 rdmaciqs;               /* # of available RDMA concentrator IQs */
	u16 ofld_rxq[MAX_OFLD_QSETS];
	u16 rdma_rxq[MAX_RDMA_QUEUES];
	u16 rdma_ciq[MAX_RDMA_CIQS];
	u16 timer_val[SGE_NTIMERS];
	u8 counter_val[SGE_NCOUNTERS];
	u32 fl_pg_order;            /* large page allocation size */
	u32 stat_len;               /* length of status page at ring end */
	u32 pktshift;               /* padding between CPL & packet data */
	u32 fl_align;               /* response queue message alignment */
	u32 fl_starve_thres;        /* Free List starvation threshold */

	/* State variables for detecting an SGE Ingress DMA hang */
	unsigned int idma_1s_thresh;  /* SGE same State Counter 1s threshold */
	unsigned int idma_stalled[2]; /* SGE synthesized stalled timers in HZ */
	unsigned int idma_state[2];   /* SGE IDMA Hang detect state */
	unsigned int idma_qid[2];     /* SGE IDMA Hung Ingress Queue ID */

	unsigned int egr_start;
	unsigned int ingr_start;
	void *egr_map[MAX_EGRQ];    /* qid->queue egress queue map */
	struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
	DECLARE_BITMAP(starving_fl, MAX_EGRQ);
	DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
	struct timer_list rx_timer; /* refills starving FLs */
	struct timer_list tx_timer; /* checks Tx queues */
};

#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)

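/* Editor's sketch (not from the source): these iterators walk the active
 * queues recorded in struct sge above, e.g.
 *
 *	int i;
 *
 *	for_each_ethrxq(&adap->sge, i)
 *		process(&adap->sge.ethrxq[i]);
 *
 * where "adap" is a struct adapter pointer and process() is a hypothetical
 * per-queue helper.
 */
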
struct l2t_data;

#ifdef CONFIG_PCI_IOV

/* T4 supports SR-IOV on PF0-3 and T5 on PF0-7.  However, the Serial
 * Configuration used to initialize T5 enables SR-IOV only on PF0-3 in order
 * to keep things simple.
 */
#define NUM_OF_PF_WITH_SRIOV 4

#endif

struct adapter {
	void __iomem *regs;
	void __iomem *bar2;
	u32 t4_bar0;
	struct pci_dev *pdev;
	struct device *pdev_dev;
	unsigned int mbox;
	unsigned int fn;
	unsigned int flags;
	enum chip_type chip;

	int msg_enable;

	struct adapter_params params;
	struct cxgb4_virt_res vres;
	unsigned int swintr;

	unsigned int wol;

	struct {
		unsigned short vec;
		char desc[IFNAMSIZ + 10];
	} msix_info[MAX_INGQ + 1];

	struct sge sge;

	struct net_device *port[MAX_NPORTS];
	u8 chan_map[NCHAN];         /* channel -> port map */

	u32 filter_mode;
	unsigned int l2t_start;
	unsigned int l2t_end;
	struct l2t_data *l2t;
	unsigned int clipt_start;
	unsigned int clipt_end;
	struct clip_tbl *clipt;
	void *uld_handle[CXGB4_ULD_MAX];
	struct list_head list_node;
	struct list_head rcu_node;

	struct tid_info tids;
	void **tid_release_head;
	spinlock_t tid_release_lock;
	struct workqueue_struct *workq;
	struct work_struct tid_release_task;
	struct work_struct db_full_task;
	struct work_struct db_drop_task;
	bool tid_release_task_busy;

	struct dentry *debugfs_root;

	spinlock_t stats_lock;
	spinlock_t win0_lock ____cacheline_aligned_in_smp;
};

/* Defined bit width of user definable filter tuples
 */
#define ETHTYPE_BITWIDTH 16
#define FRAG_BITWIDTH 1
#define MACIDX_BITWIDTH 9
#define FCOE_BITWIDTH 1
#define IPORT_BITWIDTH 3
#define MATCHTYPE_BITWIDTH 3
#define PROTO_BITWIDTH 8
#define TOS_BITWIDTH 8
#define PF_BITWIDTH 8
#define VF_BITWIDTH 8
#define IVLAN_BITWIDTH 16
#define OVLAN_BITWIDTH 16

/* Filter matching rules.  These consist of a set of ingress packet field
 * (value, mask) tuples.  The associated ingress packet field matches the
 * tuple when ((field & mask) == value).  (Thus a wildcard "don't care" field
 * rule can be constructed by specifying a tuple of (0, 0).)  A filter rule
 * matches an ingress packet when all of the individual field matching rules
 * are true.
 *
 * Partial field masks are always valid, however, while it may be easy to
 * understand their meanings for some fields (e.g. IP address to match a
 * subnet), for others making sensible partial masks is less intuitive (e.g.
 * MPS match type) ...
 *
 * Most of the following data structures are modeled on T4 capabilities.
 * Drivers for earlier chips use the subsets which make sense for those chips.
 * We really need to come up with a hardware-independent mechanism to
 * represent hardware filter capabilities ...
 */
struct ch_filter_tuple {
	/* Compressed header matching field rules.  The TP_VLAN_PRI_MAP
	 * register selects which of these fields will participate in the
	 * filter match rules -- up to a maximum of 36 bits.  Because
	 * TP_VLAN_PRI_MAP is a global register, all filters must use the same
	 * set of fields.
	 */
	uint32_t ethtype:ETHTYPE_BITWIDTH;     /* Ethernet type */
	uint32_t frag:FRAG_BITWIDTH;           /* IP fragmentation header */
	uint32_t ivlan_vld:1;                  /* inner VLAN valid */
	uint32_t ovlan_vld:1;                  /* outer VLAN valid */
	uint32_t pfvf_vld:1;                   /* PF/VF valid */
	uint32_t macidx:MACIDX_BITWIDTH;       /* exact match MAC index */
	uint32_t fcoe:FCOE_BITWIDTH;           /* FCoE packet */
	uint32_t iport:IPORT_BITWIDTH;         /* ingress port */
	uint32_t matchtype:MATCHTYPE_BITWIDTH; /* MPS match type */
	uint32_t proto:PROTO_BITWIDTH;         /* protocol type */
	uint32_t tos:TOS_BITWIDTH;             /* TOS/Traffic Type */
	uint32_t pf:PF_BITWIDTH;               /* PCI-E PF ID */
	uint32_t vf:VF_BITWIDTH;               /* PCI-E VF ID */
	uint32_t ivlan:IVLAN_BITWIDTH;         /* inner VLAN */
	uint32_t ovlan:OVLAN_BITWIDTH;         /* outer VLAN */

	/* Uncompressed header matching field rules.  These are always
	 * available for field rules.
	 */
	uint8_t lip[16];        /* local IP address (IPv4 in [3:0]) */
	uint8_t fip[16];        /* foreign IP address (IPv4 in [3:0]) */
	uint16_t lport;         /* local port */
	uint16_t fport;         /* foreign port */
};

/* A filter ioctl command.
 */
struct ch_filter_specification {
	/* Administrative fields for filter.
	 */
	uint32_t hitcnts:1;     /* count filter hits in TCB */
	uint32_t prio:1;        /* filter has priority over active/server */

	/* Fundamental filter typing.  This is the one element of filter
	 * matching that doesn't exist as a (value, mask) tuple.
	 */
	uint32_t type:1;        /* 0 => IPv4, 1 => IPv6 */

	/* Packet dispatch information.  Ingress packets which match the
	 * filter rules will be dropped, passed to the host or switched back
	 * out as egress packets.
	 */
	uint32_t action:2;      /* drop, pass, switch */

	uint32_t rpttid:1;      /* report TID in RSS hash field */

	uint32_t dirsteer:1;    /* 0 => RSS, 1 => steer to iq */
	uint32_t iq:10;         /* ingress queue */

	uint32_t maskhash:1;    /* dirsteer=0: store RSS hash in TCB */
	uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */
				/*             1 => TCB contains IQ ID */

	/* Switch proxy/rewrite fields.  An ingress packet which matches a
	 * filter with "switch" set will be looped back out as an egress
	 * packet -- potentially with some Ethernet header rewriting.
	 */
	uint32_t eport:2;       /* egress port to switch packet out */
	uint32_t newdmac:1;     /* rewrite destination MAC address */
	uint32_t newsmac:1;     /* rewrite source MAC address */
	uint32_t newvlan:2;     /* rewrite VLAN Tag */
	uint8_t dmac[ETH_ALEN]; /* new destination MAC address */
	uint8_t smac[ETH_ALEN]; /* new source MAC address */
	uint16_t vlan;          /* VLAN Tag to insert */

	/* Filter rule value/mask pairs.
	 */
	struct ch_filter_tuple val;
	struct ch_filter_tuple mask;
};

enum {
	FILTER_PASS = 0,        /* default */
	FILTER_DROP,
	FILTER_SWITCH
};

enum {
	VLAN_NOCHANGE = 0,      /* default */
	VLAN_REMOVE,
	VLAN_INSERT,
	VLAN_REWRITE
};

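/* Editor's sketch, not from the source: a minimal example of filling in a
 * ch_filter_specification to drop all IPv4 traffic arriving on ingress
 * port 1.  Only fields defined above are used; how the specification is then
 * handed to the hardware (the driver's filter-set path) is outside this
 * header.
 *
 *	struct ch_filter_specification fs = { 0 };
 *
 *	fs.type = 0;			// IPv4
 *	fs.action = FILTER_DROP;
 *	fs.val.iport = 1;		// match ingress port 1 ...
 *	fs.mask.iport = 0x7;		// ... exactly (IPORT is 3 bits wide)
 */
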
static inline int is_t5(enum chip_type chip)
{
	return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5;
}

static inline int is_t4(enum chip_type chip)
{
	return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
}

static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
	return readl(adap->regs + reg_addr);
}

static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
{
	writel(val, adap->regs + reg_addr);
}

#ifndef readq
static inline u64 readq(const volatile void __iomem *addr)
{
	return readl(addr) + ((u64)readl(addr + 4) << 32);
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr + 4);
}
#endif

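/* Editor's note (not from the source): the readq()/writeq() fallbacks above
 * are only compiled on platforms that lack native 64-bit MMIO accessors.
 * They issue two 32-bit accesses (low dword first), so a 64-bit register
 * access through them is not atomic with respect to the hardware.
 */
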
static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
{
	return readq(adap->regs + reg_addr);
}

static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
{
	writeq(val, adap->regs + reg_addr);
}

/**
 * netdev2pinfo - return the port_info structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct port_info associated with a net_device
 */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
	return netdev_priv(dev);
}

/**
 * adap2pinfo - return the port_info of a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Return the port_info structure for the port of the given index.
 */
static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
{
	return netdev_priv(adap->port[idx]);
}

/**
 * netdev2adap - return the adapter structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct adapter associated with a net_device
 */
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
	return netdev2pinfo(dev)->adapter;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
{
	spin_lock_init(&q->bpoll_lock);
	q->bpoll_state = CXGB_POLL_STATE_IDLE;
}

static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
{
	bool rc = true;

	spin_lock(&q->bpoll_lock);
	if (q->bpoll_state & CXGB_POLL_LOCKED) {
		q->bpoll_state |= CXGB_POLL_STATE_NAPI_YIELD;
		rc = false;
	} else {
		q->bpoll_state = CXGB_POLL_STATE_NAPI;
	}
	spin_unlock(&q->bpoll_lock);
	return rc;
}

static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
{
	bool rc = false;

	spin_lock(&q->bpoll_lock);
	if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
		rc = true;
	q->bpoll_state = CXGB_POLL_STATE_IDLE;
	spin_unlock(&q->bpoll_lock);
	return rc;
}

static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
{
	bool rc = true;

	spin_lock_bh(&q->bpoll_lock);
	if (q->bpoll_state & CXGB_POLL_LOCKED) {
		q->bpoll_state |= CXGB_POLL_STATE_POLL_YIELD;
		rc = false;
	} else {
		q->bpoll_state |= CXGB_POLL_STATE_POLL;
	}
	spin_unlock_bh(&q->bpoll_lock);
	return rc;
}

static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
{
	bool rc = false;

	spin_lock_bh(&q->bpoll_lock);
	if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
		rc = true;
	q->bpoll_state = CXGB_POLL_STATE_IDLE;
	spin_unlock_bh(&q->bpoll_lock);
	return rc;
}

static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
{
	return q->bpoll_state & CXGB_POLL_USER_PEND;
}
#else
static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
{
}

static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
{
	return true;
}

static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
{
	return false;
}

static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
{
	return false;
}

static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
{
	return false;
}

static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
{
	return false;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

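/* Editor's sketch of how the helpers above are meant to be paired (hedged;
 * the actual call sites live in the driver's .c files, not here): the NAPI
 * handler brackets its work with cxgb_poll_lock_napi()/cxgb_poll_unlock_napi()
 * and backs off if the lock is refused, while the busy-poll entry point does
 * the same with cxgb_poll_lock_poll()/cxgb_poll_unlock_poll(), e.g.
 *
 *	if (!cxgb_poll_lock_napi(q))
 *		return budget;		// busy-poll owns the queue, retry later
 *	... process responses ...
 *	cxgb_poll_unlock_napi(q);
 */
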
void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);

void *t4_alloc_mem(size_t size);

void t4_free_sge_resources(struct adapter *adap);
void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
irq_handler_t t4_intr_handler(struct adapter *adap);
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
		     const struct pkt_gl *gl);
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
		     struct net_device *dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t hnd);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
			 struct net_device *dev, struct netdev_queue *netdevq,
			 unsigned int iqid);
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
			  struct net_device *dev, unsigned int iqid,
			  unsigned int cmplqid);
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
			  struct net_device *dev, unsigned int iqid);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
int cxgb_busy_poll(struct napi_struct *napi);
extern int dbfifo_int_thresh;

#define for_each_port(adapter, iter) \
	for (iter = 0; iter < (adapter)->params.nports; ++iter)

static inline int is_bypass(struct adapter *adap)
{
	return adap->params.bypass;
}

static inline int is_bypass_device(int device)
{
	/* this should be set based upon device capabilities */
	switch (device) {
	case 0x440b:
	case 0x440c:
		return 1;
	default:
		return 0;
	}
}

static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
	return adap->params.vpd.cclk / 1000;
}

static inline unsigned int us_to_core_ticks(const struct adapter *adap,
					    unsigned int us)
{
	return (us * adap->params.vpd.cclk) / 1000;
}

static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
					    unsigned int ticks)
{
	/* add Core Clock / 2 to round ticks to nearest us */
	return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
		adapter->params.vpd.cclk);
}

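/* Worked example (editor's note; assumes vpd.cclk holds the core clock in
 * kHz, e.g. 200000 for a 200 MHz core clock):
 *
 *	core_ticks_per_usec(adap)    = 200000 / 1000               = 200
 *	us_to_core_ticks(adap, 5)    = (5 * 200000) / 1000         = 1000
 *	core_ticks_to_us(adap, 1000) = (1000000 + 100000) / 200000 = 5
 *
 * i.e. the cclk/2 term makes the last conversion round to the nearest
 * microsecond instead of truncating.
 */
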
void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
		      u32 val);

int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok);

static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
			     int size, void *rpl)
{
	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
}

static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
				int size, void *rpl)
{
	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
}

void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx);
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx);
void t4_hw_pci_read_cfg4(struct adapter *adapter, int reg, u32 *val);

struct fw_filter_wr;

void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);

int t4_wait_dev_ready(void __iomem *regs);
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);

#define T4_MEMORY_WRITE	0
#define T4_MEMORY_READ	1
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
		 void *buf, int dir);
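/* Editor's note, hedged usage sketch (not from the source): "win" selects
 * which PCI-E memory window to use for the transfer, "mtype" is one of the
 * MEM_* adapter memory types defined near the top of this header, and "dir"
 * is T4_MEMORY_READ or T4_MEMORY_WRITE, e.g.
 *
 *	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr, len, buf, T4_MEMORY_READ);
 *
 * The t4_memory_write() wrapper below passes window 0 and T4_MEMORY_WRITE.
 */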
static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
				  u32 len, __be32 *buf)
{
	return t4_memory_rw(adap, 0, mtype, addr, len, buf, 0);
}

int t4_seeprom_wp(struct adapter *adapter, bool enable);
int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
int t4_get_tp_version(struct adapter *adapter, u32 *vers);
int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state, int *reset);
int t4_prep_adapter(struct adapter *adapter);

enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
			    unsigned int qid,
			    enum t4_bar2_qtype qtype,
			    u64 *pbar2_qoffset,
			    unsigned int *pbar2_qid);

unsigned int qtimer_val(const struct adapter *adap,
			const struct sge_rspq *q);
int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags);
int t4_read_rss(struct adapter *adapter, u16 *entries);
void t4_read_rss_key(struct adapter *adapter, u32 *key);
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx);
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
			   u32 *valp);
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
			   u32 *vfl, u32 *vfh);
u32 t4_read_rss_pf_map(struct adapter *adapter);
u32 t4_read_rss_pf_mask(struct adapter *adapter);

int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
	       u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
		u64 *parity);
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data,
		    size_t n);
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data,
		    size_t n);
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp);
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp);
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val);
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta);

void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);

void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);

void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
			 const u8 *addr);
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable);

int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state);
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_early_init(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size);
int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val);
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val);
int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
			  unsigned int pf, unsigned int vf,
			  unsigned int nparams, const u32 *params,
			  const u32 *val);
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok);
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt);
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok);
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en);
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en);
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks);
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 *valp);
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 val);
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id);
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid);
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid);
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val);
void t4_sge_decode_idma_state(struct adapter *adapter, int state);
void t4_free_mem(void *addr);
#endif /* __CXGB4_H__ */