Commit | Line | Data |
---|---|---|
6b7c5b94 SP |
1 | /* |
2 | * Copyright (C) 2005 - 2009 ServerEngines | |
3 | * All rights reserved. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or | |
6 | * modify it under the terms of the GNU General Public License version 2 | |
7 | * as published by the Free Software Foundation. The full GNU General | |
8 | * Public License is included in this distribution in the file called COPYING. | |
9 | * | |
10 | * Contact Information: | |
11 | * linux-drivers@serverengines.com | |
12 | * | |
13 | * ServerEngines | |
14 | * 209 N. Fair Oaks Ave | |
15 | * Sunnyvale, CA 94085 | |
16 | */ | |
17 | ||
18 | #ifndef BE_H | |
19 | #define BE_H | |
20 | ||
21 | #include <linux/pci.h> | |
22 | #include <linux/etherdevice.h> | |
23 | #include <linux/version.h> | |
24 | #include <linux/delay.h> | |
25 | #include <net/tcp.h> | |
26 | #include <net/ip.h> | |
27 | #include <net/ipv6.h> | |
28 | #include <linux/if_vlan.h> | |
29 | #include <linux/workqueue.h> | |
30 | #include <linux/interrupt.h> | |
84517482 | 31 | #include <linux/firmware.h> |
6b7c5b94 SP |
32 | |
33 | #include "be_hw.h" | |
34 | ||
84517482 | 35 | #define DRV_VER "2.101.205" |
6b7c5b94 SP |
36 | #define DRV_NAME "be2net" |
37 | #define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" | |
c4ca2374 | 38 | #define OC_NAME "Emulex OneConnect 10Gbps NIC" |
6b7c5b94 SP |
39 | #define DRV_DESC BE_NAME "Driver" |
40 | ||
c4ca2374 AK |
41 | #define BE_VENDOR_ID 0x19a2 |
42 | #define BE_DEVICE_ID1 0x211 | |
43 | #define OC_DEVICE_ID1 0x700 | |
44 | #define OC_DEVICE_ID2 0x701 | |
45 | ||
46 | static inline char *nic_name(struct pci_dev *pdev) | |
47 | { | |
48 | if (pdev->device == OC_DEVICE_ID1 || pdev->device == OC_DEVICE_ID2) | |
49 | return OC_NAME; | |
50 | else | |
51 | return BE_NAME; | |
52 | } | |
53 | ||
6b7c5b94 SP |
54 | /* Number of bytes of an RX frame that are copied to skb->data */ |
55 | #define BE_HDR_LEN 64 | |
56 | #define BE_MAX_JUMBO_FRAME_SIZE 9018 | |
57 | #define BE_MIN_MTU 256 | |
58 | ||
59 | #define BE_NUM_VLANS_SUPPORTED 64 | |
60 | #define BE_MAX_EQD 96 | |
61 | #define BE_MAX_TX_FRAG_COUNT 30 | |
62 | ||
63 | #define EVNT_Q_LEN 1024 | |
64 | #define TX_Q_LEN 2048 | |
65 | #define TX_CQ_LEN 1024 | |
66 | #define RX_Q_LEN 1024 /* Does not support any other value */ | |
67 | #define RX_CQ_LEN 1024 | |
5fb379ee | 68 | #define MCC_Q_LEN 128 /* total size not to exceed 8 pages */ |
6b7c5b94 SP |
69 | #define MCC_CQ_LEN 256 |
70 | ||
71 | #define BE_NAPI_WEIGHT 64 | |
72 | #define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ | |
73 | #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) | |
74 | ||
8788fdc2 SP |
75 | #define FW_VER_LEN 32 |
76 | ||
6b7c5b94 SP |
/* A DMA-able memory region: CPU virtual address, bus address and length */
struct be_dma_mem {
	void *va;	/* kernel virtual address of the buffer */
	dma_addr_t dma;	/* DMA (bus) address handed to the hardware */
	u32 size;	/* size of the region in bytes */
};
82 | ||
/* Generic ring descriptor used for all BE queues (EQ/CQ/TXQ/RXQ/MCCQ) */
struct be_queue_info {
	struct be_dma_mem dma_mem;	/* backing DMA memory for the ring */
	u16 len;			/* number of entries in the ring */
	u16 entry_size;	/* Size of an element in the queue */
	u16 id;				/* queue id assigned by the adapter */
	u16 tail, head;			/* consumer / producer indices */
	bool created;			/* set once the queue has been created */
	atomic_t used;	/* Number of valid elements in the queue */
};
92 | ||
5fb379ee SP |
93 | static inline u32 MODULO(u16 val, u16 limit) |
94 | { | |
95 | BUG_ON(limit & (limit - 1)); | |
96 | return val & (limit - 1); | |
97 | } | |
98 | ||
99 | static inline void index_adv(u16 *index, u16 val, u16 limit) | |
100 | { | |
101 | *index = MODULO((*index + val), limit); | |
102 | } | |
103 | ||
104 | static inline void index_inc(u16 *index, u16 limit) | |
105 | { | |
106 | *index = MODULO((*index + 1), limit); | |
107 | } | |
108 | ||
109 | static inline void *queue_head_node(struct be_queue_info *q) | |
110 | { | |
111 | return q->dma_mem.va + q->head * q->entry_size; | |
112 | } | |
113 | ||
114 | static inline void *queue_tail_node(struct be_queue_info *q) | |
115 | { | |
116 | return q->dma_mem.va + q->tail * q->entry_size; | |
117 | } | |
118 | ||
119 | static inline void queue_head_inc(struct be_queue_info *q) | |
120 | { | |
121 | index_inc(&q->head, q->len); | |
122 | } | |
123 | ||
124 | static inline void queue_tail_inc(struct be_queue_info *q) | |
125 | { | |
126 | index_inc(&q->tail, q->len); | |
127 | } | |
128 | ||
5fb379ee SP |
/* An event queue with its interrupt-coalescing state and NAPI context */
struct be_eq_obj {
	struct be_queue_info q;
	char desc[32];		/* human-readable name for this EQ —
				 * presumably used when requesting the irq;
				 * confirm at the request_irq call site */

	/* Adaptive interrupt coalescing (AIC) info */
	bool enable_aic;
	u16 min_eqd;		/* in usecs */
	u16 max_eqd;		/* in usecs */
	u16 cur_eqd;		/* in usecs */

	struct napi_struct napi;
};
141 | ||
/* MCC (management command channel) queue and its completion queue */
struct be_mcc_obj {
	struct be_queue_info q;		/* MCC command queue */
	struct be_queue_info cq;	/* MCC completion queue */
};
146 | ||
6b7c5b94 SP |
/* Software statistics maintained by the driver (as opposed to the
 * hardware statistics fetched from the card).
 */
struct be_drvr_stats {
	u32 be_tx_reqs;		/* number of TX requests initiated */
	u32 be_tx_stops;	/* number of times TX Q was stopped */
	u32 be_fwd_reqs;	/* number of send reqs through forwarding i/f */
	u32 be_tx_wrbs;		/* number of tx WRBs used */
	u32 be_tx_events;	/* number of tx completion events */
	u32 be_tx_compl;	/* number of tx completion entries processed */
	ulong be_tx_jiffies;	/* presumably jiffies of last tx rate
				 * sample (see be_tx_rate) — confirm */
	u64 be_tx_bytes;
	u64 be_tx_bytes_prev;
	u32 be_tx_rate;

	u32 cache_barrier[16];	/* pad — presumably separates tx and rx
				 * counters onto different cache lines;
				 * confirm before relying on this */

	u32 be_ethrx_post_fail;/* number of ethrx buffer alloc failures */
	u32 be_polls;		/* number of times NAPI called poll function */
	u32 be_rx_events;	/* number of ucast rx completion events */
	u32 be_rx_compl;	/* number of rx completion entries processed */
	ulong be_rx_jiffies;	/* presumably jiffies of last rx rate
				 * sample (see be_rx_rate) — confirm */
	u64 be_rx_bytes;
	u64 be_rx_bytes_prev;
	u32 be_rx_rate;
	/* number of non ether type II frames dropped where
	 * frame len > length field of Mac Hdr */
	u32 be_802_3_dropped_frames;
	/* number of non ether type II frames malformed where
	 * in frame len < length field of Mac Hdr */
	u32 be_802_3_malformed_frames;
	u32 be_rxcp_err;	/* Num rx completion entries w/ err set. */
	ulong rx_fps_jiffies;	/* jiffies at last FPS calc */
	u32 be_rx_frags;
	u32 be_prev_rx_frags;
	u32 be_rx_fps;		/* Rx frags per second */
};
181 | ||
/* Aggregates all statistics: driver counters, netdev stats and the DMA
 * buffer used for the hardware stats command.
 */
struct be_stats_obj {
	struct be_drvr_stats drvr_stats;	/* software counters */
	struct net_device_stats net_stats;	/* stats reported to the stack */
	struct be_dma_mem cmd;			/* DMA mem for the stats cmd */
};
187 | ||
6b7c5b94 SP |
/* TX queue, its completion queue, and the in-flight skb bookkeeping */
struct be_tx_obj {
	struct be_queue_info q;		/* TX work request ring */
	struct be_queue_info cq;	/* TX completion queue */
	/* Remember the skbs that were transmitted */
	struct sk_buff *sent_skb_list[TX_Q_LEN];
};
194 | ||
195 | /* Struct to remember the pages posted for rx frags */ | |
/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
	struct page *page;	/* page backing this rx fragment */
	dma_addr_t bus;		/* DMA address of the mapped page */
	u16 page_offset;	/* offset of this frag within the page */
	bool last_page_user;	/* true for the final frag sharing the page */
};
202 | ||
/* RX queue, its completion queue, and per-slot posted-page bookkeeping */
struct be_rx_obj {
	struct be_queue_info q;		/* RX fragment ring */
	struct be_queue_info cq;	/* RX completion queue */
	struct be_rx_page_info page_info_tbl[RX_Q_LEN];
};
208 | ||
209 | #define BE_NUM_MSIX_VECTORS 2 /* 1 each for Tx and Rx */ | |
/* Per-device driver context: PCI/netdev handles, BAR mappings, mailbox
 * and MCC command state, TX/RX rings, vlan config, statistics and
 * ethtool-visible settings.
 */
struct be_adapter {
	struct pci_dev *pdev;		/* owning PCI device */
	struct net_device *netdev;	/* associated net device */

	u8 __iomem *csr;	/* mapped CSR BAR */
	u8 __iomem *db;		/* Door Bell */
	u8 __iomem *pcicfg;	/* PCI config space */

	spinlock_t mbox_lock;	/* For serializing mbox cmds to BE card */
	struct be_dma_mem mbox_mem;
	/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
	 * is stored for freeing purpose */
	struct be_dma_mem mbox_mem_alloced;

	struct be_mcc_obj mcc_obj;
	spinlock_t mcc_lock;	/* For serializing mcc cmds to BE card */
	spinlock_t mcc_cq_lock;	/* protects MCC completion processing */

	struct msix_entry msix_entries[BE_NUM_MSIX_VECTORS];
	bool msix_enabled;	/* true if MSI-X vectors were granted */
	bool isr_registered;	/* true once irq handler(s) are installed */

	/* TX Rings */
	struct be_eq_obj tx_eq;
	struct be_tx_obj tx_obj;

	u32 cache_line_break[8];	/* pad — presumably keeps tx and rx
					 * hot fields apart; confirm */

	/* Rx rings */
	struct be_eq_obj rx_eq;
	struct be_rx_obj rx_obj;
	u32 big_page_size;	/* Compounded page size shared by rx wrbs */
	bool rx_post_starved;	/* Zero rx frags have been posted to BE */

	struct vlan_group *vlan_grp;
	u16 num_vlans;		/* number of configured vlan ids */
	u8 vlan_tag[VLAN_GROUP_ARRAY_LEN];

	struct be_stats_obj stats;
	/* Work queue used to perform periodic tasks like getting statistics */
	struct delayed_work work;

	/* Ethtool knobs and info */
	bool rx_csum;		/* BE card must perform rx-checksumming */
	char fw_ver[FW_VER_LEN];
	u32 if_handle;		/* Used to configure filtering */
	u32 pmac_id;		/* MAC addr handle used by BE card */

	bool link_up;		/* last reported link state */
	u32 port_num;
	bool promiscuous;	/* true while promiscuous mode is enabled */
};
262 | ||
0fc0b732 | 263 | extern const struct ethtool_ops be_ethtool_ops; |
6b7c5b94 SP |
264 | |
265 | #define drvr_stats(adapter) (&adapter->stats.drvr_stats) | |
266 | ||
eec368fb SP |
267 | static inline unsigned int be_pci_func(struct be_adapter *adapter) |
268 | { | |
269 | return PCI_FUNC(adapter->pdev->devfn); | |
270 | } | |
271 | ||
6b7c5b94 SP |
272 | #define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops) |
273 | ||
6b7c5b94 SP |
274 | #define PAGE_SHIFT_4K 12 |
275 | #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) | |
276 | ||
277 | /* Returns number of pages spanned by the data starting at the given addr */ | |
278 | #define PAGES_4K_SPANNED(_address, size) \ | |
279 | ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \ | |
280 | (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K)) | |
281 | ||
282 | /* Byte offset into the page corresponding to given address */ | |
283 | #define OFFSET_IN_PAGE(addr) \ | |
284 | ((size_t)(addr) & (PAGE_SIZE_4K-1)) | |
285 | ||
286 | /* Returns bit offset within a DWORD of a bitfield */ | |
287 | #define AMAP_BIT_OFFSET(_struct, field) \ | |
288 | (((size_t)&(((_struct *)0)->field))%32) | |
289 | ||
290 | /* Returns the bit mask of the field that is NOT shifted into location. */ | |
291 | static inline u32 amap_mask(u32 bitsize) | |
292 | { | |
293 | return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1); | |
294 | } | |
295 | ||
296 | static inline void | |
297 | amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value) | |
298 | { | |
299 | u32 *dw = (u32 *) ptr + dw_offset; | |
300 | *dw &= ~(mask << offset); | |
301 | *dw |= (mask & value) << offset; | |
302 | } | |
303 | ||
304 | #define AMAP_SET_BITS(_struct, field, ptr, val) \ | |
305 | amap_set(ptr, \ | |
306 | offsetof(_struct, field)/32, \ | |
307 | amap_mask(sizeof(((_struct *)0)->field)), \ | |
308 | AMAP_BIT_OFFSET(_struct, field), \ | |
309 | val) | |
310 | ||
311 | static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset) | |
312 | { | |
313 | u32 *dw = (u32 *) ptr; | |
314 | return mask & (*(dw + dw_offset) >> offset); | |
315 | } | |
316 | ||
317 | #define AMAP_GET_BITS(_struct, field, ptr) \ | |
318 | amap_get(ptr, \ | |
319 | offsetof(_struct, field)/32, \ | |
320 | amap_mask(sizeof(((_struct *)0)->field)), \ | |
321 | AMAP_BIT_OFFSET(_struct, field)) | |
322 | ||
#define be_dws_cpu_to_le(wrb, len)	swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len)	swap_dws(wrb, len)
/* Byte-swap `len` bytes of dwords in place on big-endian hosts; a no-op on
 * little-endian builds (the hardware format is little-endian either way).
 * len must be a multiple of 4.
 */
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
	u32 *dw = wrb;

	BUG_ON(len % 4);
	/* while (not do/while): len == 0 must be a no-op; the original
	 * do/while decremented len below zero and never terminated. */
	while (len > 0) {
		*dw = cpu_to_le32(*dw);
		dw++;
		len -= 4;
	}
#endif /* __BIG_ENDIAN */
}
337 | ||
338 | static inline u8 is_tcp_pkt(struct sk_buff *skb) | |
339 | { | |
340 | u8 val = 0; | |
341 | ||
342 | if (ip_hdr(skb)->version == 4) | |
343 | val = (ip_hdr(skb)->protocol == IPPROTO_TCP); | |
344 | else if (ip_hdr(skb)->version == 6) | |
345 | val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP); | |
346 | ||
347 | return val; | |
348 | } | |
349 | ||
350 | static inline u8 is_udp_pkt(struct sk_buff *skb) | |
351 | { | |
352 | u8 val = 0; | |
353 | ||
354 | if (ip_hdr(skb)->version == 4) | |
355 | val = (ip_hdr(skb)->protocol == IPPROTO_UDP); | |
356 | else if (ip_hdr(skb)->version == 6) | |
357 | val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP); | |
358 | ||
359 | return val; | |
360 | } | |
361 | ||
8788fdc2 | 362 | extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, |
5fb379ee | 363 | u16 num_popped); |
8788fdc2 | 364 | extern void be_link_status_update(struct be_adapter *adapter, bool link_up); |
84517482 | 365 | extern int be_load_fw(struct be_adapter *adapter, u8 *func); |
6b7c5b94 | 366 | #endif /* BE_H */ |