/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This file should not be included directly.  Include t4vf_common.h instead.
 */

#ifndef __CXGB4VF_ADAPTER_H__
#define __CXGB4VF_ADAPTER_H__

#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>

#include "../cxgb4/t4_hw.h"

/*
 * Constants of the implementation.
 */
enum {
        MAX_NPORTS = 1,                 /* max # of "ports" */
        MAX_PORT_QSETS = 8,             /* max # of Queue Sets / "port" */
        MAX_ETH_QSETS = MAX_NPORTS*MAX_PORT_QSETS,

        /*
         * MSI-X interrupt index usage.
         */
        MSIX_FW = 0,                    /* MSI-X index for firmware Q */
        MSIX_NIQFLINT = 1,              /* MSI-X index base for Ingress Qs */
        MSIX_EXTRAS = 1,
        MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS,

        /*
         * The maximum number of Ingress and Egress Queues is determined by
         * the maximum number of "Queue Sets" which we support plus any
         * ancillary queues.  Each "Queue Set" requires one Ingress Queue
         * for RX Packet Ingress Event notifications and two Egress Queues
         * for a Free List and an Ethernet TX list.
         */
        INGQ_EXTRAS = 2,                /* firmware event queue and */
                                        /*   forwarded interrupts */
        MAX_INGQ = MAX_ETH_QSETS+INGQ_EXTRAS,
        MAX_EGRQ = MAX_ETH_QSETS*2,
};
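
/*
 * Illustrative sketch, not part of the driver: with the MSI-X index layout
 * above, the firmware event queue is serviced by vector index MSIX_FW (0)
 * and the ingress queue of Queue Set "qs" by vector index MSIX_NIQFLINT + qs.
 * A hypothetical helper computing that index would simply be:
 *
 *	static inline unsigned int qs_msix_index(unsigned int qs)
 *	{
 *		return MSIX_NIQFLINT + qs;
 *	}
 */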

/*
 * Forward structure definition references.
 */
struct adapter;
struct sge_eth_rxq;
struct sge_rspq;

/*
 * Per-"port" information.  This is really per-Virtual Interface information
 * but the use of the "port" nomenclature makes it easier to go back and forth
 * between the PF and VF drivers ...
 */
struct port_info {
        struct adapter *adapter;        /* our adapter */
        struct vlan_group *vlan_grp;    /* our VLAN group */
        u16 viid;                       /* virtual interface ID */
        s16 xact_addr_filt;             /* index of our MAC address filter */
        u16 rss_size;                   /* size of VI's RSS table slice */
        u8 pidx;                        /* index into adapter port[] */
        u8 port_id;                     /* physical port ID */
        u8 rx_offload;                  /* CSO, etc. */
        u8 nqsets;                      /* # of "Queue Sets" */
        u8 first_qset;                  /* index of first "Queue Set" */
        struct link_config link_cfg;    /* physical port configuration */
};

/* port_info.rx_offload flags */
enum {
        RX_CSO = 1 << 0,
};

/*
 * Scatter Gather Engine resources for the "adapter".  Our ingress and egress
 * queues are organized into "Queue Sets" with one ingress and one egress
 * queue per Queue Set.  These Queue Sets are apportionable between the
 * "ports" (Virtual Interfaces).  One extra ingress queue is used to receive
 * asynchronous messages from the firmware.  Note that the "Queue IDs" that we
 * use here are really "Relative Queue IDs" which are returned as part of the
 * firmware command to allocate queues.  These queue IDs are relative to the
 * absolute Queue ID base of the section of the Queue ID space allocated to
 * the PF/VF.
 */

/*
 * SGE free-list queue state.
 */
struct rx_sw_desc;
struct sge_fl {
        unsigned int avail;             /* # of available RX buffers */
        unsigned int pend_cred;         /* new buffers since last FL DB ring */
        unsigned int cidx;              /* consumer index */
        unsigned int pidx;              /* producer index */
        unsigned long alloc_failed;     /* # of buffer allocation failures */
        unsigned long large_alloc_failed;
        unsigned long starving;         /* # of times FL was found starving */

        /*
         * Write-once/infrequently fields.
         * -------------------------------
         */

        unsigned int cntxt_id;          /* SGE relative QID for the free list */
        unsigned int abs_id;            /* SGE absolute QID for the free list */
        unsigned int size;              /* capacity of free list */
        struct rx_sw_desc *sdesc;       /* address of SW RX descriptor ring */
        __be64 *desc;                   /* address of HW RX descriptor ring */
        dma_addr_t addr;                /* PCI bus address of hardware ring */
};

/*
 * An ingress packet gather list.
 */
struct pkt_gl {
        skb_frag_t frags[MAX_SKB_FRAGS];
        void *va;                       /* virtual address of first byte */
        unsigned int nfrags;            /* # of fragments */
        unsigned int tot_len;           /* total length of fragments */
};

typedef int (*rspq_handler_t)(struct sge_rspq *, const __be64 *,
                              const struct pkt_gl *);

/*
 * State for an SGE Response Queue.
 */
struct sge_rspq {
        struct napi_struct napi;        /* NAPI scheduling control */
        const __be64 *cur_desc;         /* current descriptor in queue */
        unsigned int cidx;              /* consumer index */
        u8 gen;                         /* current generation bit */
        u8 next_intr_params;            /* holdoff params for next interrupt */
        int offset;                     /* offset into current FL buffer */

        unsigned int unhandled_irqs;    /* bogus interrupts */

        /*
         * Write-once/infrequently fields.
         * -------------------------------
         */

        u8 intr_params;                 /* interrupt holdoff parameters */
        u8 pktcnt_idx;                  /* interrupt packet threshold */
        u8 idx;                         /* queue index within its group */
        u16 cntxt_id;                   /* SGE rel QID for the response Q */
        u16 abs_id;                     /* SGE abs QID for the response Q */
        __be64 *desc;                   /* address of hardware response ring */
        dma_addr_t phys_addr;           /* PCI bus address of ring */
        unsigned int iqe_len;           /* entry size */
        unsigned int size;              /* capacity of response Q */
        struct adapter *adapter;        /* our adapter */
        struct net_device *netdev;      /* associated net device */
        rspq_handler_t handler;         /* the handler for this response Q */
};

/*
 * Ethernet queue statistics
 */
struct sge_eth_stats {
        unsigned long pkts;             /* # of ethernet packets */
        unsigned long lro_pkts;         /* # of LRO super packets */
        unsigned long lro_merged;       /* # of wire packets merged by LRO */
        unsigned long rx_cso;           /* # of Rx checksum offloads */
        unsigned long vlan_ex;          /* # of Rx VLAN extractions */
        unsigned long rx_drops;         /* # of packets dropped due to no mem */
};

/*
 * State for an Ethernet Receive Queue.
 */
struct sge_eth_rxq {
        struct sge_rspq rspq;           /* Response Queue */
        struct sge_fl fl;               /* Free List */
        struct sge_eth_stats stats;     /* receive statistics */
};

/*
 * SGE Transmit Queue state.  This contains all of the resources associated
 * with the hardware status of a TX Queue which is a circular ring of hardware
 * TX Descriptors.  For convenience, it also contains a pointer to a parallel
 * "Software Descriptor" array but we don't know anything about it here other
 * than its type name.
 */
struct tx_desc {
        /*
         * Egress Queues are measured in units of SGE_EQ_IDXSIZE by the
         * hardware: Sizes, Producer and Consumer indices, etc.
         */
        __be64 flit[SGE_EQ_IDXSIZE/sizeof(__be64)];
};
struct tx_sw_desc;
struct sge_txq {
        unsigned int in_use;            /* # of in-use TX descriptors */
        unsigned int size;              /* # of descriptors */
        unsigned int cidx;              /* SW consumer index */
        unsigned int pidx;              /* producer index */
        unsigned long stops;            /* # of times queue has been stopped */
        unsigned long restarts;         /* # of queue restarts */

        /*
         * Write-once/infrequently fields.
         * -------------------------------
         */

        unsigned int cntxt_id;          /* SGE relative QID for the TX Q */
        unsigned int abs_id;            /* SGE absolute QID for the TX Q */
        struct tx_desc *desc;           /* address of HW TX descriptor ring */
        struct tx_sw_desc *sdesc;       /* address of SW TX descriptor ring */
        struct sge_qstat *stat;         /* queue status entry */
        dma_addr_t phys_addr;           /* PCI bus address of hardware ring */
};

/*
 * State for an Ethernet Transmit Queue.
 */
struct sge_eth_txq {
        struct sge_txq q;               /* SGE TX Queue */
        struct netdev_queue *txq;       /* associated netdev TX queue */
        unsigned long tso;              /* # of TSO requests */
        unsigned long tx_cso;           /* # of TX checksum offloads */
        unsigned long vlan_ins;         /* # of TX VLAN insertions */
        unsigned long mapping_err;      /* # of I/O MMU packet mapping errors */
};

/*
 * The complete set of Scatter/Gather Engine resources.
 */
struct sge {
        /*
         * Our "Queue Sets" ...
         */
        struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
        struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];

        /*
         * Extra ingress queues for asynchronous firmware events and
         * forwarded interrupts (when in MSI mode).
         */
        struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;

        struct sge_rspq intrq ____cacheline_aligned_in_smp;
        spinlock_t intrq_lock;

        /*
         * State for managing "starving Free Lists" -- Free Lists which have
         * fallen below a certain threshold of buffers available to the
         * hardware and attempts to refill them up to that threshold have
         * failed.  We have a regular "slow tick" timer process which will
         * make periodic attempts to refill these starving Free Lists ...
         */
        DECLARE_BITMAP(starving_fl, MAX_EGRQ);
        struct timer_list rx_timer;

        /*
         * State for cleaning up completed TX descriptors.
         */
        struct timer_list tx_timer;

        /*
         * Write-once/infrequently fields.
         * -------------------------------
         */

        u16 max_ethqsets;               /* # of available Ethernet queue sets */
        u16 ethqsets;                   /* # of active Ethernet queue sets */
        u16 ethtxq_rover;               /* Tx queue to clean up next */
        u16 timer_val[SGE_NTIMERS];     /* interrupt holdoff timer array */
        u8 counter_val[SGE_NCOUNTERS];  /* interrupt RX threshold array */

        /*
         * Reverse maps from Absolute Queue IDs to associated queue pointers.
         * The absolute Queue IDs are in a compact range which starts at a
         * [potentially large] Base Queue ID.  We perform the reverse map by
         * first converting the Absolute Queue ID into a Relative Queue ID by
         * subtracting off the Base Queue ID and then use a Relative Queue ID
         * indexed table to get the pointer to the corresponding software
         * queue structure.
         */
        unsigned int egr_base;
        unsigned int ingr_base;
        void *egr_map[MAX_EGRQ];
        struct sge_rspq *ingr_map[MAX_INGQ];
};

/*
 * Utility macros to convert Absolute Queue IDs into Relative Queue indices
 * for Egress and Ingress Queues.  The EQ_MAP() and IQ_MAP() macros, which
 * yield pointers to Egress and Ingress Queues, can be used as both L- and
 * R-values.
 */
#define EQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->egr_base))
#define IQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->ingr_base))

#define EQ_MAP(s, abs_id) ((s)->egr_map[EQ_IDX(s, abs_id)])
#define IQ_MAP(s, abs_id) ((s)->ingr_map[IQ_IDX(s, abs_id)])
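
/*
 * Illustrative sketch, not part of the driver: code handling a message that
 * carries an absolute Ingress Queue ID could use the reverse map above to
 * recover the software queue state, e.g.
 *
 *	struct sge_rspq *rspq = IQ_MAP(&adapter->sge, abs_iq_id);
 *
 * where "abs_iq_id" is a hypothetical absolute Queue ID taken from the
 * message.  The subtraction of ingr_base happens inside IQ_IDX().
 */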

/*
 * Macro to iterate across Queue Sets ("rxq" is a historic misnomer).
 */
#define for_each_ethrxq(sge, iter) \
        for (iter = 0; iter < (sge)->ethqsets; iter++)
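
/*
 * Illustrative sketch, not part of the driver: a typical use of the iterator
 * above walks the active "Queue Sets" of the adapter, e.g. to sum up receive
 * statistics (names below are hypothetical):
 *
 *	int qs;
 *	unsigned long pkts = 0;
 *
 *	for_each_ethrxq(&adapter->sge, qs)
 *		pkts += adapter->sge.ethrxq[qs].stats.pkts;
 */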

/*
 * Per-"adapter" (Virtual Function) information.
 */
struct adapter {
        /* PCI resources */
        void __iomem *regs;
        struct pci_dev *pdev;
        struct device *pdev_dev;

        /* "adapter" resources */
        unsigned long registered_device_map;
        unsigned long open_device_map;
        unsigned long flags;
        struct adapter_params params;

        /* queue and interrupt resources */
        struct {
                unsigned short vec;
                char desc[22];
        } msix_info[MSIX_ENTRIES];
        struct sge sge;

        /* Linux network device resources */
        struct net_device *port[MAX_NPORTS];
        const char *name;
        unsigned int msg_enable;

        /* debugfs resources */
        struct dentry *debugfs_root;

        /* various locks */
        spinlock_t stats_lock;
};

enum { /* adapter flags */
        FULL_INIT_DONE = (1UL << 0),
        USING_MSI      = (1UL << 1),
        USING_MSIX     = (1UL << 2),
        QUEUES_BOUND   = (1UL << 3),
};

/*
 * The following register read/write routine definitions are required by
 * the common code.
 */

/**
 * t4_read_reg - read a HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 *
 * Returns the 32-bit value of the given HW register.
 */
static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
{
        return readl(adapter->regs + reg_addr);
}

/**
 * t4_write_reg - write a HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 32-bit value into the given HW register.
 */
static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
{
        writel(val, adapter->regs + reg_addr);
}

#ifndef readq
static inline u64 readq(const volatile void __iomem *addr)
{
        return readl(addr) + ((u64)readl(addr + 4) << 32);
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
        writel(val, addr);
        writel(val >> 32, addr + 4);
}
#endif
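
/*
 * Note that the readq()/writeq() fallbacks above compose a 64-bit access out
 * of two 32-bit accesses (low word first), so on platforms without native
 * readq()/writeq() the 64-bit register helpers below are not atomic with
 * respect to the hardware.
 */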

/**
 * t4_read_reg64 - read a 64-bit HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 *
 * Returns the 64-bit value of the given HW register.
 */
static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
{
        return readq(adapter->regs + reg_addr);
}

/**
 * t4_write_reg64 - write a 64-bit HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 64-bit value into the given HW register.
 */
static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
                                  u64 val)
{
        writeq(val, adapter->regs + reg_addr);
}

/**
 * port_name - return the string name of a port
 * @adapter: the adapter
 * @pidx: the port index
 *
 * Return the string name of the selected port.
 */
static inline const char *port_name(struct adapter *adapter, int pidx)
{
        return adapter->port[pidx]->name;
}

/**
 * t4_os_set_hw_addr - store a port's MAC address in SW
 * @adapter: the adapter
 * @pidx: the port index
 * @hw_addr: the Ethernet address
 *
 * Store the Ethernet address of the given port in SW.  Called by the common
 * code when it retrieves a port's Ethernet address from EEPROM.
 */
static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx,
                                     u8 hw_addr[])
{
        memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN);
        memcpy(adapter->port[pidx]->perm_addr, hw_addr, ETH_ALEN);
}

/**
 * netdev2pinfo - return the port_info structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct port_info associated with a net_device.
 */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
        return netdev_priv(dev);
}

/**
 * adap2pinfo - return the port_info of a port
 * @adapter: the adapter
 * @pidx: the port index
 *
 * Return the port_info structure for the selected port of the adapter.
 */
static inline struct port_info *adap2pinfo(struct adapter *adapter, int pidx)
{
        return netdev_priv(adapter->port[pidx]);
}

/**
 * netdev2adap - return the adapter structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct adapter associated with a net_device.
 */
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
        return netdev2pinfo(dev)->adapter;
}
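
/*
 * Illustrative sketch, not part of the driver: these accessors let code that
 * only has a struct net_device recover the per-port and per-adapter state,
 * e.g. in a hypothetical netdev operation:
 *
 *	struct port_info *pi = netdev2pinfo(dev);
 *	struct adapter *adapter = netdev2adap(dev);
 *
 * after which pi->port_id, adapter->regs, etc. are available.
 */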

/*
 * OS "Callback" function declarations.  These are functions that the OS code
 * is "contracted" to provide for the common code.
 */
void t4vf_os_link_changed(struct adapter *, int, int);

/*
 * SGE function prototype declarations.
 */
int t4vf_sge_alloc_rxq(struct adapter *, struct sge_rspq *, bool,
                       struct net_device *, int,
                       struct sge_fl *, rspq_handler_t);
int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *,
                           struct net_device *, struct netdev_queue *,
                           unsigned int);
void t4vf_free_sge_resources(struct adapter *);

int t4vf_eth_xmit(struct sk_buff *, struct net_device *);
int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *,
                       const struct pkt_gl *);

irq_handler_t t4vf_intr_handler(struct adapter *);
irqreturn_t t4vf_sge_intr_msix(int, void *);

int t4vf_sge_init(struct adapter *);
void t4vf_sge_start(struct adapter *);
void t4vf_sge_stop(struct adapter *);

#endif /* __CXGB4VF_ADAPTER_H__ */