[NET]: Make NAPI polling independent of struct net_device objects.
drivers/net/ehea/ehea.h
/*
 *  linux/drivers/net/ehea/ehea.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __EHEA_H__
#define __EHEA_H__

#include <linux/module.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>

#include <asm/ibmebus.h>
#include <asm/abs_addr.h>
#include <asm/io.h>

#define DRV_NAME	"ehea"
#define DRV_VERSION	"EHEA_0074"

/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM	1
#define DLPAR_MEM_ADD		2
#define DLPAR_MEM_REM		4
#define EHEA_CAPABILITIES	(DLPAR_PORT_ADD_REM)

#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
	| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EHEA_MAX_ENTRIES_RQ1 32767
#define EHEA_MAX_ENTRIES_RQ2 16383
#define EHEA_MAX_ENTRIES_RQ3 16383
#define EHEA_MAX_ENTRIES_SQ  32767
#define EHEA_MIN_ENTRIES_QP  127

#define EHEA_SMALL_QUEUES
#define EHEA_NUM_TX_QP 1

#ifdef EHEA_SMALL_QUEUES
#define EHEA_MAX_CQE_COUNT      1023
#define EHEA_DEF_ENTRIES_SQ     1023
#define EHEA_DEF_ENTRIES_RQ1    4095
#define EHEA_DEF_ENTRIES_RQ2    1023
#define EHEA_DEF_ENTRIES_RQ3    1023
#else
#define EHEA_MAX_CQE_COUNT      4080
#define EHEA_DEF_ENTRIES_SQ     4080
#define EHEA_DEF_ENTRIES_RQ1    8160
#define EHEA_DEF_ENTRIES_RQ2    2040
#define EHEA_DEF_ENTRIES_RQ3    2040
#endif

#define EHEA_MAX_ENTRIES_EQ 20

#define EHEA_SG_SQ  2
#define EHEA_SG_RQ1 1
#define EHEA_SG_RQ2 0
#define EHEA_SG_RQ3 0

#define EHEA_MAX_PACKET_SIZE    9022	/* for jumbo frames */
#define EHEA_RQ2_PKT_SIZE       1522
#define EHEA_L_PKT_SIZE         256	/* low latency */

/* Send completion signaling */

/* Protection Domain Identifier */
#define EHEA_PD_ID              0xaabcdeff

#define EHEA_RQ2_THRESHOLD      1
#define EHEA_RQ3_THRESHOLD      9	/* use RQ3 threshold of 1522 bytes */

#define EHEA_SPEED_10G          10000
#define EHEA_SPEED_1G           1000
#define EHEA_SPEED_100M         100
#define EHEA_SPEED_10M          10
#define EHEA_SPEED_AUTONEG      0

/* Broadcast/Multicast registration types */
#define EHEA_BCMC_SCOPE_ALL	0x08
#define EHEA_BCMC_SCOPE_SINGLE	0x00
#define EHEA_BCMC_MULTICAST	0x04
#define EHEA_BCMC_BROADCAST	0x00
#define EHEA_BCMC_UNTAGGED	0x02
#define EHEA_BCMC_TAGGED	0x00
#define EHEA_BCMC_VLANID_ALL	0x01
#define EHEA_BCMC_VLANID_SINGLE	0x00

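/*
 * Illustrative note (not in the original header): the EHEA_BCMC_* values
 * are OR-ed into a single registration type when a port registers for
 * broadcast/multicast traffic with the hypervisor.  The helper call shown
 * below is a hedged sketch; the exact name and argument order live in the
 * driver's PHYP wrappers.
 *
 *	u64 hret;
 *	u8 reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
 *	reg_type |= EHEA_BCMC_VLANID_ALL;	(register for all VLAN IDs)
 *	hret = ehea_h_reg_dereg_bcmc(adapter_handle, logical_port_id,
 *				     reg_type, mac_addr, 0, hcall_id);
 */
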
#define EHEA_CACHE_LINE          128

/* Memory Regions */
#define EHEA_MR_ACC_CTRL       0x00800000

#define EHEA_BUSMAP_START      0x8000000000000000ULL

#define EHEA_WATCH_DOG_TIMEOUT 10*HZ

/* utility functions */

#define ehea_info(fmt, args...) \
	printk(KERN_INFO DRV_NAME ": " fmt "\n", ## args)

#define ehea_error(fmt, args...) \
	printk(KERN_ERR DRV_NAME ": Error in %s: " fmt "\n", __func__, ## args)

#ifdef DEBUG
#define ehea_debug(fmt, args...) \
	printk(KERN_DEBUG DRV_NAME ": " fmt, ## args)
#else
#define ehea_debug(fmt, args...) do {} while (0)
#endif

void ehea_dump(void *adr, int len, char *msg);

#define EHEA_BMASK(pos, length) (((pos) << 16) + (length))

#define EHEA_BMASK_IBM(from, to) (((63 - to) << 16) + ((to) - (from) + 1))

#define EHEA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)

#define EHEA_BMASK_MASK(mask) \
	(0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff))

#define EHEA_BMASK_SET(mask, value) \
	((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))

#define EHEA_BMASK_GET(mask, value) \
	(EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))

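/*
 * Illustrative example (not in the original header) of the BMASK helpers:
 * EHEA_BMASK_IBM(from, to) describes a bit field in IBM bit numbering,
 * where bit 0 is the most significant bit of a 64-bit word.  The encoded
 * value carries the shift position in its upper 16 bits and the field
 * length in its lower 16 bits; EHEA_BMASK_SET() places a value into that
 * field and EHEA_BMASK_GET() extracts it again.  For example:
 *
 *	EHEA_BMASK_SET(EHEA_BMASK_IBM(48, 63), 0x1234)
 *		== 0x0000000000001234ULL	(lowest 16 bits)
 *	EHEA_BMASK_SET(EHEA_BMASK_IBM(0, 15), 0x1234)
 *		== 0x1234000000000000ULL	(highest 16 bits)
 *	EHEA_BMASK_GET(EHEA_BMASK_IBM(0, 15), 0x1234000000000000ULL)
 *		== 0x1234
 */
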
/*
 * Generic ehea page
 */
struct ehea_page {
	u8 entries[PAGE_SIZE];
};

/*
 * Generic queue in linux kernel virtual memory
 */
struct hw_queue {
	u64 current_q_offset;		/* current queue entry */
	struct ehea_page **queue_pages;	/* array of pages belonging to queue */
	u32 qe_size;			/* queue entry size */
	u32 queue_length;		/* queue length allocated in bytes */
	u32 pagesize;
	u32 toggle_state;		/* toggle flag - per page */
	u32 reserved;			/* 64 bit alignment */
};

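/*
 * Illustrative sketch (not in the original header) of how a hw_queue is
 * walked: current_q_offset is a byte offset into the logical queue, each
 * PAGE_SIZE chunk of which is backed by one entry in queue_pages.  The
 * real accessors live in ehea_qmr.h; the lines below only show the idea
 * and assume PAGE_SIZE-sized queue pages, matching struct ehea_page above.
 *
 *	u64 off = queue->current_q_offset;
 *	if (off >= queue->queue_length)		(wrap around)
 *		off -= queue->queue_length;
 *	page  = queue->queue_pages[off >> PAGE_SHIFT];
 *	entry = &page->entries[off & (PAGE_SIZE - 1)];
 *
 * On wrap-around, toggle_state is flipped so that entries written by the
 * hardware in the previous pass can be told apart from fresh ones.
 */
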
/*
 * For pSeries this is a 64bit memory address where
 * I/O memory is mapped into CPU address space
 */
struct h_epa {
	void __iomem *addr;
};

struct h_epa_user {
	u64 addr;
};

struct h_epas {
	struct h_epa kernel;	/* kernel space accessible resource,
				   set to 0 if unused */
	struct h_epa_user user;	/* user space accessible resource
				   set to 0 if unused */
};

struct ehea_busmap {
	unsigned int entries;		/* total number of entries */
	unsigned int valid_sections;	/* number of valid sections */
	u64 *vaddr;
};

struct ehea_qp;
struct ehea_cq;
struct ehea_eq;
struct ehea_port;
struct ehea_av;

/*
 * Queue attributes passed to ehea_create_qp()
 */
struct ehea_qp_init_attr {
	/* input parameter */
	u32 qp_token;		/* queue token */
	u8 low_lat_rq1;
	u8 signalingtype;	/* cqe generation flag */
	u8 rq_count;		/* num of receive queues */
	u8 eqe_gen;		/* eqe generation flag */
	u16 max_nr_send_wqes;	/* max number of send wqes */
	u16 max_nr_rwqes_rq1;	/* max number of receive wqes */
	u16 max_nr_rwqes_rq2;
	u16 max_nr_rwqes_rq3;
	u8 wqe_size_enc_sq;
	u8 wqe_size_enc_rq1;
	u8 wqe_size_enc_rq2;
	u8 wqe_size_enc_rq3;
	u8 swqe_imm_data_len;	/* immediate data length for swqes */
	u16 port_nr;
	u16 rq2_threshold;
	u16 rq3_threshold;
	u64 send_cq_handle;
	u64 recv_cq_handle;
	u64 aff_eq_handle;

	/* output parameter */
	u32 qp_nr;
	u16 act_nr_send_wqes;
	u16 act_nr_rwqes_rq1;
	u16 act_nr_rwqes_rq2;
	u16 act_nr_rwqes_rq3;
	u8 act_wqe_size_enc_sq;
	u8 act_wqe_size_enc_rq1;
	u8 act_wqe_size_enc_rq2;
	u8 act_wqe_size_enc_rq3;
	u32 nr_sq_pages;
	u32 nr_rq1_pages;
	u32 nr_rq2_pages;
	u32 nr_rq3_pages;
	u32 liobn_sq;
	u32 liobn_rq1;
	u32 liobn_rq2;
	u32 liobn_rq3;
};

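/*
 * Hedged usage sketch (not in the original header): callers zero the
 * structure, fill in the input fields and pass it to ehea_create_qp();
 * on success the firmware reports the granted sizes in the act_* output
 * fields.  The exact prototype of ehea_create_qp() lives elsewhere in the
 * driver, so the argument order below is only an assumption.
 *
 *	struct ehea_qp_init_attr init_attr;
 *
 *	memset(&init_attr, 0, sizeof(init_attr));
 *	init_attr.max_nr_send_wqes = EHEA_DEF_ENTRIES_SQ;
 *	init_attr.max_nr_rwqes_rq1 = EHEA_DEF_ENTRIES_RQ1;
 *	init_attr.send_cq_handle   = send_cq->fw_handle;
 *	init_attr.recv_cq_handle   = recv_cq->fw_handle;
 *	qp = ehea_create_qp(adapter, adapter->pd, &init_attr);
 */
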
/*
 * Event Queue attributes, passed as parameter
 */
struct ehea_eq_attr {
	u32 type;
	u32 max_nr_of_eqes;
	u8 eqe_gen;		/* generate eqe flag */
	u64 eq_handle;
	u32 act_nr_of_eqes;
	u32 nr_pages;
	u32 ist1;		/* Interrupt service token */
	u32 ist2;
	u32 ist3;
	u32 ist4;
};


/*
 * Event Queue
 */
struct ehea_eq {
	struct ehea_adapter *adapter;
	struct hw_queue hw_queue;
	u64 fw_handle;
	struct h_epas epas;
	spinlock_t spinlock;
	struct ehea_eq_attr attr;
};

/*
 * HEA Queues
 */
struct ehea_qp {
	struct ehea_adapter *adapter;
	u64 fw_handle;			/* QP handle for firmware calls */
	struct hw_queue hw_squeue;
	struct hw_queue hw_rqueue1;
	struct hw_queue hw_rqueue2;
	struct hw_queue hw_rqueue3;
	struct h_epas epas;
	struct ehea_qp_init_attr init_attr;
};

/*
 * Completion Queue attributes
 */
struct ehea_cq_attr {
	/* input parameter */
	u32 max_nr_of_cqes;
	u32 cq_token;
	u64 eq_handle;

	/* output parameter */
	u32 act_nr_of_cqes;
	u32 nr_pages;
};

/*
 * Completion Queue
 */
struct ehea_cq {
	struct ehea_adapter *adapter;
	u64 fw_handle;
	struct hw_queue hw_queue;
	struct h_epas epas;
	struct ehea_cq_attr attr;
};

/*
 * Memory Region
 */
struct ehea_mr {
	struct ehea_adapter *adapter;
	u64 handle;
	u64 vaddr;
	u32 lkey;
};

/*
 * Port state information
 */
struct port_stats {
	int poll_receive_errors;
	int queue_stopped;
	int err_tcp_cksum;
	int err_ip_cksum;
	int err_frame_crc;
};

#define EHEA_IRQ_NAME_SIZE 20

/*
 * Queue SKB Array
 */
struct ehea_q_skb_arr {
	struct sk_buff **arr;	/* skb array for queue */
	int len;		/* array length */
	int index;		/* array index */
	int os_skbs;		/* rq2/rq3 only: outstanding skbs */
};

/*
 * Port resources
 */
struct ehea_port_res {
	struct napi_struct napi;
	struct port_stats p_stats;
	struct ehea_mr send_mr;		/* send memory region */
	struct ehea_mr recv_mr;		/* receive memory region */
	spinlock_t xmit_lock;
	struct ehea_port *port;
	char int_recv_name[EHEA_IRQ_NAME_SIZE];
	char int_send_name[EHEA_IRQ_NAME_SIZE];
	struct ehea_qp *qp;
	struct ehea_cq *send_cq;
	struct ehea_cq *recv_cq;
	struct ehea_eq *eq;
	struct ehea_q_skb_arr rq1_skba;
	struct ehea_q_skb_arr rq2_skba;
	struct ehea_q_skb_arr rq3_skba;
	struct ehea_q_skb_arr sq_skba;
	spinlock_t netif_queue;
	int queue_stopped;
	int swqe_refill_th;
	atomic_t swqe_avail;
	int swqe_ll_count;
	u32 swqe_id_counter;
	u64 tx_packets;
	u64 rx_packets;
	u32 poll_counter;
};

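/*
 * Note added for illustration (not in the original header): each
 * ehea_port_res carries its own napi_struct, which is what makes NAPI
 * polling independent of the port's single struct net_device: every
 * queue pair can be polled on its own.  Registration and scheduling look
 * roughly like the sketch below; the helper names and the poll weight
 * are assumptions and have changed across kernel versions.
 *
 *	netif_napi_add(port->netdev, &pr->napi, ehea_poll, 64);
 *	...
 *	napi_schedule(&pr->napi);	(from the receive interrupt handler)
 */
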
#define EHEA_MAX_PORTS 16
struct ehea_adapter {
	u64 handle;
	struct ibmebus_dev *ebus_dev;
	struct ehea_port *port[EHEA_MAX_PORTS];
	struct ehea_eq *neq;		/* notification event queue */
	struct workqueue_struct *ehea_wq;
	struct tasklet_struct neq_tasklet;
	struct ehea_mr mr;
	u32 pd;				/* protection domain */
	u64 max_mc_mac;			/* max number of multicast mac addresses */
	int active_ports;
	struct list_head list;
};


struct ehea_mc_list {
	struct list_head list;
	u64 macaddr;
};

#define EHEA_PORT_UP		1
#define EHEA_PORT_DOWN		0
#define EHEA_PHY_LINK_UP	1
#define EHEA_PHY_LINK_DOWN	0
#define EHEA_MAX_PORT_RES	16
struct ehea_port {
	struct ehea_adapter *adapter;	/* adapter that owns this port */
	struct net_device *netdev;
	struct net_device_stats stats;
	struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
	struct of_device ofdev;		/* Open Firmware Device */
	struct ehea_mc_list *mc_list;	/* Multicast MAC addresses */
	struct vlan_group *vgrp;
	struct ehea_eq *qp_eq;
	struct work_struct reset_task;
	struct semaphore port_lock;
	char int_aff_name[EHEA_IRQ_NAME_SIZE];
	int allmulti;			/* Indicates IFF_ALLMULTI state */
	int promisc;			/* Indicates IFF_PROMISC state */
	int num_tx_qps;
	int num_add_tx_qps;
	int num_mcs;
	int resets;
	u64 mac_addr;
	u32 logical_port_id;
	u32 port_speed;
	u32 msg_enable;
	u32 sig_comp_iv;
	u32 state;
	u8 phy_link;
	u8 full_duplex;
	u8 autoneg;
	u8 num_def_qps;
};

struct port_res_cfg {
	int max_entries_rcq;
	int max_entries_scq;
	int max_entries_sq;
	int max_entries_rq1;
	int max_entries_rq2;
	int max_entries_rq3;
};

enum ehea_flag_bits {
	__EHEA_STOP_XFER
};

void ehea_set_ethtool_ops(struct net_device *netdev);
int ehea_sense_port_attr(struct ehea_port *port);
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);

#endif /* __EHEA_H__ */