1 #ifndef _IPATH_KERNEL_H
2 #define _IPATH_KERNEL_H
/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This header file is the base header file for infinipath kernel code
 * ipath_user.h serves a similar purpose for user code.
 */
41 #include <linux/interrupt.h>
44 #include "ipath_common.h"
45 #include "ipath_debug.h"
46 #include "ipath_registers.h"
/* only s/w major version of InfiniPath we can handle */
#define IPATH_CHIP_VERS_MAJ 2U

/* don't care about this except printing */
#define IPATH_CHIP_VERS_MIN 0U

/* temporary, maybe always */
extern struct infinipath_stats ipath_stats;

/* The s/w version the chip interface is checked against. */
#define IPATH_CHIP_SWVERSION IPATH_CHIP_VERS_MAJ
59 struct ipath_portdata
{
60 void **port_rcvegrbuf
;
61 dma_addr_t
*port_rcvegrbuf_phys
;
62 /* rcvhdrq base, needs mmap before useful */
64 /* kernel virtual address where hdrqtail is updated */
65 volatile __le64
*port_rcvhdrtail_kvaddr
;
67 * temp buffer for expected send setup, allocated at open, instead
70 void *port_tid_pg_list
;
71 /* when waiting for rcv or pioavail */
72 wait_queue_head_t port_wait
;
74 * rcvegr bufs base, physical, must fit
75 * in 44 bits so 32 bit programs mmap64 44 bit works)
77 dma_addr_t port_rcvegr_phys
;
78 /* mmap of hdrq, must fit in 44 bits */
79 dma_addr_t port_rcvhdrq_phys
;
80 dma_addr_t port_rcvhdrqtailaddr_phys
;
82 * number of opens (including slave subports) on this instance
83 * (ignoring forks, dup, etc. for now)
87 * how much space to leave at start of eager TID entries for
88 * protocol use, on each TID
90 /* instead of calculating it */
92 /* non-zero if port is being shared. */
94 /* non-zero if port is being shared. */
96 /* chip offset of PIO buffers for this port */
98 /* how many alloc_pages() chunks in port_rcvegrbuf_pages */
99 u32 port_rcvegrbuf_chunks
;
100 /* how many egrbufs per chunk */
101 u32 port_rcvegrbufs_perchunk
;
102 /* order for port_rcvegrbuf_pages */
103 size_t port_rcvegrbuf_size
;
104 /* rcvhdrq size (for freeing) */
105 size_t port_rcvhdrq_size
;
106 /* next expected TID to check when looking for free */
108 /* next expected TID to check */
109 unsigned long port_flag
;
110 /* WAIT_RCV that timed out, no interrupt */
112 /* WAIT_PIO that timed out, no interrupt */
114 /* WAIT_RCV already happened, no wait */
116 /* WAIT_PIO already happened, no wait */
118 /* total number of rcvhdrqfull errors */
120 /* pid of process using this port */
122 /* same size as task_struct .comm[] */
124 /* pkeys set by this use of this port */
126 /* so file ops can get at unit */
127 struct ipath_devdata
*port_dd
;
128 /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
129 void *subport_uregbase
;
130 /* An array of pages for the eager receive buffers * N */
131 void *subport_rcvegrbuf
;
132 /* An array of pages for the eager header queue entries * N */
133 void *subport_rcvhdr_base
;
134 /* The version of the library which opened this port */
136 /* Bitmask of active slaves */
143 * control information for layered drivers
145 struct _ipath_layer
{
149 struct ipath_devdata
{
150 struct list_head ipath_list
;
152 struct ipath_kregs
const *ipath_kregs
;
153 struct ipath_cregs
const *ipath_cregs
;
155 /* mem-mapped pointer to base of chip regs */
156 u64 __iomem
*ipath_kregbase
;
157 /* end of mem-mapped chip space; range checking */
158 u64 __iomem
*ipath_kregend
;
159 /* physical address of chip for io_remap, etc. */
160 unsigned long ipath_physaddr
;
161 /* base of memory alloced for ipath_kregbase, for free */
162 u64
*ipath_kregalloc
;
164 * virtual address where port0 rcvhdrqtail updated for this unit.
165 * only written to by the chip, not the driver.
167 volatile __le64
*ipath_hdrqtailptr
;
168 /* ipath_cfgports pointers */
169 struct ipath_portdata
**ipath_pd
;
170 /* sk_buffs used by port 0 eager receive queue */
171 struct sk_buff
**ipath_port0_skbs
;
172 /* kvirt address of 1st 2k pio buffer */
173 void __iomem
*ipath_pio2kbase
;
174 /* kvirt address of 1st 4k pio buffer */
175 void __iomem
*ipath_pio4kbase
;
177 * points to area where PIOavail registers will be DMA'ed.
178 * Has to be on a page of it's own, because the page will be
179 * mapped into user program space. This copy is *ONLY* ever
180 * written by DMA, not by the driver! Need a copy per device
181 * when we get to multiple devices
183 volatile __le64
*ipath_pioavailregs_dma
;
184 /* physical address where updates occur */
185 dma_addr_t ipath_pioavailregs_phys
;
186 struct _ipath_layer ipath_layer
;
188 int (*ipath_f_intrsetup
)(struct ipath_devdata
*);
189 /* setup on-chip bus config */
190 int (*ipath_f_bus
)(struct ipath_devdata
*, struct pci_dev
*);
191 /* hard reset chip */
192 int (*ipath_f_reset
)(struct ipath_devdata
*);
193 int (*ipath_f_get_boardname
)(struct ipath_devdata
*, char *,
195 void (*ipath_f_init_hwerrors
)(struct ipath_devdata
*);
196 void (*ipath_f_handle_hwerrors
)(struct ipath_devdata
*, char *,
198 void (*ipath_f_quiet_serdes
)(struct ipath_devdata
*);
199 int (*ipath_f_bringup_serdes
)(struct ipath_devdata
*);
200 int (*ipath_f_early_init
)(struct ipath_devdata
*);
201 void (*ipath_f_clear_tids
)(struct ipath_devdata
*, unsigned);
202 void (*ipath_f_put_tid
)(struct ipath_devdata
*, u64 __iomem
*,
204 void (*ipath_f_tidtemplate
)(struct ipath_devdata
*);
205 void (*ipath_f_cleanup
)(struct ipath_devdata
*);
206 void (*ipath_f_setextled
)(struct ipath_devdata
*, u64
, u64
);
207 /* fill out chip-specific fields */
208 int (*ipath_f_get_base_info
)(struct ipath_portdata
*, void *);
209 struct ipath_ibdev
*verbs_dev
;
210 struct timer_list verbs_timer
;
211 /* total dwords sent (summed from counter) */
213 /* total dwords rcvd (summed from counter) */
215 /* total packets sent (summed from counter) */
217 /* total packets rcvd (summed from counter) */
219 /* ipath_statusp initially points to this. */
221 /* GUID for this interface, in network order */
224 * aggregrate of error bits reported since last cleared, for
225 * limiting of error reporting
227 ipath_err_t ipath_lasterror
;
229 * aggregrate of error bits reported since last cleared, for
230 * limiting of hwerror reporting
232 ipath_err_t ipath_lasthwerror
;
234 * errors masked because they occur too fast, also includes errors
235 * that are always ignored (ipath_ignorederrs)
237 ipath_err_t ipath_maskederrs
;
238 /* time in jiffies at which to re-enable maskederrs */
239 unsigned long ipath_unmasktime
;
241 * errors always ignored (masked), at least for a given
242 * chip/device, because they are wrong or not useful
244 ipath_err_t ipath_ignorederrs
;
245 /* count of egrfull errors, combined for all ports */
246 u64 ipath_last_tidfull
;
247 /* for ipath_qcheck() */
248 u64 ipath_lastport0rcv_cnt
;
249 /* template for writing TIDs */
250 u64 ipath_tidtemplate
;
251 /* value to write to free TIDs */
252 u64 ipath_tidinvalid
;
253 /* IBA6120 rcv interrupt setup */
254 u64 ipath_rhdrhead_intr_off
;
256 /* size of memory at ipath_kregbase */
258 /* number of registers used for pioavail */
260 /* IPATH_POLL, etc. */
262 /* ipath_flags driver is waiting for */
263 u32 ipath_state_wanted
;
264 /* last buffer for user use, first buf for kernel use is this
266 u32 ipath_lastport_piobuf
;
267 /* is a stats timer active */
268 u32 ipath_stats_timer_active
;
269 /* dwords sent read from counter */
271 /* dwords received read from counter */
273 /* sent packets read from counter */
275 /* received packets read from counter */
277 /* pio bufs allocated per port */
280 * number of ports configured as max; zero is set to number chip
281 * supports, less gives more pio bufs/port, etc.
284 /* port0 rcvhdrq head offset */
286 /* count of port 0 hdrqfull errors */
287 u32 ipath_p0_hdrqfull
;
290 * (*cfgports) used to suppress multiple instances of same
291 * port staying stuck at same point
293 u32
*ipath_lastrcvhdrqtails
;
295 * (*cfgports) used to suppress multiple instances of same
296 * port staying stuck at same point
298 u32
*ipath_lastegrheads
;
300 * index of last piobuffer we used. Speeds up searching, by
301 * starting at this point. Doesn't matter if multiple cpu's use and
302 * update, last updater is only write that matters. Whenever it
303 * wraps, we update shadow copies. Need a copy per device when we
304 * get to multiple devices
306 u32 ipath_lastpioindex
;
307 /* max length of freezemsg */
310 * consecutive times we wanted a PIO buffer but were unable to
313 u32 ipath_consec_nopiobuf
;
315 * hint that we should update ipath_pioavailshadow before
316 * looking for a PIO buffer
318 u32 ipath_upd_pio_shadow
;
319 /* so we can rewrite it after a chip reset */
321 /* so we can rewrite it after a chip reset */
324 /* HT/PCI Vendor ID (here for NodeInfo) */
326 /* HT/PCI Device ID (here for NodeInfo) */
328 /* offset in HT config space of slave/primary interface block */
329 u8 ipath_ht_slave_off
;
330 /* for write combining settings */
331 unsigned long ipath_wc_cookie
;
332 /* ref count for each pkey */
333 atomic_t ipath_pkeyrefs
[4];
334 /* shadow copy of all exptids physaddr; used only by funcsim */
335 u64
*ipath_tidsimshadow
;
336 /* shadow copy of struct page *'s for exp tid pages */
337 struct page
**ipath_pageshadow
;
338 /* lock to workaround chip bug 9437 */
339 spinlock_t ipath_tid_lock
;
343 * this address is mapped readonly into user processes so they can
344 * get status cheaply, whenever they want.
347 /* freeze msg if hw error put chip in freeze */
348 char *ipath_freezemsg
;
349 /* pci access data structure */
350 struct pci_dev
*pcidev
;
351 struct cdev
*user_cdev
;
352 struct cdev
*diag_cdev
;
353 struct class_device
*user_class_dev
;
354 struct class_device
*diag_class_dev
;
355 /* timer used to prevent stats overflow, error throttling, etc. */
356 struct timer_list ipath_stats_timer
;
357 /* check for stale messages in rcv queue */
358 /* only allow one intr at a time. */
359 unsigned long ipath_rcv_pending
;
360 void *ipath_dummy_hdrq
; /* used after port close */
361 dma_addr_t ipath_dummy_hdrq_phys
;
364 * Shadow copies of registers; size indicates read access size.
365 * Most of them are readonly, but some are write-only register,
366 * where we manipulate the bits in the shadow copy, and then write
367 * the shadow copy to infinipath.
369 * We deliberately make most of these 32 bits, since they have
370 * restricted range. For any that we read, we won't to generate 32
371 * bit accesses, since Opteron will generate 2 separate 32 bit HT
372 * transactions for a 64 bit read, and we want to avoid unnecessary
376 /* This is the 64 bit group */
379 * shadow of pioavail, check to be sure it's large enough at
382 unsigned long ipath_pioavailshadow
[8];
383 /* shadow of kr_gpio_out, for rmw ops */
385 /* kr_revision shadow */
388 * shadow of ibcctrl, for interrupt handling of link changes,
393 * last ibcstatus, to suppress "duplicate" status change messages,
396 u64 ipath_lastibcstat
;
397 /* hwerrmask shadow */
398 ipath_err_t ipath_hwerrmask
;
399 /* interrupt config reg shadow */
401 /* kr_sendpiobufbase value */
402 u64 ipath_piobufbase
;
404 /* these are the "32 bit" regs */
407 * number of GUIDs in the flash for this interface; may need some
408 * rethinking for setting on other ifaces
412 * the following two are 32-bit bitmasks, but {test,clear,set}_bit
413 * all expect bit fields to be "unsigned long"
415 /* shadow kr_rcvctrl */
416 unsigned long ipath_rcvctrl
;
417 /* shadow kr_sendctrl */
418 unsigned long ipath_sendctrl
;
420 /* value we put in kr_rcvhdrcnt */
422 /* value we put in kr_rcvhdrsize */
423 u32 ipath_rcvhdrsize
;
424 /* value we put in kr_rcvhdrentsize */
425 u32 ipath_rcvhdrentsize
;
426 /* offset of last entry in rcvhdrq */
428 /* kr_portcnt value */
430 /* kr_pagealign value */
432 /* number of "2KB" PIO buffers */
434 /* size in bytes of "2KB" PIO buffers */
436 /* number of "4KB" PIO buffers */
438 /* size in bytes of "4KB" PIO buffers */
440 /* kr_rcvegrbase value */
441 u32 ipath_rcvegrbase
;
442 /* kr_rcvegrcnt value */
444 /* kr_rcvtidbase value */
445 u32 ipath_rcvtidbase
;
446 /* kr_rcvtidcnt value */
452 /* kr_counterregbase */
454 /* shadow the control register contents */
456 /* shadow the gpio output contents */
458 /* PCI revision register (HTC rev on FPGA) */
461 /* chip address space used by 4k pio buffers */
463 /* The MTU programmed for this unit */
466 * The max size IB packet, included IB headers that we can send.
467 * Starts same as ipath_piosize, but is affected when ibmtu is
468 * changed, or by size of eager buffers
472 * ibmaxlen at init time, limited by chip and by receive buffer
473 * size. Not changed after init.
475 u32 ipath_init_ibmaxlen
;
476 /* size of each rcvegrbuffer */
477 u32 ipath_rcvegrbufsize
;
478 /* width (2,4,8,16,32) from HT config reg */
480 /* HT speed (200,400,800,1000) from HT config */
482 /* ports waiting for PIOavail intr */
483 unsigned long ipath_portpiowait
;
485 * number of sequential ibcstatus change for polling active/quiet
486 * (i.e., link not coming up).
489 /* low and high portions of MSI capability/vector */
491 /* saved after PCIe init for restore after reset */
493 /* MSI data (vector) saved for restore */
495 /* MLID programmed for this instance */
497 /* LID programmed for this instance */
499 /* list of pkeys programmed; 0 if not set */
502 * ASCII serial number, from flash, large enough for original
503 * all digit strings, and longer QLogic serial number format
506 /* human readable board version */
507 u8 ipath_boardversion
[80];
508 /* chip major rev, from ipath_revision */
510 /* chip minor rev, from ipath_revision */
512 /* board rev, from ipath_revision */
514 /* unit # of this chip, if present */
516 /* saved for restore after reset */
517 u8 ipath_pci_cacheline
;
518 /* LID mask control */
520 /* Rx Polarity inversion (compensate for ~tx on partner) */
523 /* local link integrity counter */
524 u32 ipath_lli_counter
;
525 /* local link integrity errors */
526 u32 ipath_lli_errors
;
528 * Above counts only cases where _successive_ LocalLinkIntegrity
529 * errors were seen in the receive headers of kern-packets.
530 * Below are the three (monotonically increasing) counters
531 * maintained via GPIO interrupts on iba6120-rev2.
533 u32 ipath_rxfc_unsupvl_errs
;
534 u32 ipath_overrun_thresh_errs
;
538 /* Private data for file operations */
539 struct ipath_filedata
{
540 struct ipath_portdata
*pd
;
544 extern struct list_head ipath_dev_list
;
545 extern spinlock_t ipath_devs_lock
;
546 extern struct ipath_devdata
*ipath_lookup(int unit
);
548 int ipath_init_chip(struct ipath_devdata
*, int);
549 int ipath_enable_wc(struct ipath_devdata
*dd
);
550 void ipath_disable_wc(struct ipath_devdata
*dd
);
551 int ipath_count_units(int *npresentp
, int *nupp
, u32
*maxportsp
);
552 void ipath_shutdown_device(struct ipath_devdata
*);
554 struct file_operations
;
555 int ipath_cdev_init(int minor
, char *name
, struct file_operations
*fops
,
556 struct cdev
**cdevp
, struct class_device
**class_devp
);
557 void ipath_cdev_cleanup(struct cdev
**cdevp
,
558 struct class_device
**class_devp
);
560 int ipath_diag_add(struct ipath_devdata
*);
561 void ipath_diag_remove(struct ipath_devdata
*);
563 extern wait_queue_head_t ipath_state_wait
;
565 int ipath_user_add(struct ipath_devdata
*dd
);
566 void ipath_user_remove(struct ipath_devdata
*dd
);
568 struct sk_buff
*ipath_alloc_skb(struct ipath_devdata
*dd
, gfp_t
);
570 extern int ipath_diag_inuse
;
572 irqreturn_t
ipath_intr(int irq
, void *devid
, struct pt_regs
*regs
);
573 void ipath_decode_err(char *buf
, size_t blen
, ipath_err_t err
);
574 #if __IPATH_INFO || __IPATH_DBG
575 extern const char *ipath_ibcstatus_str
[];
578 /* clean up any per-chip chip-specific stuff */
579 void ipath_chip_cleanup(struct ipath_devdata
*);
580 /* clean up any chip type-specific stuff */
581 void ipath_chip_done(void);
583 /* check to see if we have to force ordering for write combining */
584 int ipath_unordered_wc(void);
586 void ipath_disarm_piobufs(struct ipath_devdata
*, unsigned first
,
589 int ipath_create_rcvhdrq(struct ipath_devdata
*, struct ipath_portdata
*);
590 void ipath_free_pddata(struct ipath_devdata
*, struct ipath_portdata
*);
592 int ipath_parse_ushort(const char *str
, unsigned short *valp
);
594 void ipath_kreceive(struct ipath_devdata
*);
595 int ipath_setrcvhdrsize(struct ipath_devdata
*, unsigned);
596 int ipath_reset_device(int);
597 void ipath_get_faststats(unsigned long);
598 int ipath_set_linkstate(struct ipath_devdata
*, u8
);
599 int ipath_set_mtu(struct ipath_devdata
*, u16
);
600 int ipath_set_lid(struct ipath_devdata
*, u32
, u8
);
601 int ipath_set_rx_pol_inv(struct ipath_devdata
*dd
, u8 new_pol_inv
);
/*
 * for use in system calls, where we want to know device type, etc.
 * These all extract per-open-file state from the file's private_data,
 * which is a struct ipath_filedata * set at open time.
 */
#define port_fp(fp) ((struct ipath_filedata *)(fp)->private_data)->pd
#define subport_fp(fp) \
	((struct ipath_filedata *)(fp)->private_data)->subport
#define tidcursor_fp(fp) \
	((struct ipath_filedata *)(fp)->private_data)->tidcursor
/*
 * values for ipath_flags
 */
/* The chip is up and initted */
#define IPATH_INITTED       0x2
/* set if any user code has set kr_rcvhdrsize */
#define IPATH_RCVHDRSZ_SET  0x4
/* The chip is present and valid for accesses */
#define IPATH_PRESENT       0x8
/* HT link0 is only 8 bits wide, ignore upper byte crc errors, etc. */
#define IPATH_8BIT_IN_HT0   0x10
/* HT link1 is only 8 bits wide, ignore upper byte crc errors, etc. */
#define IPATH_8BIT_IN_HT1   0x20
/* The link is down */
#define IPATH_LINKDOWN      0x40
/* The link level is up (0x11) */
#define IPATH_LINKINIT      0x80
/* The link is in the armed (0x21) state */
#define IPATH_LINKARMED     0x100
/* The link is in the active (0x31) state */
#define IPATH_LINKACTIVE    0x200
/* link current state is unknown */
#define IPATH_LINKUNK       0x400
/* no IB cable, or no device on IB cable */
#define IPATH_NOCABLE       0x4000
/* Supports port zero per packet receive interrupts via GPIO */
#define IPATH_GPIO_INTR     0x8000
/* uses the coded 4byte TID, not 8 byte */
#define IPATH_4BYTE_TID     0x10000
/* packet/word counters are 32 bit, else those 4 counters
 * must be handled by software */
#define IPATH_32BITCOUNTERS 0x20000
/* can miss port0 rx interrupts */
#define IPATH_POLL_RX_INTR  0x40000
#define IPATH_DISABLED      0x80000	/* administratively disabled */
/* Use GPIO interrupts for new counters */
#define IPATH_GPIO_ERRINTRS 0x100000

/* Bits in GPIO for the added interrupts */
#define IPATH_GPIO_PORT0_BIT 2
#define IPATH_GPIO_RXUVL_BIT 3
#define IPATH_GPIO_OVRUN_BIT 4
#define IPATH_GPIO_LLI_BIT 5
#define IPATH_GPIO_ERRINTR_MASK 0x38

/* portdata flag bit offsets */
/* waiting for a packet to arrive */
#define IPATH_PORT_WAITING_RCV 2
/* waiting for a PIO buffer to be available */
#define IPATH_PORT_WAITING_PIO 3
664 /* free up any allocated data at closes */
665 void ipath_free_data(struct ipath_portdata
*dd
);
666 int ipath_waitfor_mdio_cmdready(struct ipath_devdata
*);
667 int ipath_waitfor_complete(struct ipath_devdata
*, ipath_kreg
, u64
, u64
*);
668 u32 __iomem
*ipath_getpiobuf(struct ipath_devdata
*, u32
*);
669 void ipath_init_iba6120_funcs(struct ipath_devdata
*);
670 void ipath_init_iba6110_funcs(struct ipath_devdata
*);
671 void ipath_get_eeprom_info(struct ipath_devdata
*);
672 u64
ipath_snap_cntr(struct ipath_devdata
*, ipath_creg
);
/*
 * number of words used for protocol header if not set by ipath_userinit();
 */
#define IPATH_DFLT_RCVHDRSIZE 9

/* MDIO command/control values */
#define IPATH_MDIO_CMD_WRITE 1
#define IPATH_MDIO_CMD_READ 2
#define IPATH_MDIO_CLD_DIV 25	/* to get 2.5 Mhz mdio clock */
#define IPATH_MDIO_CMDVALID 0x40000000	/* bit 30 */
#define IPATH_MDIO_DATAVALID 0x80000000	/* bit 31 */
#define IPATH_MDIO_CTRL_STD 0x0
686 static inline u64
ipath_mdio_req(int cmd
, int dev
, int reg
, int data
)
688 return (((u64
) IPATH_MDIO_CLD_DIV
) << 32) |
/* signal and fifo status, in bank 31 */
#define IPATH_MDIO_CTRL_XGXS_REG_8 0x8
/* controls loopback, redundancy */
#define IPATH_MDIO_CTRL_8355_REG_1 0x10
/* premph, encdec, etc. */
#define IPATH_MDIO_CTRL_8355_REG_2 0x11
/* other 8355 control registers */
#define IPATH_MDIO_CTRL_8355_REG_6 0x15
#define IPATH_MDIO_CTRL_8355_REG_9 0x18
#define IPATH_MDIO_CTRL_8355_REG_10 0x1D
706 int ipath_get_user_pages(unsigned long, size_t, struct page
**);
707 int ipath_get_user_pages_nocopy(unsigned long, struct page
**);
708 void ipath_release_user_pages(struct page
**, size_t);
709 void ipath_release_user_pages_on_close(struct page
**, size_t);
710 int ipath_eeprom_read(struct ipath_devdata
*, u8
, void *, int);
711 int ipath_eeprom_write(struct ipath_devdata
*, u8
, const void *, int);
713 /* these are used for the registers that vary with port */
714 void ipath_write_kreg_port(const struct ipath_devdata
*, ipath_kreg
,
716 u64
ipath_read_kreg64_port(const struct ipath_devdata
*, ipath_kreg
,
720 * We could have a single register get/put routine, that takes a group type,
721 * but this is somewhat clearer and cleaner. It also gives us some error
722 * checking. 64 bit register reads should always work, but are inefficient
723 * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
724 * so we use kreg32 wherever possible. User register and counter register
725 * reads are always 32 bit reads, so only one form of those routines.
729 * At the moment, none of the s-registers are writable, so no
730 * ipath_write_sreg(), and none of the c-registers are writable, so no
731 * ipath_write_creg().
735 * ipath_read_ureg32 - read 32-bit virtualized per-port register
737 * @regno: register number
740 * Return the contents of a register that is virtualized to be per port.
741 * Returns -1 on errors (not distinguishable from valid contents at
742 * runtime; we may add a separate error variable at some point).
744 static inline u32
ipath_read_ureg32(const struct ipath_devdata
*dd
,
745 ipath_ureg regno
, int port
)
747 if (!dd
->ipath_kregbase
|| !(dd
->ipath_flags
& IPATH_PRESENT
))
750 return readl(regno
+ (u64 __iomem
*)
751 (dd
->ipath_uregbase
+
752 (char __iomem
*)dd
->ipath_kregbase
+
753 dd
->ipath_palign
* port
));
757 * ipath_write_ureg - write 32-bit virtualized per-port register
759 * @regno: register number
763 * Write the contents of a register that is virtualized to be per port.
765 static inline void ipath_write_ureg(const struct ipath_devdata
*dd
,
766 ipath_ureg regno
, u64 value
, int port
)
768 u64 __iomem
*ubase
= (u64 __iomem
*)
769 (dd
->ipath_uregbase
+ (char __iomem
*) dd
->ipath_kregbase
+
770 dd
->ipath_palign
* port
);
771 if (dd
->ipath_kregbase
)
772 writeq(value
, &ubase
[regno
]);
775 static inline u32
ipath_read_kreg32(const struct ipath_devdata
*dd
,
778 if (!dd
->ipath_kregbase
|| !(dd
->ipath_flags
& IPATH_PRESENT
))
780 return readl((u32 __iomem
*) & dd
->ipath_kregbase
[regno
]);
783 static inline u64
ipath_read_kreg64(const struct ipath_devdata
*dd
,
786 if (!dd
->ipath_kregbase
|| !(dd
->ipath_flags
& IPATH_PRESENT
))
789 return readq(&dd
->ipath_kregbase
[regno
]);
792 static inline void ipath_write_kreg(const struct ipath_devdata
*dd
,
793 ipath_kreg regno
, u64 value
)
795 if (dd
->ipath_kregbase
)
796 writeq(value
, &dd
->ipath_kregbase
[regno
]);
799 static inline u64
ipath_read_creg(const struct ipath_devdata
*dd
,
802 if (!dd
->ipath_kregbase
|| !(dd
->ipath_flags
& IPATH_PRESENT
))
805 return readq(regno
+ (u64 __iomem
*)
806 (dd
->ipath_cregbase
+
807 (char __iomem
*)dd
->ipath_kregbase
));
810 static inline u32
ipath_read_creg32(const struct ipath_devdata
*dd
,
813 if (!dd
->ipath_kregbase
|| !(dd
->ipath_flags
& IPATH_PRESENT
))
815 return readl(regno
+ (u64 __iomem
*)
816 (dd
->ipath_cregbase
+
817 (char __iomem
*)dd
->ipath_kregbase
));
824 struct device_driver
;
826 extern const char ib_ipath_version
[];
828 int ipath_driver_create_group(struct device_driver
*);
829 void ipath_driver_remove_group(struct device_driver
*);
831 int ipath_device_create_group(struct device
*, struct ipath_devdata
*);
832 void ipath_device_remove_group(struct device
*, struct ipath_devdata
*);
833 int ipath_expose_reset(struct device
*);
835 int ipath_diagpkt_add(void);
836 void ipath_diagpkt_remove(void);
838 int ipath_init_ipathfs(void);
839 void ipath_exit_ipathfs(void);
840 int ipathfs_add_device(struct ipath_devdata
*);
841 int ipathfs_remove_device(struct ipath_devdata
*);
/*
 * Flush write combining store buffers (if present) and perform a write
 * barrier.  On x86_64 an explicit sfence is needed to flush WC buffers;
 * elsewhere the generic wmb() suffices.
 * NOTE(review): the #else/#endif lines were lost in extraction and
 * restored from the two visible #define arms.
 */
#if defined(CONFIG_X86_64)
#define ipath_flush_wc() asm volatile("sfence" ::: "memory")
#else
#define ipath_flush_wc() wmb()
#endif
853 extern unsigned ipath_debug
; /* debugging bit mask */
855 const char *ipath_get_unit_name(int unit
);
857 extern struct mutex ipath_mutex
;
/* driver name and character-device minor number layout */
#define IPATH_DRV_NAME		"ib_ipath"
#define IPATH_MAJOR		233
#define IPATH_USER_MINOR_BASE	0
#define IPATH_DIAGPKT_MINOR	127
#define IPATH_DIAG_MINOR_BASE	129
#define IPATH_NMINORS		255
866 #define ipath_dev_err(dd,fmt,...) \
868 const struct ipath_devdata *__dd = (dd); \
870 dev_err(&__dd->pcidev->dev, "%s: " fmt, \
871 ipath_get_unit_name(__dd->ipath_unit), \
874 printk(KERN_ERR IPATH_DRV_NAME ": %s: " fmt, \
875 ipath_get_unit_name(__dd->ipath_unit), \
881 # define __IPATH_DBG_WHICH(which,fmt,...) \
883 if(unlikely(ipath_debug&(which))) \
884 printk(KERN_DEBUG IPATH_DRV_NAME ": %s: " fmt, \
885 __func__,##__VA_ARGS__); \
888 # define ipath_dbg(fmt,...) \
889 __IPATH_DBG_WHICH(__IPATH_DBG,fmt,##__VA_ARGS__)
890 # define ipath_cdbg(which,fmt,...) \
891 __IPATH_DBG_WHICH(__IPATH_##which##DBG,fmt,##__VA_ARGS__)
893 #else /* ! _IPATH_DEBUGGING */
895 # define ipath_dbg(fmt,...)
896 # define ipath_cdbg(which,fmt,...)
898 #endif /* _IPATH_DEBUGGING */
901 * this is used for formatting hw error messages...
903 struct ipath_hwerror_msgs
{
908 #define INFINIPATH_HWE_MSG(a, b) { .mask = INFINIPATH_HWE_##a, .msg = b }
910 /* in ipath_intr.c... */
911 void ipath_format_hwerrors(u64 hwerrs
,
912 const struct ipath_hwerror_msgs
*hwerrmsgs
,
914 char *msg
, size_t lmsg
);
916 #endif /* _IPATH_KERNEL_H */