/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
34 #include <linux/spinlock.h>
35 #include <linux/idr.h>
36 #include <linux/pci.h>
37 #include <linux/delay.h>
38 #include <linux/netdevice.h>
39 #include <linux/vmalloc.h>
41 #include "ipath_kernel.h"
42 #include "ipath_verbs.h"
43 #include "ipath_common.h"
static void ipath_update_pio_bufs(struct ipath_devdata *);

const char *ipath_get_unit_name(int unit)
{
	static char iname[16];
	snprintf(iname, sizeof iname, "infinipath%u", unit);
	return iname;
}
#define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: "
#define PFX IPATH_DRV_NAME ": "

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the init code.
 */
const char ib_ipath_version[] = IPATH_IDSTR "\n";
static struct idr unit_table;
DEFINE_SPINLOCK(ipath_devs_lock);
LIST_HEAD(ipath_dev_list);

wait_queue_head_t ipath_state_wait;

unsigned ipath_debug = __IPATH_INFO;

module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "mask for debug prints");
EXPORT_SYMBOL_GPL(ipath_debug);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("QLogic <support@pathscale.com>");
MODULE_DESCRIPTION("QLogic InfiniPath driver");
const char *ipath_ibcstatus_str[] = {
	"Disabled",
	"LinkUp",
	"PollActive",
	"PollQuiet",
	"SleepDelay",
	"SleepQuiet",
	"LState6",		/* unused */
	"LState7",		/* unused */
	"CfgDebounce",
	"CfgRcvfCfg",
	"CfgWaitRmt",
	"CfgIdle",
	"RecovRetrain",
	"LState0xD",		/* unused */
	"RecovWaitRmt",
	"RecovIdle",
};
static void __devexit ipath_remove_one(struct pci_dev *);
static int __devinit ipath_init_one(struct pci_dev *,
				    const struct pci_device_id *);

/* Only needed for registration, nothing else needs this info */
#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10

/* Number of seconds before our card status check...  */
#define STATUS_TIMEOUT 60
static const struct pci_device_id ipath_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);

static struct pci_driver ipath_driver = {
	.name = IPATH_DRV_NAME,
	.probe = ipath_init_one,
	.remove = __devexit_p(ipath_remove_one),
	.id_table = ipath_pci_tbl,
};
static void ipath_check_status(struct work_struct *work)
{
	struct ipath_devdata *dd = container_of(work, struct ipath_devdata,
						status_work.work);

	/*
	 * If we don't have any interrupts, let the user know and
	 * don't bother checking again.
	 */
	if (dd->ipath_int_counter == 0)
		dev_err(&dd->pcidev->dev, "No interrupts detected.\n");
}
static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
			     u32 *bar0, u32 *bar1)
{
	int ret;

	ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, bar0);
	if (ret)
		ipath_dev_err(dd, "failed to read bar0 before enable: "
			      "error %d\n", -ret);

	ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, bar1);
	if (ret)
		ipath_dev_err(dd, "failed to read bar1 before enable: "
			      "error %d\n", -ret);

	ipath_dbg("Read bar0 %x bar1 %x\n", *bar0, *bar1);
}
static void ipath_free_devdata(struct pci_dev *pdev,
			       struct ipath_devdata *dd)
{
	unsigned long flags;

	pci_set_drvdata(pdev, NULL);

	if (dd->ipath_unit != -1) {
		spin_lock_irqsave(&ipath_devs_lock, flags);
		idr_remove(&unit_table, dd->ipath_unit);
		list_del(&dd->ipath_list);
		spin_unlock_irqrestore(&ipath_devs_lock, flags);
	}
	vfree(dd);
}
static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
{
	unsigned long flags;
	struct ipath_devdata *dd;
	int ret;

	if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
		dd = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dd = vmalloc(sizeof(*dd));
	if (!dd) {
		dd = ERR_PTR(-ENOMEM);
		goto bail;
	}
	memset(dd, 0, sizeof(*dd));
	dd->ipath_unit = -1;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	ret = idr_get_new(&unit_table, dd, &dd->ipath_unit);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate unit ID: error %d\n", -ret);
		ipath_free_devdata(pdev, dd);
		dd = ERR_PTR(ret);
		goto bail_unlock;
	}

	dd->pcidev = pdev;
	pci_set_drvdata(pdev, dd);

	INIT_DELAYED_WORK(&dd->status_work, ipath_check_status);

	list_add(&dd->ipath_list, &ipath_dev_list);

bail_unlock:
	spin_unlock_irqrestore(&ipath_devs_lock, flags);

bail:
	return dd;
}
static inline struct ipath_devdata *__ipath_lookup(int unit)
{
	return idr_find(&unit_table, unit);
}

struct ipath_devdata *ipath_lookup(int unit)
{
	struct ipath_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&ipath_devs_lock, flags);
	dd = __ipath_lookup(unit);
	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	return dd;
}
int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp)
{
	int nunits, npresent, nup;
	struct ipath_devdata *dd;
	unsigned long flags;
	u32 maxports;

	nunits = npresent = nup = maxports = 0;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
		nunits++;
		if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase)
			npresent++;
		if (dd->ipath_lid &&
		    !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN
					 | IPATH_LINKUNK)))
			nup++;
		if (dd->ipath_cfgports > maxports)
			maxports = dd->ipath_cfgports;
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	if (npresentp)
		*npresentp = npresent;
	if (nupp)
		*nupp = nup;
	if (maxportsp)
		*maxportsp = maxports;

	return nunits;
}
/*
 * These next two routines are placeholders in case we don't have per-arch
 * code for controlling write combining.  If explicit control of write
 * combining is not available, performance will probably be awful.
 */

int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
{
	return -EOPNOTSUPP;
}

void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
{
}
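
/*
 * A minimal sketch (not in the original source) of how an arch layer
 * overrides the weak fallbacks above: any strong definition with the
 * same signature wins at link time, and the x86 build of this driver
 * supplies an MTRR-based version elsewhere.  "arch_set_wc" here is a
 * hypothetical helper, shown only to illustrate the mechanism.
 */
#if 0
int ipath_enable_wc(struct ipath_devdata *dd)
{
	/* hypothetical: mark the chip's PIO region write-combining */
	return arch_set_wc(dd->ipath_physaddr, dd->ipath_palign);
}
#endif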
static int __devinit ipath_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	int ret, len, j;
	struct ipath_devdata *dd;
	unsigned long long addr;
	u32 bar0 = 0, bar1 = 0;

	dd = ipath_alloc_devdata(pdev);
	if (IS_ERR(dd)) {
		ret = PTR_ERR(dd);
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate devdata: error %d\n", -ret);
		goto bail;
	}

	ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);

	read_bars(dd, pdev, &bar0, &bar1);

	ret = pci_enable_device(pdev);
	if (ret) {
		/* This can happen iff:
		 *
		 * We did a chip reset, and then failed to reprogram the
		 * BAR, or the chip reset due to an internal error.  We then
		 * unloaded the driver and reloaded it.
		 *
		 * Both reset cases set the BAR back to initial state.  For
		 * the latter case, the AER sticky error bit at offset 0x718
		 * should be set, but the Linux kernel doesn't yet know
		 * about that, it appears.  If the original BAR was retained
		 * in the kernel data structures, this may be OK.
		 */
		ipath_dev_err(dd, "enable unit %d failed: error %d\n",
			      dd->ipath_unit, -ret);
		goto bail_devdata;
	}
	addr = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);
	ipath_cdbg(VERBOSE, "regbase (0) %llx len %d pdev->irq %d, vend %x/%x "
		   "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
		   ent->device, ent->driver_data);

	read_bars(dd, pdev, &bar0, &bar1);

	if (!bar1 && !(bar0 & ~0xf)) {
		if (addr) {
			dev_info(&pdev->dev, "BAR is 0 (probable RESET), "
				 "rewriting as %llx\n", addr);
			ret = pci_write_config_dword(
				pdev, PCI_BASE_ADDRESS_0, addr);
			if (ret) {
				ipath_dev_err(dd, "rewrite of BAR0 "
					      "failed: err %d\n", -ret);
				goto bail_disable;
			}
			ret = pci_write_config_dword(
				pdev, PCI_BASE_ADDRESS_1, addr >> 32);
			if (ret) {
				ipath_dev_err(dd, "rewrite of BAR1 "
					      "failed: err %d\n", -ret);
				goto bail_disable;
			}
		} else {
			ipath_dev_err(dd, "BAR is 0 (probable RESET), "
				      "not usable until reboot\n");
			ret = -ENODEV;
			goto bail_disable;
		}
	}

	ret = pci_request_regions(pdev, IPATH_DRV_NAME);
	if (ret) {
		dev_info(&pdev->dev, "pci_request_regions unit %u fails: "
			 "err %d\n", dd->ipath_unit, -ret);
		goto bail_disable;
	}

	ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (ret) {
		/*
		 * if the 64 bit setup fails, try 32 bit.  Some systems
		 * do not setup 64 bit maps on systems with 2GB or less
		 * memory installed.
		 */
		ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (ret) {
			dev_info(&pdev->dev,
				 "Unable to set DMA mask for unit %u: %d\n",
				 dd->ipath_unit, ret);
			goto bail_regions;
		} else {
			ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
			ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (ret)
				dev_info(&pdev->dev,
					 "Unable to set DMA consistent mask "
					 "for unit %u: %d\n",
					 dd->ipath_unit, ret);
		}
	} else {
		ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (ret)
			dev_info(&pdev->dev,
				 "Unable to set DMA consistent mask "
				 "for unit %u: %d\n",
				 dd->ipath_unit, ret);
	}

	pci_set_master(pdev);

	/*
	 * Save BARs to rewrite after device reset.  Save all 64 bits of
	 * BAR, just in case.
	 */
	dd->ipath_pcibar0 = addr;
	dd->ipath_pcibar1 = addr >> 32;
	dd->ipath_deviceid = ent->device;	/* save for later use */
	dd->ipath_vendorid = ent->vendor;

	/* setup the chip-specific functions, as early as possible. */
	switch (ent->device) {
	case PCI_DEVICE_ID_INFINIPATH_HT:
#ifdef CONFIG_HT_IRQ
		ipath_init_iba6110_funcs(dd);
		break;
#else
		ipath_dev_err(dd, "QLogic HT device 0x%x cannot work if "
			      "CONFIG_HT_IRQ is not enabled\n", ent->device);
		return -ENODEV;
#endif
	case PCI_DEVICE_ID_INFINIPATH_PE800:
#ifdef CONFIG_PCI_MSI
		ipath_init_iba6120_funcs(dd);
		break;
#else
		ipath_dev_err(dd, "QLogic PCIE device 0x%x cannot work if "
			      "CONFIG_PCI_MSI is not enabled\n", ent->device);
		return -ENODEV;
#endif
	default:
		ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
			      "failing\n", ent->device);
		return -ENODEV;
	}

	for (j = 0; j < 6; j++) {
		if (!pdev->resource[j].start)
			continue;
		ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n",
			   j, (unsigned long long)pdev->resource[j].start,
			   (unsigned long long)pdev->resource[j].end,
			   (unsigned long long)pci_resource_len(pdev, j));
	}

	if (!addr) {
		ipath_dev_err(dd, "No valid address in BAR 0!\n");
		ret = -ENODEV;
		goto bail_regions;
	}

	dd->ipath_deviceid = ent->device;	/* save for later use */
	dd->ipath_vendorid = ent->vendor;

	dd->ipath_pcirev = pdev->revision;

#if defined(__powerpc__)
	/* There isn't a generic way to specify writethrough mappings */
	dd->ipath_kregbase = __ioremap(addr, len,
				       (_PAGE_NO_CACHE|_PAGE_WRITETHRU));
#else
	dd->ipath_kregbase = ioremap_nocache(addr, len);
#endif

	if (!dd->ipath_kregbase) {
		ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
			  addr);
		ret = -ENOMEM;
		goto bail_iounmap;
	}
	dd->ipath_kregend = (u64 __iomem *)
		((void __iomem *)dd->ipath_kregbase + len);
	dd->ipath_physaddr = addr;	/* used for io_remap, etc. */

	ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
		   addr, dd->ipath_kregbase);

	/*
	 * clear ipath_flags here instead of in ipath_init_chip as it is set
	 * by ipath_setup_htconfig.
	 */
	dd->ipath_flags = 0;
	dd->ipath_lli_counter = 0;
	dd->ipath_lli_errors = 0;

	if (dd->ipath_f_bus(dd, pdev))
		ipath_dev_err(dd, "Failed to setup config space; "
			      "continuing anyway\n");

	/*
	 * set up our interrupt handler; IRQF_SHARED probably not needed,
	 * since MSI interrupts shouldn't be shared but won't hurt for now.
	 * check 0 irq after we return from chip-specific bus setup, since
	 * that can affect this due to setup
	 */
	if (!dd->ipath_irq)
		ipath_dev_err(dd, "irq is 0, BIOS error?  Interrupts won't "
			      "work\n");
	else {
		ret = request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
				  IPATH_DRV_NAME, dd);
		if (ret) {
			ipath_dev_err(dd, "Couldn't setup irq handler, "
				      "irq=%d: %d\n", dd->ipath_irq, ret);
			goto bail_iounmap;
		}
	}

	ret = ipath_init_chip(dd, 0);	/* do the chip-specific init */
	if (ret)
		goto bail_irqsetup;

	ret = ipath_enable_wc(dd);

	if (ret) {
		ipath_dev_err(dd, "Write combining not enabled "
			      "(err %d): performance may be poor\n",
			      -ret);
		ret = 0;
	}

	ipath_device_create_group(&pdev->dev, dd);
	ipathfs_add_device(dd);
	ipath_user_add(dd);
	ipath_diag_add(dd);
	ipath_register_ib_device(dd);

	/* Check that card status in STATUS_TIMEOUT seconds. */
	schedule_delayed_work(&dd->status_work, HZ * STATUS_TIMEOUT);

	goto bail;

bail_irqsetup:
	if (pdev->irq) free_irq(pdev->irq, dd);

bail_iounmap:
	iounmap((volatile void __iomem *) dd->ipath_kregbase);

bail_regions:
	pci_release_regions(pdev);

bail_disable:
	pci_disable_device(pdev);

bail_devdata:
	ipath_free_devdata(pdev, dd);

bail:
	return ret;
}
static void __devexit cleanup_device(struct ipath_devdata *dd)
{
	int port;

	if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
		/* can't do anything more with chip; needs re-init */
		*dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
		if (dd->ipath_kregbase) {
			/*
			 * if we haven't already cleaned up before these are
			 * to ensure any register reads/writes "fail" until
			 * re-init
			 */
			dd->ipath_kregbase = NULL;
			dd->ipath_uregbase = 0;
			dd->ipath_sregbase = 0;
			dd->ipath_cregbase = 0;
			dd->ipath_kregsize = 0;
		}
		ipath_disable_wc(dd);
	}

	if (dd->ipath_pioavailregs_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *) dd->ipath_pioavailregs_dma,
				  dd->ipath_pioavailregs_phys);
		dd->ipath_pioavailregs_dma = NULL;
	}
	if (dd->ipath_dummy_hdrq) {
		dma_free_coherent(&dd->pcidev->dev,
				  dd->ipath_pd[0]->port_rcvhdrq_size,
				  dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
		dd->ipath_dummy_hdrq = NULL;
	}

	if (dd->ipath_pageshadow) {
		struct page **tmpp = dd->ipath_pageshadow;
		dma_addr_t *tmpd = dd->ipath_physshadow;
		int i, cnt = 0;

		ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
			   "locked\n");
		for (port = 0; port < dd->ipath_cfgports; port++) {
			int port_tidbase = port * dd->ipath_rcvtidcnt;
			int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
			for (i = port_tidbase; i < maxtid; i++) {
				if (!tmpp[i])
					continue;
				pci_unmap_page(dd->pcidev, tmpd[i],
					       PAGE_SIZE, PCI_DMA_FROMDEVICE);
				ipath_release_user_pages(&tmpp[i], 1);
				tmpp[i] = NULL;
				cnt++;
			}
		}
		if (cnt) {
			ipath_stats.sps_pageunlocks += cnt;
			ipath_cdbg(VERBOSE, "There were still %u expTID "
				   "entries locked\n", cnt);
		}
		if (ipath_stats.sps_pagelocks ||
		    ipath_stats.sps_pageunlocks)
			ipath_cdbg(VERBOSE, "%llu pages locked, %llu "
				   "unlocked via ipath_m{un}lock\n",
				   (unsigned long long)
				   ipath_stats.sps_pagelocks,
				   (unsigned long long)
				   ipath_stats.sps_pageunlocks);

		ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
			   dd->ipath_pageshadow);
		tmpp = dd->ipath_pageshadow;
		dd->ipath_pageshadow = NULL;
		vfree(tmpp);
	}

	/*
	 * free any resources still in use (usually just kernel ports)
	 * at unload; we do for portcnt, not cfgports, because cfgports
	 * could have changed while we were loaded.
	 */
	for (port = 0; port < dd->ipath_portcnt; port++) {
		struct ipath_portdata *pd = dd->ipath_pd[port];
		dd->ipath_pd[port] = NULL;
		ipath_free_pddata(dd, pd);
	}
	kfree(dd->ipath_pd);
	/*
	 * debuggability, in case some cleanup path tries to use it
	 * after this
	 */
	dd->ipath_pd = NULL;
}
static void __devexit ipath_remove_one(struct pci_dev *pdev)
{
	struct ipath_devdata *dd = pci_get_drvdata(pdev);

	ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd);

	/*
	 * disable the IB link early, to be sure no new packets arrive, which
	 * complicates the shutdown process
	 */
	ipath_shutdown_device(dd);

	cancel_delayed_work(&dd->status_work);
	flush_scheduled_work();

	if (dd->verbs_dev)
		ipath_unregister_ib_device(dd->verbs_dev);

	ipath_diag_remove(dd);
	ipath_user_remove(dd);
	ipathfs_remove_device(dd);
	ipath_device_remove_group(&pdev->dev, dd);

	ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
		   "unit %u\n", dd, (u32) dd->ipath_unit);

	cleanup_device(dd);

	/*
	 * turn off rcv, send, and interrupts for all ports, all drivers
	 * should also hard reset the chip here?
	 * free up port 0 (kernel) rcvhdr, egr bufs, and eventually tid bufs
	 * for all versions of the driver, if they were allocated
	 */
	if (dd->ipath_irq) {
		ipath_cdbg(VERBOSE, "unit %u free irq %d\n",
			   dd->ipath_unit, dd->ipath_irq);
		dd->ipath_f_free_irq(dd);
	} else
		ipath_dbg("irq is 0, not doing free_irq "
			  "for unit %u\n", dd->ipath_unit);
	/*
	 * we check for NULL here, because it's outside
	 * the kregbase check, and we need to call it
	 * after the free_irq.  Thus it's possible that
	 * the function pointers were never initialized.
	 */
	if (dd->ipath_f_cleanup)
		/* clean up chip-specific stuff */
		dd->ipath_f_cleanup(dd);

	ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n", dd->ipath_kregbase);
	iounmap((volatile void __iomem *) dd->ipath_kregbase);
	pci_release_regions(pdev);
	ipath_cdbg(VERBOSE, "calling pci_disable_device\n");
	pci_disable_device(pdev);

	ipath_free_devdata(pdev, dd);
}
/* general driver use */
DEFINE_MUTEX(ipath_mutex);

static DEFINE_SPINLOCK(ipath_pioavail_lock);
/**
 * ipath_disarm_piobufs - cancel a range of PIO buffers
 * @dd: the infinipath device
 * @first: the first PIO buffer to cancel
 * @cnt: the number of PIO buffers to cancel
 *
 * cancel a range of PIO buffers, used when they might be armed, but
 * not triggered.  Used at init to ensure buffer state, and also user
 * process close, in case it died while writing to a PIO buffer
 */
void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
			  unsigned cnt)
{
	unsigned i, last = first + cnt;
	u64 sendctrl, sendorig;

	ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
	sendorig = dd->ipath_sendctrl;
	for (i = first; i < last; i++) {
		sendctrl = sendorig | INFINIPATH_S_DISARM |
			(i << INFINIPATH_S_DISARMPIOBUF_SHIFT);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 sendctrl);
	}

	/*
	 * Write it again with current value, in case ipath_sendctrl changed
	 * while we were looping; no critical bits that would require
	 * locking.
	 *
	 * disable PIOAVAILUPD, then re-enable, reading scratch in
	 * between.  This seems to avoid a chip timing race that causes
	 * pioavail updates to memory to stop.
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 sendorig & ~IPATH_S_PIOBUFAVAILUPD);
	sendorig = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);
}
/**
 * ipath_wait_linkstate - wait for an IB link state change to occur
 * @dd: the infinipath device
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * wait up to msecs milliseconds for IB link state change to occur for
 * now, take the easy polling route.  Currently used only by
 * ipath_set_linkstate.  Returns 0 if state reached, otherwise
 * -ETIMEDOUT state can have multiple states set, for any of several
 * transitions.
 */
static int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state,
				int msecs)
{
	dd->ipath_state_wanted = state;
	wait_event_interruptible_timeout(ipath_state_wait,
					 (dd->ipath_flags & state),
					 msecs_to_jiffies(msecs));
	dd->ipath_state_wanted = 0;

	if (!(dd->ipath_flags & state)) {
		u64 val;
		ipath_cdbg(VERBOSE, "Didn't reach linkstate %s within %u"
			   " ms\n",
			   /* test INIT ahead of DOWN, both can be set */
			   (state & IPATH_LINKINIT) ? "INIT" :
			   ((state & IPATH_LINKDOWN) ? "DOWN" :
			    ((state & IPATH_LINKARMED) ? "ARM" : "ACTIVE")),
			   msecs);
		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
		ipath_cdbg(VERBOSE, "ibcc=%llx ibcstatus=%llx (%s)\n",
			   (unsigned long long) ipath_read_kreg64(
				   dd, dd->ipath_kregs->kr_ibcctrl),
			   (unsigned long long) val,
			   ipath_ibcstatus_str[val & 0xf]);
	}
	return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
}
/*
 * Decode the error status into strings, deciding whether to always
 * print it or not depending on "normal packet errors" vs everything
 * else.  Return 1 if "real" errors, otherwise 0 if only packet
 * errors, so caller can decide what to print with the string.
 */
int ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
{
	int iserr = 1;

	*buf = '\0';
	if (err & INFINIPATH_E_PKTERRS) {
		if (!(err & ~INFINIPATH_E_PKTERRS))
			iserr = 0; // if only packet errors.
		if (ipath_debug & __IPATH_ERRPKTDBG) {
			if (err & INFINIPATH_E_REBP)
				strlcat(buf, "EBP ", blen);
			if (err & INFINIPATH_E_RVCRC)
				strlcat(buf, "VCRC ", blen);
			if (err & INFINIPATH_E_RICRC) {
				strlcat(buf, "CRC ", blen);
				// clear for check below, so only once
				err &= INFINIPATH_E_RICRC;
			}
			if (err & INFINIPATH_E_RSHORTPKTLEN)
				strlcat(buf, "rshortpktlen ", blen);
			if (err & INFINIPATH_E_SDROPPEDDATAPKT)
				strlcat(buf, "sdroppeddatapkt ", blen);
			if (err & INFINIPATH_E_SPKTLEN)
				strlcat(buf, "spktlen ", blen);
		}
		if ((err & INFINIPATH_E_RICRC) &&
		    !(err & (INFINIPATH_E_RVCRC | INFINIPATH_E_REBP)))
			strlcat(buf, "CRC ", blen);
		if (!iserr)
			goto done;
	}
	if (err & INFINIPATH_E_RHDRLEN)
		strlcat(buf, "rhdrlen ", blen);
	if (err & INFINIPATH_E_RBADTID)
		strlcat(buf, "rbadtid ", blen);
	if (err & INFINIPATH_E_RBADVERSION)
		strlcat(buf, "rbadversion ", blen);
	if (err & INFINIPATH_E_RHDR)
		strlcat(buf, "rhdr ", blen);
	if (err & INFINIPATH_E_RLONGPKTLEN)
		strlcat(buf, "rlongpktlen ", blen);
	if (err & INFINIPATH_E_RMAXPKTLEN)
		strlcat(buf, "rmaxpktlen ", blen);
	if (err & INFINIPATH_E_RMINPKTLEN)
		strlcat(buf, "rminpktlen ", blen);
	if (err & INFINIPATH_E_SMINPKTLEN)
		strlcat(buf, "sminpktlen ", blen);
	if (err & INFINIPATH_E_RFORMATERR)
		strlcat(buf, "rformaterr ", blen);
	if (err & INFINIPATH_E_RUNSUPVL)
		strlcat(buf, "runsupvl ", blen);
	if (err & INFINIPATH_E_RUNEXPCHAR)
		strlcat(buf, "runexpchar ", blen);
	if (err & INFINIPATH_E_RIBFLOW)
		strlcat(buf, "ribflow ", blen);
	if (err & INFINIPATH_E_SUNDERRUN)
		strlcat(buf, "sunderrun ", blen);
	if (err & INFINIPATH_E_SPIOARMLAUNCH)
		strlcat(buf, "spioarmlaunch ", blen);
	if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
		strlcat(buf, "sunexperrpktnum ", blen);
	if (err & INFINIPATH_E_SDROPPEDSMPPKT)
		strlcat(buf, "sdroppedsmppkt ", blen);
	if (err & INFINIPATH_E_SMAXPKTLEN)
		strlcat(buf, "smaxpktlen ", blen);
	if (err & INFINIPATH_E_SUNSUPVL)
		strlcat(buf, "sunsupVL ", blen);
	if (err & INFINIPATH_E_INVALIDADDR)
		strlcat(buf, "invalidaddr ", blen);
	if (err & INFINIPATH_E_RRCVEGRFULL)
		strlcat(buf, "rcvegrfull ", blen);
	if (err & INFINIPATH_E_RRCVHDRFULL)
		strlcat(buf, "rcvhdrfull ", blen);
	if (err & INFINIPATH_E_IBSTATUSCHANGED)
		strlcat(buf, "ibcstatuschg ", blen);
	if (err & INFINIPATH_E_RIBLOSTLINK)
		strlcat(buf, "riblostlink ", blen);
	if (err & INFINIPATH_E_HARDWARE)
		strlcat(buf, "hardware ", blen);
	if (err & INFINIPATH_E_RESET)
		strlcat(buf, "reset ", blen);
done:
	return iserr;
}
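
/*
 * A minimal usage sketch (not in the original source): callers decode
 * into a stack buffer and pick the print level from the return value;
 * "errs" is a hypothetical ipath_err_t from an error interrupt.
 */
#if 0
	char msg[128];

	if (ipath_decode_err(msg, sizeof msg, errs))
		ipath_dev_err(dd, "errors: %s\n", msg);	/* "real" errors */
	else
		ipath_cdbg(PKT, "packet errors: %s\n", msg);
#endif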
/**
 * get_rhf_errstring - decode RHF errors
 * @err: the err number
 * @msg: the output buffer
 * @len: the length of the output buffer
 *
 * only used one place now, may want more later
 */
static void get_rhf_errstring(u32 err, char *msg, size_t len)
{
	/* if no errors, and so don't need to check what's first */
	*msg = '\0';

	if (err & INFINIPATH_RHF_H_ICRCERR)
		strlcat(msg, "icrcerr ", len);
	if (err & INFINIPATH_RHF_H_VCRCERR)
		strlcat(msg, "vcrcerr ", len);
	if (err & INFINIPATH_RHF_H_PARITYERR)
		strlcat(msg, "parityerr ", len);
	if (err & INFINIPATH_RHF_H_LENERR)
		strlcat(msg, "lenerr ", len);
	if (err & INFINIPATH_RHF_H_MTUERR)
		strlcat(msg, "mtuerr ", len);
	if (err & INFINIPATH_RHF_H_IHDRERR)
		/* infinipath hdr checksum error */
		strlcat(msg, "ipathhdrerr ", len);
	if (err & INFINIPATH_RHF_H_TIDERR)
		strlcat(msg, "tiderr ", len);
	if (err & INFINIPATH_RHF_H_MKERR)
		/* bad port, offset, etc. */
		strlcat(msg, "invalid ipathhdr ", len);
	if (err & INFINIPATH_RHF_H_IBERR)
		strlcat(msg, "iberr ", len);
	if (err & INFINIPATH_RHF_L_SWA)
		strlcat(msg, "swA ", len);
	if (err & INFINIPATH_RHF_L_SWB)
		strlcat(msg, "swB ", len);
}
/**
 * ipath_get_egrbuf - get an eager buffer
 * @dd: the infinipath device
 * @bufnum: the eager buffer to get
 * @err: unused
 *
 * must only be called if ipath_pd[port] is known to be allocated
 */
static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum,
				     int err)
{
	return dd->ipath_port0_skbinfo ?
		(void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
}
/**
 * ipath_alloc_skb - allocate an skb and buffer with possible constraints
 * @dd: the infinipath device
 * @gfp_mask: the sk_buff SFP mask
 */
struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
				gfp_t gfp_mask)
{
	struct sk_buff *skb;
	u32 len;

	/*
	 * Only fully supported way to handle this is to allocate lots
	 * extra, align as needed, and then do skb_reserve().  That wastes
	 * a lot of memory...  I'll have to hack this into infinipath_copy
	 * also.
	 */

	/*
	 * We need 2 extra bytes for ipath_ether data sent in the
	 * key header.  In order to keep everything dword aligned,
	 * we'll reserve 4 bytes.
	 */
	len = dd->ipath_ibmaxlen + 4;

	if (dd->ipath_flags & IPATH_4BYTE_TID) {
		/* We need a 2KB multiple alignment, and there is no way
		 * to do it except to allocate extra and then skb_reserve
		 * enough to bring it up to the right alignment.
		 */
		len += 2047;
	}

	skb = __dev_alloc_skb(len, gfp_mask);
	if (!skb) {
		ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
			      len);
		goto bail;
	}

	skb_reserve(skb, 4);

	if (dd->ipath_flags & IPATH_4BYTE_TID) {
		u32 una = (unsigned long)skb->data & 2047;
		if (una)
			skb_reserve(skb, 2048 - una);
	}

bail:
	return skb;
}
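
/*
 * Worked example of the 2KB alignment fix-up above (illustrative, not
 * from the original source): if skb->data lands at an address ending
 * in 0x1234, then una = 0x1234 & 2047 = 0x234, and
 * skb_reserve(skb, 2048 - 0x234) advances data to the next 2KB
 * boundary, ...0x1800.
 */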
static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
			     u32 eflags,
			     u32 l,
			     u32 etail,
			     u64 *rc)
{
	char emsg[128];
	struct ipath_message_header *hdr;

	get_rhf_errstring(eflags, emsg, sizeof emsg);
	hdr = (struct ipath_message_header *)&rc[1];
	ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
		   "tlen=%x opcode=%x egridx=%x: %s\n",
		   eflags, l,
		   ipath_hdrget_rcv_type((__le32 *) rc),
		   ipath_hdrget_length_in_bytes((__le32 *) rc),
		   be32_to_cpu(hdr->bth[0]) >> 24,
		   etail, emsg);

	/* Count local link integrity errors. */
	if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) {
		u8 n = (dd->ipath_ibcctrl >>
			INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
			INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;

		if (++dd->ipath_lli_counter > n) {
			dd->ipath_lli_counter = 0;
			dd->ipath_lli_errors++;
		}
	}
}
/*
 * ipath_kreceive - receive a packet
 * @dd: the infinipath device
 *
 * called from interrupt handler for errors or receive interrupt
 */
void ipath_kreceive(struct ipath_devdata *dd)
{
	u64 *rc;
	void *ebuf;
	const u32 rsize = dd->ipath_rcvhdrentsize;	/* words */
	const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize;	/* words */
	u32 etail = -1, l, hdrqtail;
	struct ipath_message_header *hdr;
	u32 eflags, i, etype, tlen, pkttot = 0, updegr = 0, reloop = 0;
	static u64 totcalls;	/* stats, may eventually remove */

	if (!dd->ipath_hdrqtailptr) {
		ipath_dev_err(dd,
			      "hdrqtailptr not set, can't do receives\n");
		goto bail;
	}

	l = dd->ipath_port0head;
	hdrqtail = (u32) le64_to_cpu(*dd->ipath_hdrqtailptr);
	if (l == hdrqtail)
		goto bail;

reloop:
	for (i = 0; l != hdrqtail; i++) {
		u32 qp;
		u8 *bthbytes;

		rc = (u64 *) (dd->ipath_pd[0]->port_rcvhdrq + (l << 2));
		hdr = (struct ipath_message_header *)&rc[1];
		/*
		 * could make a network order version of IPATH_KD_QP, and
		 * do the obvious shift before masking to speed this up.
		 */
		qp = ntohl(hdr->bth[1]) & 0xffffff;
		bthbytes = (u8 *) hdr->bth;

		eflags = ipath_hdrget_err_flags((__le32 *) rc);
		etype = ipath_hdrget_rcv_type((__le32 *) rc);
		/* total length */
		tlen = ipath_hdrget_length_in_bytes((__le32 *) rc);
		ebuf = NULL;
		if (etype != RCVHQ_RCV_TYPE_EXPECTED) {
			/*
			 * it turns out that the chips uses an eager buffer
			 * for all non-expected packets, whether it "needs"
			 * one or not.  So always get the index, but don't
			 * set ebuf (so we try to copy data) unless the
			 * length requires it.
			 */
			etail = ipath_hdrget_index((__le32 *) rc);
			if (tlen > sizeof(*hdr) ||
			    etype == RCVHQ_RCV_TYPE_NON_KD)
				ebuf = ipath_get_egrbuf(dd, etail, 0);
		}

		/*
		 * both tiderr and ipathhdrerr are set for all plain IB
		 * packets; only ipathhdrerr should be set.
		 */

		if (etype != RCVHQ_RCV_TYPE_NON_KD && etype !=
		    RCVHQ_RCV_TYPE_ERROR && ipath_hdrget_ipath_ver(
			    hdr->iph.ver_port_tid_offset) !=
		    IPS_PROTO_VERSION) {
			ipath_cdbg(PKT, "Bad InfiniPath protocol version "
				   "%x\n", etype);
		}

		if (unlikely(eflags))
			ipath_rcv_hdrerr(dd, eflags, l, etail, rc);
		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
			ipath_ib_rcv(dd->verbs_dev, rc + 1, ebuf, tlen);
			if (dd->ipath_lli_counter)
				dd->ipath_lli_counter--;
			ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
				   "qp=%x), len %x; ignored\n",
				   etype, bthbytes[0], qp, tlen);
		}
		else if (etype == RCVHQ_RCV_TYPE_EAGER)
			ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
				   "qp=%x), len %x; ignored\n",
				   etype, bthbytes[0], qp, tlen);
		else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
			ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
				  be32_to_cpu(hdr->bth[0]) & 0xff);
		else {
			/*
			 * error packet, type of error unknown.
			 * Probably type 3, but we don't know, so don't
			 * even try to print the opcode, etc.
			 */
			ipath_dbg("Error Pkt, but no eflags! egrbuf %x, "
				  "len %x\nhdrq@%lx;hdrq+%x rhf: %llx; "
				  "hdr %llx %llx %llx %llx %llx\n",
				  etail, tlen, (unsigned long) rc, l,
				  (unsigned long long) rc[0],
				  (unsigned long long) rc[1],
				  (unsigned long long) rc[2],
				  (unsigned long long) rc[3],
				  (unsigned long long) rc[4],
				  (unsigned long long) rc[5]);
		}

		l += rsize;
		if (l >= maxcnt)
			l = 0;
		if (etype != RCVHQ_RCV_TYPE_EXPECTED)
			updegr = 1;
		/*
		 * update head regs on last packet, and every 16 packets.
		 * Reduce bus traffic, while still trying to prevent
		 * rcvhdrq overflows, for when the queue is nearly full
		 */
		if (l == hdrqtail || (i && !(i & 0xf))) {
			u64 lval;
			if (l == hdrqtail)
				/* request IBA6120 interrupt only on last */
				lval = dd->ipath_rhdrhead_intr_off | l;
			else
				lval = l;
			(void)ipath_write_ureg(dd, ur_rcvhdrhead, lval, 0);
			if (updegr) {
				(void)ipath_write_ureg(dd, ur_rcvegrindexhead,
						       etail, 0);
				updegr = 0;
			}
		}
	}

	if (!dd->ipath_rhdrhead_intr_off && !reloop) {
		/* IBA6110 workaround; we can have a race clearing chip
		 * interrupt with another interrupt about to be delivered,
		 * and can clear it before it is delivered on the GPIO
		 * workaround.  By doing the extra check here for the
		 * in-memory tail register updating while we were doing
		 * earlier packets, we "almost" guarantee we have covered
		 * that case.
		 */
		u32 hqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
		if (hqtail != hdrqtail) {
			hdrqtail = hqtail;
			reloop = 1; /* loop 1 extra time at most */
			goto reloop;
		}
	}

	pkttot += i;

	dd->ipath_port0head = l;

	if (pkttot > ipath_stats.sps_maxpkts_call)
		ipath_stats.sps_maxpkts_call = pkttot;
	ipath_stats.sps_port0pkts += pkttot;
	ipath_stats.sps_avgpkts_call =
		ipath_stats.sps_port0pkts / ++totcalls;

bail:;
}
/**
 * ipath_update_pio_bufs - update shadow copy of the PIO availability map
 * @dd: the infinipath device
 *
 * called whenever our local copy indicates we have run out of send buffers
 * NOTE: This can be called from interrupt context by some code
 * and from non-interrupt context by ipath_getpiobuf().
 */
static void ipath_update_pio_bufs(struct ipath_devdata *dd)
{
	unsigned long flags;
	unsigned i;
	const unsigned piobregs = (unsigned)dd->ipath_pioavregs;

	/* If the generation (check) bits have changed, then we update the
	 * busy bit for the corresponding PIO buffer.  This algorithm will
	 * modify positions to the value they already have in some cases
	 * (i.e., no change), but it's faster than changing only the bits
	 * that have changed.
	 *
	 * We would like to do this atomically, to avoid spinlocks in the
	 * critical send path, but that's not really possible, given the
	 * type of changes, and that this routine could be called on
	 * multiple cpu's simultaneously, so we lock in this routine only,
	 * to avoid conflicting updates; all we change is the shadow, and
	 * it's a single 64 bit memory location, so by definition the update
	 * is atomic in terms of what other cpu's can see in testing the
	 * bits.  The spin_lock overhead isn't too bad, since it only
	 * happens when all buffers are in use, so only cpu overhead, not
	 * latency or bandwidth is affected.
	 */
#define _IPATH_ALL_CHECKBITS 0x5555555555555555ULL
	if (!dd->ipath_pioavailregs_dma) {
		ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
		return;
	}
	if (ipath_debug & __IPATH_VERBDBG) {
		/* only if packet debug and verbose */
		volatile __le64 *dma = dd->ipath_pioavailregs_dma;
		unsigned long *shadow = dd->ipath_pioavailshadow;

		ipath_cdbg(PKT, "Refill avail, dma0=%llx shad0=%lx, "
			   "d1=%llx s1=%lx, d2=%llx s2=%lx, d3=%llx "
			   "s3=%lx\n",
			   (unsigned long long) le64_to_cpu(dma[0]),
			   shadow[0],
			   (unsigned long long) le64_to_cpu(dma[1]),
			   shadow[1],
			   (unsigned long long) le64_to_cpu(dma[2]),
			   shadow[2],
			   (unsigned long long) le64_to_cpu(dma[3]),
			   shadow[3]);
		if (piobregs > 4)
			ipath_cdbg(
				PKT, "2nd group, dma4=%llx shad4=%lx, "
				"d5=%llx s5=%lx, d6=%llx s6=%lx, "
				"d7=%llx s7=%lx\n",
				(unsigned long long) le64_to_cpu(dma[4]),
				shadow[4],
				(unsigned long long) le64_to_cpu(dma[5]),
				shadow[5],
				(unsigned long long) le64_to_cpu(dma[6]),
				shadow[6],
				(unsigned long long) le64_to_cpu(dma[7]),
				shadow[7]);
	}

	spin_lock_irqsave(&ipath_pioavail_lock, flags);
	for (i = 0; i < piobregs; i++) {
		u64 pchbusy, pchg, piov, pnew;
		/*
		 * Chip Errata: bug 6641; even and odd qwords>3 are swapped
		 */
		if (i > 3) {
			if (i & 1)
				piov = le64_to_cpu(
					dd->ipath_pioavailregs_dma[i - 1]);
			else
				piov = le64_to_cpu(
					dd->ipath_pioavailregs_dma[i + 1]);
		} else
			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
		pchg = _IPATH_ALL_CHECKBITS &
			~(dd->ipath_pioavailshadow[i] ^ piov);
		pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
		if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
			pnew = dd->ipath_pioavailshadow[i] & ~pchbusy;
			pnew |= piov & pchbusy;
			dd->ipath_pioavailshadow[i] = pnew;
		}
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
}
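
/*
 * Worked example of the shadow update above (illustrative, not from
 * the original source).  Each buffer owns a generation ("check") bit
 * and a busy bit.  Suppose one buffer's shadow shows check=0/busy=1
 * while the DMA copy now shows check=1: pchg picks up that check bit,
 * pchbusy shifts it onto the busy position, and since the shadow's
 * busy bit is set, the code clears busy and copies in the new
 * generation value -- marking the buffer available again.
 */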
/**
 * ipath_setrcvhdrsize - set the receive header size
 * @dd: the infinipath device
 * @rhdrsize: the receive header size
 *
 * called from user init code, and also layered driver init
 */
int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
{
	int ret = 0;

	if (dd->ipath_flags & IPATH_RCVHDRSZ_SET) {
		if (dd->ipath_rcvhdrsize != rhdrsize) {
			dev_info(&dd->pcidev->dev,
				 "Error: can't set protocol header "
				 "size %u, already %u\n",
				 rhdrsize, dd->ipath_rcvhdrsize);
			ret = -EAGAIN;
		} else
			ipath_cdbg(VERBOSE, "Reuse same protocol header "
				   "size %u\n", dd->ipath_rcvhdrsize);
	} else if (rhdrsize > (dd->ipath_rcvhdrentsize -
			       (sizeof(u64) / sizeof(u32)))) {
		ipath_dbg("Error: can't set protocol header size %u "
			  "(> max %u)\n", rhdrsize,
			  dd->ipath_rcvhdrentsize -
			  (u32) (sizeof(u64) / sizeof(u32)));
		ret = -EOVERFLOW;
	} else {
		dd->ipath_flags |= IPATH_RCVHDRSZ_SET;
		dd->ipath_rcvhdrsize = rhdrsize;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
				 dd->ipath_rcvhdrsize);
		ipath_cdbg(VERBOSE, "Set protocol header size to %u\n",
			   dd->ipath_rcvhdrsize);
	}

	return ret;
}
/**
 * ipath_getpiobuf - find an available pio buffer
 * @dd: the infinipath device
 * @pbufnum: the buffer number is placed here
 *
 * do appropriate marking as busy, etc.
 * returns buffer number if one found (>=0), negative number is error.
 * Used by ipath_layer_send
 */
u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum)
{
	int i, j, starti, updated = 0;
	unsigned piobcnt, iter;
	unsigned long flags;
	unsigned long *shadow = dd->ipath_pioavailshadow;
	u32 __iomem *buf;

	piobcnt = (unsigned)(dd->ipath_piobcnt2k
			     + dd->ipath_piobcnt4k);
	starti = dd->ipath_lastport_piobuf;
	iter = piobcnt - starti;
	if (dd->ipath_upd_pio_shadow) {
		/*
		 * Minor optimization.  If we had no buffers on last call,
		 * start out by doing the update; continue and do scan even
		 * if no buffers were updated, to be paranoid
		 */
		ipath_update_pio_bufs(dd);
		/* we scanned here, don't do it at end of scan */
		updated = 1;
		i = starti;
	} else
		i = dd->ipath_lastpioindex;

rescan:
	/*
	 * while test_and_set_bit() is atomic, we do that and then the
	 * change_bit(), and the pair is not.  See if this is the cause
	 * of the remaining armlaunch errors.
	 */
	spin_lock_irqsave(&ipath_pioavail_lock, flags);
	for (j = 0; j < iter; j++, i++) {
		if (i >= piobcnt)
			i = starti;
		/*
		 * To avoid bus lock overhead, we first find a candidate
		 * buffer, then do the test and set, and continue if that
		 * fails.
		 */
		if (test_bit((2 * i) + 1, shadow) ||
		    test_and_set_bit((2 * i) + 1, shadow))
			continue;
		/* flip generation bit */
		change_bit(2 * i, shadow);
		break;
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);

	if (j == iter) {
		volatile __le64 *dma = dd->ipath_pioavailregs_dma;

		/*
		 * first time through; shadow exhausted, but may be real
		 * buffers available, so go see; if any updated, rescan
		 * (once)
		 */
		if (!updated) {
			ipath_update_pio_bufs(dd);
			updated = 1;
			i = starti;
			goto rescan;
		}
		dd->ipath_upd_pio_shadow = 1;
		/*
		 * not atomic, but if we lose one once in a while, that's OK
		 */
		ipath_stats.sps_nopiobufs++;
		if (!(++dd->ipath_consec_nopiobuf % 100000)) {
			ipath_dbg(
				"%u pio sends with no bufavail; dmacopy: "
				"%llx %llx %llx %llx; shadow: "
				"%lx %lx %lx %lx\n",
				dd->ipath_consec_nopiobuf,
				(unsigned long long) le64_to_cpu(dma[0]),
				(unsigned long long) le64_to_cpu(dma[1]),
				(unsigned long long) le64_to_cpu(dma[2]),
				(unsigned long long) le64_to_cpu(dma[3]),
				shadow[0], shadow[1], shadow[2],
				shadow[3]);
			/*
			 * 4 buffers per byte, 4 registers above, cover rest
			 * below
			 */
			if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
			    (sizeof(shadow[0]) * 4 * 4))
				ipath_dbg("2nd group: dmacopy: %llx %llx "
					  "%llx %llx; shadow: %lx %lx "
					  "%lx %lx\n",
					  (unsigned long long)
					  le64_to_cpu(dma[4]),
					  (unsigned long long)
					  le64_to_cpu(dma[5]),
					  (unsigned long long)
					  le64_to_cpu(dma[6]),
					  (unsigned long long)
					  le64_to_cpu(dma[7]),
					  shadow[4], shadow[5],
					  shadow[6], shadow[7]);
		}
		buf = NULL;
		goto bail;
	}

	/*
	 * set next starting place.  Since it's just an optimization,
	 * it doesn't matter who wins on this, so no locking
	 */
	dd->ipath_lastpioindex = i + 1;
	if (dd->ipath_upd_pio_shadow)
		dd->ipath_upd_pio_shadow = 0;
	if (dd->ipath_consec_nopiobuf)
		dd->ipath_consec_nopiobuf = 0;
	if (i < dd->ipath_piobcnt2k)
		buf = (u32 __iomem *) (dd->ipath_pio2kbase +
				       i * dd->ipath_palign);
	else
		buf = (u32 __iomem *)
			(dd->ipath_pio4kbase +
			 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
	ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
		   i, (i < dd->ipath_piobcnt2k) ? 2 : 4, buf);
	if (pbufnum)
		*pbufnum = i;

bail:
	return buf;
}
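
/*
 * A minimal usage sketch (not in the original source): a sender grabs
 * a buffer, writes the PBC (length/control) word and then the header
 * through the chip mapping; "pbc", "hdr" and "hdrwords" are
 * hypothetical locals.
 */
#if 0
	u32 bufnum;
	u32 __iomem *piobuf = ipath_getpiobuf(dd, &bufnum);

	if (!piobuf)
		return -EBUSY;	/* none free; retry after pioavail update */
	writeq(pbc, (u64 __iomem *) piobuf);
	__iowrite32_copy(piobuf + 2, hdr, hdrwords);
#endif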
/**
 * ipath_create_rcvhdrq - create a receive header queue
 * @dd: the infinipath device
 * @pd: the port data
 *
 * this must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int ipath_create_rcvhdrq(struct ipath_devdata *dd,
			 struct ipath_portdata *pd)
{
	int ret = 0;

	if (!pd->port_rcvhdrq) {
		dma_addr_t phys_hdrqtail;
		gfp_t gfp_flags = GFP_USER | __GFP_COMP;
		int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
				sizeof(u32), PAGE_SIZE);

		pd->port_rcvhdrq = dma_alloc_coherent(
			&dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
			gfp_flags);

		if (!pd->port_rcvhdrq) {
			ipath_dev_err(dd, "attempt to allocate %d bytes "
				      "for port %u rcvhdrq failed\n",
				      amt, pd->port_port);
			ret = -ENOMEM;
			goto bail;
		}
		pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
			&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, GFP_KERNEL);
		if (!pd->port_rcvhdrtail_kvaddr) {
			ipath_dev_err(dd, "attempt to allocate 1 page "
				      "for port %u rcvhdrqtailaddr failed\n",
				      pd->port_port);
			ret = -ENOMEM;
			dma_free_coherent(&dd->pcidev->dev, amt,
					  pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
			pd->port_rcvhdrq = NULL;
			goto bail;
		}
		pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;

		pd->port_rcvhdrq_size = amt;

		ipath_cdbg(VERBOSE, "%d pages at %p (phys %lx) size=%lu "
			   "for port %u rcvhdr Q\n",
			   amt >> PAGE_SHIFT, pd->port_rcvhdrq,
			   (unsigned long) pd->port_rcvhdrq_phys,
			   (unsigned long) pd->port_rcvhdrq_size,
			   pd->port_port);

		ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx physical\n",
			   pd->port_port,
			   (unsigned long long) phys_hdrqtail);
	} else {
		ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
			   "hdrtailaddr@%p %llx physical\n",
			   pd->port_port, pd->port_rcvhdrq,
			   (unsigned long long) pd->port_rcvhdrq_phys,
			   pd->port_rcvhdrtail_kvaddr, (unsigned long long)
			   pd->port_rcvhdrqtailaddr_phys);

		/* clear for security and sanity on each use */
		memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
		memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
	}

	/*
	 * tell chip each time we init it, even if we are re-using previous
	 * memory (we zero the register at process close)
	 */
	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
			      pd->port_port, pd->port_rcvhdrqtailaddr_phys);
	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
			      pd->port_port, pd->port_rcvhdrq_phys);

	ret = 0;

bail:
	return ret;
}
int ipath_waitfor_complete(struct ipath_devdata *dd, ipath_kreg reg_id,
			   u64 bits_to_wait_for, u64 * valp)
{
	unsigned long timeout;
	u64 lastval, val;
	int ret;

	lastval = ipath_read_kreg64(dd, reg_id);
	/* wait a ridiculously long time */
	timeout = jiffies + msecs_to_jiffies(5);
	do {
		val = ipath_read_kreg64(dd, reg_id);
		/* set so they have something, even on failures. */
		*valp = val;
		if ((val & bits_to_wait_for) == bits_to_wait_for) {
			ret = 0;
			break;
		}
		if (val != lastval)
			ipath_cdbg(VERBOSE, "Changed from %llx to %llx, "
				   "waiting for %llx bits\n",
				   (unsigned long long) lastval,
				   (unsigned long long) val,
				   (unsigned long long) bits_to_wait_for);
		lastval = val;
		if (time_after(jiffies, timeout)) {
			ipath_dbg("Didn't get bits %llx in register 0x%x, "
				  "got %llx\n",
				  (unsigned long long) bits_to_wait_for,
				  reg_id, (unsigned long long) *valp);
			ret = -ENODEV;
			break;
		}
	} while (1);

	return ret;
}
/**
 * ipath_waitfor_mdio_cmdready - wait for last command to complete
 * @dd: the infinipath device
 *
 * Like ipath_waitfor_complete(), but we wait for the CMDVALID bit to go
 * away indicating the last command has completed.  It doesn't return data
 */
int ipath_waitfor_mdio_cmdready(struct ipath_devdata *dd)
{
	unsigned long timeout;
	u64 val;
	int ret;

	/* wait a ridiculously long time */
	timeout = jiffies + msecs_to_jiffies(5);
	do {
		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_mdio);
		if (!(val & IPATH_MDIO_CMDVALID)) {
			ret = 0;
			break;
		}
		if (time_after(jiffies, timeout)) {
			ipath_dbg("CMDVALID stuck in mdio reg? (%llx)\n",
				  (unsigned long long) val);
			ret = -ENODEV;
			break;
		}
	} while (1);

	return ret;
}
/*
 * Flush all sends that might be in the ready to send state, as well as any
 * that are in the process of being sent.  Used whenever we need to be
 * sure the send side is idle.  Cleans up all buffer state by canceling
 * all pio buffers, and issuing an abort, which cleans up anything in the
 * launch fifo.  The cancel is superfluous on some chip versions, but
 * it's safer to always do it.
 * PIOAvail bits are updated by the chip as if normal send had happened.
 */
void ipath_cancel_sends(struct ipath_devdata *dd)
{
	ipath_dbg("Cancelling all in-progress send buffers\n");
	dd->ipath_lastcancel = jiffies + HZ / 2; /* skip armlaunch errs a bit */
	/*
	 * the abort bit is auto-clearing.  We read scratch to be sure
	 * that cancels and the abort have taken effect in the chip.
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 INFINIPATH_S_ABORT);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	ipath_disarm_piobufs(dd, 0,
			     (unsigned)(dd->ipath_piobcnt2k + dd->ipath_piobcnt4k));

	/* and again, be sure all have hit the chip */
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
}
static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
{
	static const char *what[4] = {
		[0] = "DOWN",
		[INFINIPATH_IBCC_LINKCMD_INIT] = "INIT",
		[INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
		[INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
	};
	int linkcmd = (which >> INFINIPATH_IBCC_LINKCMD_SHIFT) &
			INFINIPATH_IBCC_LINKCMD_MASK;

	ipath_cdbg(VERBOSE, "Trying to move unit %u to %s, current ltstate "
		   "is %s\n", dd->ipath_unit,
		   what[linkcmd],
		   ipath_ibcstatus_str[
			   (ipath_read_kreg64
			    (dd, dd->ipath_kregs->kr_ibcstatus) >>
			    INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
			   INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]);
	/* flush all queued sends when going to DOWN or INIT, to be sure that
	 * they don't block MAD packets */
	if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT)
		ipath_cancel_sends(dd);

	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
			 dd->ipath_ibcctrl | which);
}
int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
{
	u32 lstate;
	int ret;

	switch (newstate) {
	case IPATH_IB_LINKDOWN:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_SLEEP:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_DISABLE:
		ipath_set_ib_lstate(dd,
				    INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKINIT:
		if (dd->ipath_flags & IPATH_LINKINIT) {
			ret = 0;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKINIT;
		break;

	case IPATH_IB_LINKARM:
		if (dd->ipath_flags & IPATH_LINKARMED) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags &
		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		/*
		 * Since the port can transition to ACTIVE by receiving
		 * a non VL 15 packet, wait for either state.
		 */
		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINKACTIVE:
		if (dd->ipath_flags & IPATH_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINK_LOOPBACK:
		dev_info(&dd->pcidev->dev, "Enabling IB local loopback\n");
		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		ret = 0;
		goto bail; // no state change to wait for

	case IPATH_IB_LINK_EXTERNAL:
		dev_info(&dd->pcidev->dev, "Disabling IB local loopback (normal)\n");
		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		ret = 0;
		goto bail; // no state change to wait for

	default:
		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
		ret = -EINVAL;
		goto bail;
	}

	ret = ipath_wait_linkstate(dd, lstate, 2000);

bail:
	return ret;
}
/**
 * ipath_set_mtu - set the MTU
 * @dd: the infinipath device
 * @arg: the new MTU
 *
 * we can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.  For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link initialize (IPATH_IBSTATE_INIT) state...
 */
int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
{
	u32 piosize;
	int changed = 0;
	int ret;

	/*
	 * mtu is IB data payload max.  It's the largest power of 2 less
	 * than piosize (or even larger, since it only really controls the
	 * largest we can receive; we can send the max of the mtu and
	 * piosize).  We check that it's one of the valid IB sizes.
	 */
	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    arg != 4096) {
		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
		ret = -EINVAL;
		goto bail;
	}
	if (dd->ipath_ibmtu == arg) {
		ret = 0;	/* same as current */
		goto bail;
	}

	piosize = dd->ipath_ibmaxlen;
	dd->ipath_ibmtu = arg;

	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != dd->ipath_init_ibmaxlen) {
			dd->ipath_ibmaxlen = piosize;
			changed = 1;
		}
	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
		piosize = arg + IPATH_PIO_MAXIBHDR;
		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
			   arg);
		dd->ipath_ibmaxlen = piosize;
		changed = 1;
	}

	if (changed) {
		/*
		 * set the IBC maxpktlength to the size of our pio
		 * buffers in words
		 */
		u64 ibc = dd->ipath_ibcctrl;
		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
			 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);

		piosize = piosize - 2 * sizeof(u32);	/* ignore pbc */
		dd->ipath_ibmaxlen = piosize;
		piosize /= sizeof(u32);	/* in words */
		/*
		 * for ICRC, which we only send in diag test pkt mode, and
		 * we don't need to worry about that for mtu
		 */
		piosize += 1;

		ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
		dd->ipath_ibcctrl = ibc;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		dd->ipath_f_tidtemplate(dd);
	}

	ret = 0;

bail:
	return ret;
}
int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
{
	dd->ipath_lid = arg;
	dd->ipath_lmc = lmc;

	return 0;
}
/**
 * ipath_write_kreg_port - write a device's per-port 64-bit kernel register
 * @dd: the infinipath device
 * @regno: the register number to write
 * @port: the port containing the register
 * @value: the value to write
 *
 * Registers that vary with the chip implementation constants (port)
 * use this routine.
 */
void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
			   unsigned port, u64 value)
{
	u16 where;

	if (port < dd->ipath_portcnt &&
	    (regno == dd->ipath_kregs->kr_rcvhdraddr ||
	     regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
		where = regno + port;
	else
		where = -1;

	ipath_write_kreg(dd, where, value);
}
/*
 * Following deal with the "obviously simple" task of overriding the state
 * of the LEDs, which normally indicate link physical and logical status.
 * The complications arise in dealing with different hardware mappings
 * and the board-dependent routine being called from interrupts.
 * And then there's the requirement to _flash_ them.
 */
#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
/* Below is "non-zero" to force override, but both actual LEDs are off */
#define LED_OVER_BOTH_OFF (8)
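
/*
 * Worked example of the override encoding (illustrative, not from the
 * original source): bits 3:0 select LED phase 0, bits 7:4 phase 1, and
 * bits 15:8 the blink frequency.  val = (4 << LED_OVER_FREQ_SHIFT) |
 * (0x0 << 4) | 0xF alternates all-on and all-off phases, each lasting
 * (HZ << 4) / 4 jiffies; LED_OVER_BOTH_OFF is non-zero (so the
 * override stays in force) while both of its phases drive the LEDs off.
 */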
void ipath_run_led_override(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
	int timeoff;
	int pidx;
	u64 lstate, ltstate, val;

	if (!(dd->ipath_flags & IPATH_INITTED))
		return;

	pidx = dd->ipath_led_override_phase++ & 1;
	dd->ipath_led_override = dd->ipath_led_override_vals[pidx];
	timeoff = dd->ipath_led_override_timeoff;

	/*
	 * below potentially restores the LED values per current status,
	 * should also possibly setup the traffic-blink register,
	 * but leave that to per-chip functions.
	 */
	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
	ltstate = (val >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
		INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
	lstate = (val >> INFINIPATH_IBCS_LINKSTATE_SHIFT) &
		INFINIPATH_IBCS_LINKSTATE_MASK;

	dd->ipath_f_setextled(dd, lstate, ltstate);
	mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff);
}
void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
{
	int timeoff, freq;

	if (!(dd->ipath_flags & IPATH_INITTED))
		return;

	/* First check if we are blinking. If not, use 1HZ polling */
	timeoff = HZ;
	freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;

	if (freq) {
		/* For blink, set each phase from one nybble of val */
		dd->ipath_led_override_vals[0] = val & 0xF;
		dd->ipath_led_override_vals[1] = (val >> 4) & 0xF;
		timeoff = (HZ << 4)/freq;
	} else {
		/* Non-blink set both phases the same. */
		dd->ipath_led_override_vals[0] = val & 0xF;
		dd->ipath_led_override_vals[1] = val & 0xF;
	}
	dd->ipath_led_override_timeoff = timeoff;

	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the function will be called soon, to look at our request.
	 */
	if (atomic_inc_return(&dd->ipath_led_override_timer_active) == 1) {
		/* Need to start timer */
		init_timer(&dd->ipath_led_override_timer);
		dd->ipath_led_override_timer.function =
			ipath_run_led_override;
		dd->ipath_led_override_timer.data = (unsigned long) dd;
		dd->ipath_led_override_timer.expires = jiffies + 1;
		add_timer(&dd->ipath_led_override_timer);
	} else
		atomic_dec(&dd->ipath_led_override_timer_active);
}
/**
 * ipath_shutdown_device - shut down a device
 * @dd: the infinipath device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be setup again by ipath_init_chip(dd,1)
 */
void ipath_shutdown_device(struct ipath_devdata *dd)
{
	ipath_dbg("Shutting down the device\n");

	dd->ipath_flags |= IPATH_LINKUNK;
	dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
			     IPATH_LINKINIT | IPATH_LINKARMED |
			     IPATH_LINKACTIVE);
	*dd->ipath_statusp &= ~(IPATH_STATUS_IB_CONF |
				IPATH_STATUS_IB_READY);

	/* mask interrupts, but not errors */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);

	dd->ipath_rcvctrl = 0;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);

	/*
	 * gracefully stop all sends allowing any in progress to trickle out
	 * first.
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL);
	/* flush it */
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	/*
	 * enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(5);

	ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
			    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
	ipath_cancel_sends(dd);

	/* disable IBC */
	dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
			 dd->ipath_control | INFINIPATH_C_FREEZEMODE);

	/*
	 * clear SerdesEnable and turn the leds off; do this here because
	 * we are unloading, so don't count on interrupts to move along.
	 * Turn the LEDs off explicitly for the same reason.
	 */
	dd->ipath_f_quiet_serdes(dd);
	dd->ipath_f_setextled(dd, 0, 0);

	if (dd->ipath_stats_timer_active) {
		del_timer_sync(&dd->ipath_stats_timer);
		dd->ipath_stats_timer_active = 0;
	}

	/*
	 * clear all interrupts and errors, so that the next time the driver
	 * is loaded or device is enabled, we know that whatever is set
	 * happened while we were unloaded
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
			 ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);

	ipath_cdbg(VERBOSE, "Flush time and errors to EEPROM\n");
	ipath_update_eeprom_log(dd);
}
/**
 * ipath_free_pddata - free a port's allocated data
 * @dd: the infinipath device
 * @pd: the portdata structure
 *
 * free up any allocated data for a port
 * This should not touch anything that would affect a simultaneous
 * re-allocation of port data, because it is called after ipath_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 * (The only exception to global state is freeing the port0 port0_skbs.)
 */
void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
{
	if (!pd)
		return;

	if (pd->port_rcvhdrq) {
		ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
			   "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
			   (unsigned long) pd->port_rcvhdrq_size);
		dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
				  pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
		pd->port_rcvhdrq = NULL;
		if (pd->port_rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  pd->port_rcvhdrtail_kvaddr,
					  pd->port_rcvhdrqtailaddr_phys);
			pd->port_rcvhdrtail_kvaddr = NULL;
		}
	}
	if (pd->port_port && pd->port_rcvegrbuf) {
		unsigned e;

		for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
			void *base = pd->port_rcvegrbuf[e];
			size_t size = pd->port_rcvegrbuf_size;

			ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
				   "chunk %u/%u\n", base,
				   (unsigned long) size,
				   e, pd->port_rcvegrbuf_chunks);
			dma_free_coherent(&dd->pcidev->dev, size,
					  base, pd->port_rcvegrbuf_phys[e]);
		}
		kfree(pd->port_rcvegrbuf);
		pd->port_rcvegrbuf = NULL;
		kfree(pd->port_rcvegrbuf_phys);
		pd->port_rcvegrbuf_phys = NULL;
		pd->port_rcvegrbuf_chunks = 0;
	} else if (pd->port_port == 0 && dd->ipath_port0_skbinfo) {
		unsigned e;
		struct ipath_skbinfo *skbinfo = dd->ipath_port0_skbinfo;

		dd->ipath_port0_skbinfo = NULL;
		ipath_cdbg(VERBOSE, "free closed port %d "
			   "ipath_port0_skbinfo @ %p\n", pd->port_port,
			   skbinfo);
		for (e = 0; e < dd->ipath_rcvegrcnt; e++)
			if (skbinfo[e].skb) {
				pci_unmap_single(dd->pcidev, skbinfo[e].phys,
						 dd->ipath_ibmaxlen,
						 PCI_DMA_FROMDEVICE);
				dev_kfree_skb(skbinfo[e].skb);
			}
		vfree(skbinfo);
	}
	kfree(pd->port_tid_pg_list);
	vfree(pd->subport_uregbase);
	vfree(pd->subport_rcvegrbuf);
	vfree(pd->subport_rcvhdr_base);
	kfree(pd);
}
static int __init infinipath_init(void)
{
	int ret;

	if (ipath_debug & __IPATH_DBG)
		printk(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version);

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&unit_table);
	if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto bail;
	}

	ret = pci_register_driver(&ipath_driver);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Unable to register driver: error %d\n", -ret);
		goto bail_unit;
	}

	ret = ipath_driver_create_group(&ipath_driver.driver);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME ": Unable to create driver "
		       "sysfs entries: error %d\n", -ret);
		goto bail_pci;
	}

	ret = ipath_init_ipathfs();
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
		       "ipathfs: error %d\n", -ret);
		goto bail_group;
	}

	goto bail;

bail_group:
	ipath_driver_remove_group(&ipath_driver.driver);

bail_pci:
	pci_unregister_driver(&ipath_driver);

bail_unit:
	idr_destroy(&unit_table);

bail:
	return ret;
}
static void __exit infinipath_cleanup(void)
{
	ipath_exit_ipathfs();

	ipath_driver_remove_group(&ipath_driver.driver);

	ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
	pci_unregister_driver(&ipath_driver);

	idr_destroy(&unit_table);
}
/**
 * ipath_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload).  We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize.  For
 * now, we only allow this if no user ports are open that use chip resources
 */
int ipath_reset_device(int unit)
{
	int ret, i;
	struct ipath_devdata *dd = ipath_lookup(unit);

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	if (atomic_read(&dd->ipath_led_override_timer_active)) {
		/* Need to stop LED timer, _then_ shut off LEDs */
		del_timer_sync(&dd->ipath_led_override_timer);
		atomic_set(&dd->ipath_led_override_timer_active, 0);
	}

	/* Shut off LEDs after we are sure timer is not running */
	dd->ipath_led_override = LED_OVER_BOTH_OFF;
	dd->ipath_f_setextled(dd, 0, 0);

	dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);

	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
		dev_info(&dd->pcidev->dev, "Invalid unit number %u or "
			 "not initialized or not present\n", unit);
		ret = -ENXIO;
		goto bail;
	}

	for (i = 1; i < dd->ipath_cfgports; i++) {
		if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) {
			ipath_dbg("unit %u port %d is in use "
				  "(PID %u cmd %s), can't reset\n",
				  unit, i,
				  dd->ipath_pd[i]->port_pid,
				  dd->ipath_pd[i]->port_comm);
			ret = -EBUSY;
			goto bail;
		}
	}

	dd->ipath_flags &= ~IPATH_INITTED;
	ret = dd->ipath_f_reset(dd);
	if (ret != 1)
		ipath_dbg("reset was not successful\n");
	ipath_dbg("Trying to reinitialize unit %u after reset attempt\n",
		  unit);
	ret = ipath_init_chip(dd, 1);
	if (ret)
		ipath_dev_err(dd, "Reinitialize unit %u after "
			      "reset failed with %d\n", unit, ret);
	else
		dev_info(&dd->pcidev->dev, "Reinitialized unit %u after "
			 "resetting\n", unit);

bail:
	return ret;
}
int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
{
	u64 val;

	if (new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK) {
		return -1;
	}
	if (dd->ipath_rx_pol_inv != new_pol_inv) {
		dd->ipath_rx_pol_inv = new_pol_inv;
		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
		val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
			 INFINIPATH_XGXS_RX_POL_SHIFT);
		val |= ((u64)dd->ipath_rx_pol_inv) <<
			INFINIPATH_XGXS_RX_POL_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
	}
	return 0;
}
module_init(infinipath_init);
module_exit(infinipath_cleanup);