/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
const char *pci_power_names[] = {
        "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

unsigned int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
        struct list_head list;
        struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
        unsigned int delay = dev->d3_delay;

        if (delay < pci_pm_d3_delay)
                delay = pci_pm_d3_delay;

        msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE         (256)
#define DEFAULT_CARDBUS_MEM_SIZE        (64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE         (256)
#define DEFAULT_HOTPLUG_MEM_SIZE        (2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;
/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
        struct list_head *tmp;
        unsigned char max, n;

        max = bus->busn_res.end;
        list_for_each(tmp, &bus->children) {
                n = pci_bus_max_busnr(pci_bus_b(tmp));
                if (n > max)
                        max = n;
        }
        return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
        /*
         * Make sure the BAR is actually a memory resource, not an IO resource
         */
        if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
                WARN_ON(1);
                return NULL;
        }
        return ioremap_nocache(pci_resource_start(pdev, bar),
                               pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif

#define PCI_FIND_CAP_TTL        48
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
                                   u8 pos, int cap, int *ttl)
{
        u8 id;

        while ((*ttl)--) {
                pci_bus_read_config_byte(bus, devfn, pos, &pos);
                if (pos < 0x40)
                        break;
                pos &= ~3;
                pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
                                         &id);
                if (id == 0xff)
                        break;
                if (id == cap)
                        return pos;
                pos += PCI_CAP_LIST_NEXT;
        }
        return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
                               u8 pos, int cap)
{
        int ttl = PCI_FIND_CAP_TTL;

        return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
        return __pci_find_next_cap(dev->bus, dev->devfn,
                                   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
                                    unsigned int devfn, u8 hdr_type)
{
        u16 status;

        pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
        if (!(status & PCI_STATUS_CAP_LIST))
                return 0;

        switch (hdr_type) {
        case PCI_HEADER_TYPE_NORMAL:
        case PCI_HEADER_TYPE_BRIDGE:
                return PCI_CAPABILITY_LIST;
        case PCI_HEADER_TYPE_CARDBUS:
                return PCI_CB_CAPABILITY_LIST;
        }

        return 0;
}
/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM      Power Management
 *  %PCI_CAP_ID_AGP     Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD     Vital Product Data
 *  %PCI_CAP_ID_SLOTID  Slot Identification
 *  %PCI_CAP_ID_MSI     Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP   CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX    PCI-X
 *  %PCI_CAP_ID_EXP     PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
        int pos;

        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
        if (pos)
                pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

        return pos;
}
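/*
 * Illustrative sketch (not from the original file): a typical way a
 * driver uses pci_find_capability() is to locate a capability's offset
 * and then read its registers relative to that offset.  The helper name
 * below is hypothetical.
 */
static inline u16 example_read_pm_pmc(struct pci_dev *dev)
{
        int pos;
        u16 pmc = 0;

        /* Offset of the Power Management capability, or 0 if absent. */
        pos = pci_find_capability(dev, PCI_CAP_ID_PM);
        if (pos)
                pci_read_config_word(dev, pos + PCI_PM_PMC, &pmc);
        return pmc;
}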
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
        int pos;
        u8 hdr_type;

        pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

        pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
        if (pos)
                pos = __pci_find_next_cap(bus, devfn, pos, cap);

        return pos;
}

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
        u32 header;
        int ttl;
        int pos = PCI_CFG_SPACE_SIZE;

        /* minimum 8 bytes per capability */
        ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

        if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
                return 0;

        if (start)
                pos = start;

        if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
                return 0;

        /*
         * If we have no capabilities, this is indicated by cap ID,
         * cap version and next pointer all being 0.
         */
        if (header == 0)
                return 0;

        while (ttl-- > 0) {
                if (PCI_EXT_CAP_ID(header) == cap && pos != start)
                        return pos;

                pos = PCI_EXT_CAP_NEXT(header);
                if (pos < PCI_CFG_SPACE_SIZE)
                        break;

                if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
                        break;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
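/*
 * Illustrative sketch (not from the original file): because extended
 * capabilities such as the vendor-specific one may appear several
 * times, pci_find_next_ext_capability() is typically called in a loop,
 * feeding each returned offset back in as the new start.  The helper
 * name is hypothetical.
 */
static inline int example_count_vsec_caps(struct pci_dev *dev)
{
        int pos = 0, count = 0;

        while ((pos = pci_find_next_ext_capability(dev, pos,
                                                   PCI_EXT_CAP_ID_VNDR)))
                count++;
        return count;
}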
/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR  Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC   Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN  Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR  Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
        return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
        int rc, ttl = PCI_FIND_CAP_TTL;
        u8 cap, mask;

        if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
                mask = HT_3BIT_CAP_MASK;
        else
                mask = HT_5BIT_CAP_MASK;

        pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
                                      PCI_CAP_ID_HT, &ttl);
        while (pos) {
                rc = pci_read_config_byte(dev, pos + 3, &cap);
                if (rc != PCIBIOS_SUCCESSFUL)
                        return 0;

                if ((cap & mask) == ht_cap)
                        return pos;

                pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
                                              pos + PCI_CAP_LIST_NEXT,
                                              PCI_CAP_ID_HT, &ttl);
        }

        return 0;
}
/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
        return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
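/*
 * Illustrative sketch (not from the original file): per the NB above, a
 * caller should bound the search loop itself.  The helper name and the
 * choice of HT_CAPTYPE_MSI_MAPPING are hypothetical.
 */
static inline int example_count_ht_msi_mappings(struct pci_dev *dev)
{
        int pos, guard = 48, count = 0;

        pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
        while (pos && guard-- > 0) {
                count++;
                pos = pci_find_next_ht_capability(dev, pos,
                                                  HT_CAPTYPE_MSI_MAPPING);
        }
        return count;
}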
/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
        int pos;

        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
        if (pos)
                pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

        return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource
 * region of parent bus the given region is contained in or where
 * it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
        const struct pci_bus *bus = dev->bus;
        int i;
        struct resource *best = NULL, *r;

        pci_bus_for_each_resource(bus, r, i) {
                if (!r)
                        continue;
                if (res->start && !(res->start >= r->start && res->end <= r->end))
                        continue;       /* Not contained */
                if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
                        continue;       /* Wrong type */
                if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
                        return r;       /* Exact match */
                /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
                if (r->flags & IORESOURCE_PREFETCH)
                        continue;
                /* .. but we can put a prefetchable resource inside a non-prefetchable one */
                if (!best)
                        best = r;
        }
        return best;
}
/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
        int i;

        for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
                pci_update_resource(dev, i);
}

static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
        if (!ops->is_manageable || !ops->set_state || !ops->choose_state
            || !ops->sleep_wake || !ops->run_wake)
                return -EINVAL;
        pci_platform_pm = ops;
        return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
        return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
                                               pci_power_t t)
{
        return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
        return pci_platform_pm ?
                        pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
        return pci_platform_pm ?
                        pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
        return pci_platform_pm ?
                        pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}
/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        u16 pmcsr;
        bool need_restore = false;

        /* Check if we're already there */
        if (dev->current_state == state)
                return 0;

        if (!dev->pm_cap)
                return -EIO;

        if (state < PCI_D0 || state > PCI_D3hot)
                return -EINVAL;

        /* Validate current state:
         * Can enter D0 from any state, but we can only go deeper
         * to sleep if we're already in a low power state
         */
        if (state != PCI_D0 && dev->current_state <= PCI_D3cold
            && dev->current_state > state) {
                dev_err(&dev->dev, "invalid power transition "
                        "(from state %d to %d)\n", dev->current_state, state);
                return -EINVAL;
        }

        /* check if this device supports the desired state */
        if ((state == PCI_D1 && !dev->d1_support)
           || (state == PCI_D2 && !dev->d2_support))
                return -EIO;

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

        /* If we're (effectively) in D3, force entire word to 0.
         * This doesn't affect PME_Status, disables PME_En, and
         * sets PowerState to 0.
         */
        switch (dev->current_state) {
        case PCI_D0:
        case PCI_D1:
        case PCI_D2:
                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                pmcsr |= state;
                break;
        case PCI_UNKNOWN: /* Boot-up */
                if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
                 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
                        need_restore = true;
                /* Fall-through: force to D0 */
        default:
                pmcsr = 0;
                break;
        }

        /* enter specified state */
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

        /* Mandatory power management transition delays */
        /* see PCI PM 1.1 5.6.1 table 18 */
        if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
                pci_dev_d3_sleep(dev);
        else if (state == PCI_D2 || dev->current_state == PCI_D2)
                udelay(PCI_PM_D2_DELAY);

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        if (dev->current_state != state && printk_ratelimit())
                dev_info(&dev->dev, "Refused to change power state, "
                        "currently in D%d\n", dev->current_state);

        /*
         * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
         * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
         * from D3hot to D0 _may_ perform an internal reset, thereby
         * going to "D0 Uninitialized" rather than "D0 Initialized".
         * For example, at least some versions of the 3c905B and the
         * 3c556B exhibit this behaviour.
         *
         * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
         * devices in a D3hot state at boot.  Consequently, we need to
         * restore at least the BARs so that the device will be
         * accessible to its driver.
         */
        if (need_restore)
                pci_restore_bars(dev);

        if (dev->bus->self)
                pcie_aspm_pm_state_change(dev->bus->self);

        return 0;
}
/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
        if (dev->pm_cap) {
                u16 pmcsr;

                /*
                 * Configuration space is not accessible for device in
                 * D3cold, so just keep or set D3cold for safety
                 */
                if (dev->current_state == PCI_D3cold)
                        return;
                if (state == PCI_D3cold) {
                        dev->current_state = PCI_D3cold;
                        return;
                }
                pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
                dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        } else {
                dev->current_state = state;
        }
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
        if (platform_pci_power_manageable(dev))
                platform_pci_set_power_state(dev, PCI_D0);

        pci_raw_set_power_state(dev, PCI_D0);
        pci_update_current_state(dev, PCI_D0);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
        int error;

        if (platform_pci_power_manageable(dev)) {
                error = platform_pci_set_power_state(dev, state);
                if (!error)
                        pci_update_current_state(dev, state);
        } else
                error = -ENODEV;

        if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
                dev->current_state = PCI_D0;

        return error;
}
/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
        if (state == PCI_D0) {
                pci_platform_power_transition(dev, PCI_D0);
                /*
                 * Mandatory power management transition delays, see
                 * PCI Express Base Specification Revision 2.0 Section
                 * 6.6.1: Conventional Reset.  Do not delay for
                 * devices powered on/off by the corresponding bridge,
                 * because we have already delayed for the bridge.
                 */
                if (dev->runtime_d3cold) {
                        msleep(dev->d3cold_delay);
                        /*
                         * When powering on a bridge from D3cold, the
                         * whole hierarchy may be powered on into
                         * D0uninitialized state, resume them to give
                         * them a chance to suspend again
                         */
                        pci_wakeup_bus(dev->subordinate);
                }
        }
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
        pci_power_t state = *(pci_power_t *)data;

        dev->current_state = state;
        return 0;
}

/**
 * __pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
        if (bus)
                pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
        int ret;

        ret = pci_platform_power_transition(dev, state);
        /* Powering off a bridge may power off the whole hierarchy */
        if (!ret && state == PCI_D3cold)
                __pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
        return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        int error;

        /* bound the state we're entering */
        if (state > PCI_D3cold)
                state = PCI_D3cold;
        else if (state < PCI_D0)
                state = PCI_D0;
        else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
                /*
                 * If the device or the parent bridge do not support PCI PM,
                 * ignore the request if we're doing anything other than putting
                 * it into D0 (which would only happen on boot).
                 */
                return 0;

        /* Check if we're already there */
        if (dev->current_state == state)
                return 0;

        __pci_start_power_transition(dev, state);

        /* This device is quirked not to be put into D3, so
           don't put it in D3 */
        if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
                return 0;

        /*
         * To put the device in D3cold, we put the device into D3hot in the
         * native way, then put the device into D3cold with platform ops.
         */
        error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
                                        PCI_D3hot : state);

        if (!__pci_complete_power_transition(dev, state))
                error = 0;
        /*
         * When aspm_policy is "powersave" this call ensures
         * that ASPM is configured.
         */
        if (!error && dev->bus->self)
                pcie_aspm_powersave_config_link(dev->bus->self);

        return error;
}
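/*
 * Illustrative sketch (not from the original file): a driver's suspend
 * callback typically saves config space and then asks for D3hot.  The
 * function name is hypothetical.
 */
static inline int example_driver_suspend(struct pci_dev *pdev)
{
        pci_save_state(pdev);
        return pci_set_power_state(pdev, PCI_D3hot);
}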
/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *      that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
        pci_power_t ret;

        if (!dev->pm_cap)
                return PCI_D0;

        ret = platform_pci_choose_state(dev);
        if (ret != PCI_POWER_ERROR)
                return ret;

        switch (state.event) {
        case PM_EVENT_ON:
                return PCI_D0;
        case PM_EVENT_FREEZE:
        case PM_EVENT_PRETHAW:
                /* REVISIT both freeze and pre-thaw "should" use D0 */
        case PM_EVENT_SUSPEND:
        case PM_EVENT_HIBERNATE:
                return PCI_D3hot;
        default:
                dev_info(&dev->dev, "unrecognized suspend event %d\n",
                         state.event);
                BUG();
        }
        return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);

#define PCI_EXP_SAVE_REGS       7

static struct pci_cap_saved_state *pci_find_saved_cap(
        struct pci_dev *pci_dev, char cap)
{
        struct pci_cap_saved_state *tmp;

        hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
                if (tmp->cap.cap_nr == cap)
                        return tmp;
        }
        return NULL;
}
static int pci_save_pcie_state(struct pci_dev *dev)
{
        int i = 0;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        if (!pci_is_pcie(dev))
                return 0;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
        if (!save_state) {
                dev_err(&dev->dev, "buffer not found in %s\n", __func__);
                return -ENOMEM;
        }

        cap = (u16 *)&save_state->cap.data[0];
        pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

        return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
        int i = 0;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
        if (!save_state)
                return;

        cap = (u16 *)&save_state->cap.data[0];
        pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
        int pos;
        struct pci_cap_saved_state *save_state;

        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (pos <= 0)
                return 0;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
        if (!save_state) {
                dev_err(&dev->dev, "buffer not found in %s\n", __func__);
                return -ENOMEM;
        }

        pci_read_config_word(dev, pos + PCI_X_CMD,
                             (u16 *)save_state->cap.data);

        return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
        int i = 0, pos;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (!save_state || pos <= 0)
                return;
        cap = (u16 *)&save_state->cap.data[0];

        pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}
/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: - PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
        int i;
        /* XXX: 100% dword access ok here? */
        for (i = 0; i < 16; i++)
                pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
        dev->state_saved = true;
        if ((i = pci_save_pcie_state(dev)) != 0)
                return i;
        if ((i = pci_save_pcix_state(dev)) != 0)
                return i;
        return 0;
}

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
                                     u32 saved_val, int retry)
{
        u32 val;

        pci_read_config_dword(pdev, offset, &val);
        if (val == saved_val)
                return;

        for (;;) {
                dev_dbg(&pdev->dev, "restoring config space at offset "
                        "%#x (was %#x, writing %#x)\n", offset, val, saved_val);
                pci_write_config_dword(pdev, offset, saved_val);
                if (retry-- <= 0)
                        return;

                pci_read_config_dword(pdev, offset, &val);
                if (val == saved_val)
                        return;

                mdelay(1);
        }
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
                                           int start, int end, int retry)
{
        int index;

        for (index = end; index >= start; index--)
                pci_restore_config_dword(pdev, 4 * index,
                                         pdev->saved_config_space[index],
                                         retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
        if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
                pci_restore_config_space_range(pdev, 10, 15, 0);
                /* Restore BARs before the command register. */
                pci_restore_config_space_range(pdev, 4, 9, 10);
                pci_restore_config_space_range(pdev, 0, 3, 0);
        } else {
                pci_restore_config_space_range(pdev, 0, 15, 0);
        }
}
/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
        if (!dev->state_saved)
                return;

        /* PCI Express register must be restored first */
        pci_restore_pcie_state(dev);
        pci_restore_ats_state(dev);

        pci_restore_config_space(dev);

        pci_restore_pcix_state(dev);
        pci_restore_msi_state(dev);
        pci_restore_iov_state(dev);

        dev->state_saved = false;
}
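/*
 * Illustrative sketch (not from the original file): the resume-side
 * counterpart of the suspend sketch above: return to D0 first so that
 * config space is accessible, then restore it.  The function name is
 * hypothetical.
 */
static inline int example_driver_resume(struct pci_dev *pdev)
{
        int err;

        err = pci_set_power_state(pdev, PCI_D0);
        if (err)
                return err;
        pci_restore_state(pdev);
        return 0;
}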
struct pci_saved_state {
        u32 config_space[16];
        struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *                         the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
        struct pci_saved_state *state;
        struct pci_cap_saved_state *tmp;
        struct pci_cap_saved_data *cap;
        size_t size;

        if (!dev->state_saved)
                return NULL;

        size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

        hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
                size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

        state = kzalloc(size, GFP_KERNEL);
        if (!state)
                return NULL;

        memcpy(state->config_space, dev->saved_config_space,
               sizeof(state->config_space));

        cap = state->cap;
        hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
                size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
                memcpy(cap, &tmp->cap, len);
                cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
        }
        /* Empty cap_save terminates list */

        return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);
/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
        struct pci_cap_saved_data *cap;

        dev->state_saved = false;

        if (!state)
                return 0;

        memcpy(dev->saved_config_space, state->config_space,
               sizeof(state->config_space));

        cap = state->cap;
        while (cap->size) {
                struct pci_cap_saved_state *tmp;

                tmp = pci_find_saved_cap(dev, cap->cap_nr);
                if (!tmp || tmp->cap.size != cap->size)
                        return -EINVAL;

                memcpy(tmp->cap.data, cap->data, tmp->cap.size);
                cap = (struct pci_cap_saved_data *)((u8 *)cap +
                       sizeof(struct pci_cap_saved_data) + cap->size);
        }

        dev->state_saved = true;
        return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);
/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *                                 and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
                                  struct pci_saved_state **state)
{
        int ret = pci_load_saved_state(dev, *state);
        kfree(*state);
        *state = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
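/*
 * Illustrative sketch (not from the original file): snapshotting device
 * state around an operation that clobbers it.  The function name is
 * hypothetical, and error handling is minimal.
 */
static inline int example_state_roundtrip(struct pci_dev *pdev)
{
        struct pci_saved_state *snap;

        pci_save_state(pdev);
        snap = pci_store_saved_state(pdev);
        if (!snap)
                return -ENOMEM;

        /* ... reset or otherwise disturb the device here ... */

        return pci_load_and_free_saved_state(pdev, &snap);
}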
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
        int err;

        err = pci_set_power_state(dev, PCI_D0);
        if (err < 0 && err != -EIO)
                return err;
        err = pcibios_enable_device(dev, bars);
        if (err < 0)
                return err;
        pci_fixup_device(pci_fixup_enable, dev);

        return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume and is not supposed
 * to be called by normal code; write a proper resume handler and use it
 * instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
        if (pci_is_enabled(dev))
                return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
        return 0;
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
        int err;
        int i, bars = 0;

        /*
         * Power state could be unknown at this point, either due to a fresh
         * boot or a device removal call.  So get the current power state
         * so that things like MSI message writing will behave as expected
         * (e.g. if the device really is in D0 at enable time).
         */
        if (dev->pm_cap) {
                u16 pmcsr;
                pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
                dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        }

        if (atomic_inc_return(&dev->enable_cnt) > 1)
                return 0;               /* already enabled */

        /* only skip sriov related */
        for (i = 0; i <= PCI_ROM_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);
        for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);

        err = do_pci_enable_device(dev, bars);
        if (err < 0)
                atomic_dec(&dev->enable_cnt);
        return err;
}
/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
        return pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
        return pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
        return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
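/*
 * Illustrative sketch (not from the original file): the classic probe
 * sequence built on pci_enable_device().  The function name is
 * hypothetical; a real probe would also map BARs and request IRQs.
 */
static inline int example_probe(struct pci_dev *pdev)
{
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;
        pci_set_master(pdev);
        return 0;
}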
/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
        unsigned int enabled:1;
        unsigned int pinned:1;
        unsigned int orig_intx:1;
        unsigned int restore_intx:1;
        u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
        struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
        struct pci_devres *this = res;
        int i;

        if (dev->msi_enabled)
                pci_disable_msi(dev);
        if (dev->msix_enabled)
                pci_disable_msix(dev);

        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
                if (this->region_mask & (1 << i))
                        pci_release_region(dev, i);

        if (this->restore_intx)
                pci_intx(dev, this->orig_intx);

        if (this->enabled && !this->pinned)
                pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
        struct pci_devres *dr, *new_dr;

        dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
        if (dr)
                return dr;

        new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
        if (!new_dr)
                return NULL;
        return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
        if (pci_is_managed(pdev))
                return devres_find(&pdev->dev, pcim_release, NULL, NULL);
        return NULL;
}
/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
        struct pci_devres *dr;
        int rc;

        dr = get_pci_dr(pdev);
        if (unlikely(!dr))
                return -ENOMEM;
        if (dr->enabled)
                return 0;

        rc = pci_enable_device(pdev);
        if (!rc) {
                pdev->is_managed = 1;
                dr->enabled = 1;
        }
        return rc;
}

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
        struct pci_devres *dr;

        dr = find_pci_dr(pdev);
        WARN_ON(!dr || !dr->enabled);
        if (dr)
                dr->pinned = 1;
}
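/*
 * Illustrative sketch (not from the original file): with the managed
 * variant the disable happens automatically when the driver detaches,
 * so probe error paths stay simple.  The function name is hypothetical.
 */
static inline int example_managed_probe(struct pci_dev *pdev)
{
        int err;

        err = pcim_enable_device(pdev);
        if (err)
                return err;
        pci_set_master(pdev);
        /* No matching pci_disable_device() is needed on detach. */
        return 0;
}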
/**
 * pcibios_add_device - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added. This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pcibios_add_device(struct pci_dev *dev)
{
        return 0;
}

/**
 * pcibios_release_device - provide arch specific hooks when releasing device dev
 * @dev: the PCI device being released
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are released. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_release_device(struct pci_dev *dev) {}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
        u16 pci_command;

        pci_read_config_word(dev, PCI_COMMAND, &pci_command);
        if (pci_command & PCI_COMMAND_MASTER) {
                pci_command &= ~PCI_COMMAND_MASTER;
                pci_write_config_word(dev, PCI_COMMAND, pci_command);
        }

        pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
        if (pci_is_enabled(dev))
                do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
        struct pci_devres *dr;

        dr = find_pci_dr(dev);
        if (dr)
                dr->enabled = 0;

        dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
                      "disabling already-disabled device");

        if (atomic_dec_return(&dev->enable_cnt) != 0)
                return;

        do_pci_disable_device(dev);

        dev->is_busmaster = 0;
}
/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
                                        enum pcie_reset_state state)
{
        return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
        return pcibios_set_pcie_reset_state(dev, state);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
        int pmcsr_pos;
        u16 pmcsr;
        bool ret = false;

        if (!dev->pm_cap)
                return false;

        pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
        pci_read_config_word(dev, pmcsr_pos, &pmcsr);
        if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
                return false;

        /* Clear PME status. */
        pmcsr |= PCI_PM_CTRL_PME_STATUS;
        if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
                /* Disable PME to avoid interrupt flood. */
                pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
                ret = true;
        }

        pci_write_config_word(dev, pmcsr_pos, pmcsr);

        return ret;
}
/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
        if (pme_poll_reset && dev->pme_poll)
                dev->pme_poll = false;

        if (pci_check_pme_status(dev)) {
                pci_wakeup_event(dev);
                pm_request_resume(&dev->dev);
        }
        return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
        if (bus)
                pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
        pci_wakeup_event(pci_dev);
        pm_request_resume(&pci_dev->dev);
        return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
        if (bus)
                pci_walk_bus(bus, pci_wakeup, NULL);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
        if (!dev->pm_cap)
                return false;

        return !!(dev->pme_support & (1 << state));
}
static void pci_pme_list_scan(struct work_struct *work)
{
        struct pci_pme_device *pme_dev, *n;

        mutex_lock(&pci_pme_list_mutex);
        if (!list_empty(&pci_pme_list)) {
                list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
                        if (pme_dev->dev->pme_poll) {
                                struct pci_dev *bridge;

                                bridge = pme_dev->dev->bus->self;
                                /*
                                 * If the bridge is in a low power state, the
                                 * configuration space of subordinate devices
                                 * may not be accessible.
                                 */
                                if (bridge && bridge->current_state != PCI_D0)
                                        continue;
                                pci_pme_wakeup(pme_dev->dev, NULL);
                        } else {
                                list_del(&pme_dev->list);
                                kfree(pme_dev);
                        }
                }
                if (!list_empty(&pci_pme_list))
                        schedule_delayed_work(&pci_pme_work,
                                              msecs_to_jiffies(PME_TIMEOUT));
        }
        mutex_unlock(&pci_pme_list_mutex);
}
/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
        u16 pmcsr;

        if (!dev->pme_support)
                return;

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        /* Clear PME_Status by writing 1 to it and enable PME# */
        pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
        if (!enable)
                pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

        /*
         * PCI (as opposed to PCIe) PME requires that the device have
         * its PME# line hooked up correctly. Not all hardware vendors
         * do this, so the PME never gets delivered and the device
         * remains asleep.  The easiest way around this is to
         * periodically walk the list of suspended devices and check
         * whether any have their PME flag set.  The assumption is that
         * we'll wake up often enough anyway that this won't be a huge
         * hit, and the power savings from the devices will still be a
         * win.
         *
         * Although PCIe uses in-band PME message instead of PME# line
         * to report PME, PME does not work for some PCIe devices in
         * reality.  For example, there are devices that set their PME
         * status bits, but don't really bother to send a PME message;
         * there are PCI Express Root Ports that don't bother to
         * trigger interrupts when they receive PME messages from the
         * devices below.  So PME poll is used for PCIe devices too.
         */

        if (dev->pme_poll) {
                struct pci_pme_device *pme_dev;
                if (enable) {
                        pme_dev = kmalloc(sizeof(struct pci_pme_device),
                                          GFP_KERNEL);
                        if (!pme_dev)
                                goto out;
                        pme_dev->dev = dev;
                        mutex_lock(&pci_pme_list_mutex);
                        list_add(&pme_dev->list, &pci_pme_list);
                        if (list_is_singular(&pci_pme_list))
                                schedule_delayed_work(&pci_pme_work,
                                                      msecs_to_jiffies(PME_TIMEOUT));
                        mutex_unlock(&pci_pme_list_mutex);
                } else {
                        mutex_lock(&pci_pme_list_mutex);
                        list_for_each_entry(pme_dev, &pci_pme_list, list) {
                                if (pme_dev->dev == dev) {
                                        list_del(&pme_dev->list);
                                        kfree(pme_dev);
                                        break;
                                }
                        }
                        mutex_unlock(&pci_pme_list_mutex);
                }
        }

out:
        dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
                      bool runtime, bool enable)
{
        int ret = 0;

        if (enable && !runtime && !device_may_wakeup(&dev->dev))
                return -EINVAL;

        /* Don't do the same thing twice in a row for one device. */
        if (!!enable == !!dev->wakeup_prepared)
                return 0;

        /*
         * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
         * Anderson we should be doing PME# wake enable followed by ACPI wake
         * enable.  To disable wake-up we call the platform first, for symmetry.
         */

        if (enable) {
                int error;

                if (pci_pme_capable(dev, state))
                        pci_pme_active(dev, true);
                else
                        ret = 1;
                error = runtime ? platform_pci_run_wake(dev, true) :
                                        platform_pci_sleep_wake(dev, true);
                if (ret)
                        ret = error;
                if (!ret)
                        dev->wakeup_prepared = true;
        } else {
                if (runtime)
                        platform_pci_run_wake(dev, false);
                else
                        platform_pci_sleep_wake(dev, false);
                pci_pme_active(dev, false);
                dev->wakeup_prepared = false;
        }

        return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);
/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
        return pci_pme_capable(dev, PCI_D3cold) ?
                        pci_enable_wake(dev, PCI_D3cold, enable) :
                        pci_enable_wake(dev, PCI_D3hot, enable);
}
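/*
 * Illustrative sketch (not from the original file): a network driver's
 * suspend path might arm wake-up only when Wake-on-LAN is requested,
 * then pick a sleep state with pci_choose_state().  The function name
 * is hypothetical.
 */
static inline int example_suspend_with_wol(struct pci_dev *pdev, bool wol)
{
        pci_wake_from_d3(pdev, wol);
        return pci_set_power_state(pdev,
                                   pci_choose_state(pdev, PMSG_SUSPEND));
}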
/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
        pci_power_t target_state = PCI_D3hot;

        if (platform_pci_power_manageable(dev)) {
                /*
                 * Call the platform to choose the target state of the device
                 * and enable wake-up from this state if supported.
                 */
                pci_power_t state = platform_pci_choose_state(dev);

                switch (state) {
                case PCI_POWER_ERROR:
                case PCI_UNKNOWN:
                        break;
                case PCI_D1:
                case PCI_D2:
                        if (pci_no_d1d2(dev))
                                break;
                default:
                        target_state = state;
                }
        } else if (!dev->pm_cap) {
                target_state = PCI_D0;
        } else if (device_may_wakeup(&dev->dev)) {
                /*
                 * Find the deepest state from which the device can generate
                 * wake-up events, make it the target state and enable device
                 * to generate PME#.
                 */
                if (dev->pme_support) {
                        while (target_state
                              && !(dev->pme_support & (1 << target_state)))
                                target_state--;
                }
        }

        return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
        pci_power_t target_state = pci_target_state(dev);
        int error;

        if (target_state == PCI_POWER_ERROR)
                return -EIO;

        /* D3cold during system suspend/hibernate is not supported */
        if (target_state > PCI_D3hot)
                target_state = PCI_D3hot;

        pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

        error = pci_set_power_state(dev, target_state);

        if (error)
                pci_enable_wake(dev, target_state, false);

        return error;
}

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
        pci_enable_wake(dev, PCI_D0, false);
        return pci_set_power_state(dev, PCI_D0);
}

/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
        pci_power_t target_state = pci_target_state(dev);
        int error;

        if (target_state == PCI_POWER_ERROR)
                return -EIO;

        dev->runtime_d3cold = target_state == PCI_D3cold;

        __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));

        error = pci_set_power_state(dev, target_state);

        if (error) {
                __pci_enable_wake(dev, target_state, true, false);
                dev->runtime_d3cold = false;
        }

        return error;
}
/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device supports
 * PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
        struct pci_bus *bus = dev->bus;

        if (device_run_wake(&dev->dev))
                return true;

        if (!dev->pme_support)
                return false;

        while (bus->parent) {
                struct pci_dev *bridge = bus->self;

                if (device_run_wake(&bridge->dev))
                        return true;

                bus = bus->parent;
        }

        /* We have reached the root bus. */
        if (bus->bridge)
                return device_run_wake(bus->bridge);

        return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);
void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
        struct device *dev = &pdev->dev;
        struct device *parent = dev->parent;

        if (parent)
                pm_runtime_get_sync(parent);
        pm_runtime_get_noresume(dev);
        /*
         * pdev->current_state is set to PCI_D3cold during suspending,
         * so wait until suspending completes
         */
        pm_runtime_barrier(dev);
        /*
         * Only need to resume devices in D3cold, because config
         * registers are still accessible for devices suspended but
         * not in D3cold.
         */
        if (pdev->current_state == PCI_D3cold)
                pm_runtime_resume(dev);
}

void pci_config_pm_runtime_put(struct pci_dev *pdev)
{
        struct device *dev = &pdev->dev;
        struct device *parent = dev->parent;

        pm_runtime_put(dev);
        if (parent)
                pm_runtime_put_sync(parent);
}
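/*
 * Illustrative sketch (not from the original file): these two helpers
 * bracket config space accesses that must not race with a runtime
 * D3cold transition.  The function name is hypothetical.
 */
static inline u32 example_read_vendor_id(struct pci_dev *pdev)
{
        u32 id;

        pci_config_pm_runtime_get(pdev);
        pci_read_config_dword(pdev, PCI_VENDOR_ID, &id);
        pci_config_pm_runtime_put(pdev);
        return id;
}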
/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
void pci_pm_init(struct pci_dev *dev)
{
        int pm;
        u16 pmc;

        pm_runtime_forbid(&dev->dev);
        pm_runtime_set_active(&dev->dev);
        pm_runtime_enable(&dev->dev);
        device_enable_async_suspend(&dev->dev);
        dev->wakeup_prepared = false;

        dev->pm_cap = 0;
        dev->pme_support = 0;

        /* find PCI PM capability in list */
        pm = pci_find_capability(dev, PCI_CAP_ID_PM);
        if (!pm)
                return;
        /* Check device's ability to generate PME# */
        pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

        if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
                dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
                        pmc & PCI_PM_CAP_VER_MASK);
                return;
        }

        dev->pm_cap = pm;
        dev->d3_delay = PCI_PM_D3_WAIT;
        dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
        dev->d3cold_allowed = true;

        dev->d1_support = false;
        dev->d2_support = false;
        if (!pci_no_d1d2(dev)) {
                if (pmc & PCI_PM_CAP_D1)
                        dev->d1_support = true;
                if (pmc & PCI_PM_CAP_D2)
                        dev->d2_support = true;

                if (dev->d1_support || dev->d2_support)
                        dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
                                   dev->d1_support ? " D1" : "",
                                   dev->d2_support ? " D2" : "");
        }

        pmc &= PCI_PM_CAP_PME_MASK;
        if (pmc) {
                dev_printk(KERN_DEBUG, &dev->dev,
                           "PME# supported from%s%s%s%s%s\n",
                           (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
                           (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
                           (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
                           (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
                           (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
                dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
                dev->pme_poll = true;
                /*
                 * Make device's PM flags reflect the wake-up capability, but
                 * let the user space enable it to wake up the system as needed.
                 */
                device_set_wakeup_capable(&dev->dev, true);
                /* Disable the PME# generation functionality */
                pci_pme_active(dev, false);
        }
}
static void pci_add_saved_cap(struct pci_dev *pci_dev,
                              struct pci_cap_saved_state *new_cap)
{
        hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
}

/**
 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @size: requested size of the buffer
 */
static int pci_add_cap_save_buffer(
        struct pci_dev *dev, char cap, unsigned int size)
{
        int pos;
        struct pci_cap_saved_state *save_state;

        pos = pci_find_capability(dev, cap);
        if (pos <= 0)
                return 0;

        save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
        if (!save_state)
                return -ENOMEM;

        save_state->cap.cap_nr = cap;
        save_state->cap.size = size;
        pci_add_saved_cap(dev, save_state);

        return 0;
}

/**
 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
 * @dev: the PCI device
 */
void pci_allocate_cap_save_buffers(struct pci_dev *dev)
{
        int error;

        error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
                                        PCI_EXP_SAVE_REGS * sizeof(u16));
        if (error)
                dev_err(&dev->dev,
                        "unable to preallocate PCI Express save buffer\n");

        error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
        if (error)
                dev_err(&dev->dev,
                        "unable to preallocate PCI-X save buffer\n");
}

void pci_free_cap_save_buffers(struct pci_dev *dev)
{
        struct pci_cap_saved_state *tmp;
        struct hlist_node *n;

        hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
                kfree(tmp);
}
/**
 * pci_configure_ari - enable or disable ARI forwarding
 * @dev: the PCI device
 *
 * If @dev and its upstream bridge both support ARI, enable ARI in the
 * bridge.  Otherwise, disable ARI in the bridge.
 */
void pci_configure_ari(struct pci_dev *dev)
{
        u32 cap;
        struct pci_dev *bridge;

        if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
                return;

        bridge = dev->bus->self;
        if (!bridge)
                return;

        pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
        if (!(cap & PCI_EXP_DEVCAP2_ARI))
                return;

        if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
                pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
                                         PCI_EXP_DEVCTL2_ARI);
                bridge->ari_enabled = 1;
        } else {
                pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
                                           PCI_EXP_DEVCTL2_ARI);
                bridge->ari_enabled = 0;
        }
}
/**
 * pci_enable_ido - enable ID-based Ordering on a device
 * @dev: the PCI device
 * @type: which types of IDO to enable
 *
 * Enable ID-based ordering on @dev.  @type can contain the bits
 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
 * which types of transactions are allowed to be re-ordered.
 */
void pci_enable_ido(struct pci_dev *dev, unsigned long type)
{
        u16 ctrl = 0;

        if (type & PCI_EXP_IDO_REQUEST)
                ctrl |= PCI_EXP_IDO_REQ_EN;
        if (type & PCI_EXP_IDO_COMPLETION)
                ctrl |= PCI_EXP_IDO_CMP_EN;
        if (ctrl)
                pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_enable_ido);

/**
 * pci_disable_ido - disable ID-based ordering on a device
 * @dev: the PCI device
 * @type: which types of IDO to disable
 */
void pci_disable_ido(struct pci_dev *dev, unsigned long type)
{
        u16 ctrl = 0;

        if (type & PCI_EXP_IDO_REQUEST)
                ctrl |= PCI_EXP_IDO_REQ_EN;
        if (type & PCI_EXP_IDO_COMPLETION)
                ctrl |= PCI_EXP_IDO_CMP_EN;
        if (ctrl)
                pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_ido);
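/*
 * Illustrative sketch (not from the original file): enabling both IDO
 * transaction types at once.  The function name is hypothetical.
 */
static inline void example_enable_full_ido(struct pci_dev *pdev)
{
        pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
}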
/**
 * pci_enable_obff - enable optimized buffer flush/fill
 * @dev: PCI device
 * @type: type of signaling to use
 *
 * Try to enable @type OBFF signaling on @dev.  It will try using WAKE#
 * signaling if possible, falling back to message signaling only if
 * WAKE# isn't supported.  @type should indicate whether the PCIe link
 * should be brought out of L0s or L1 to send the message.  It should be
 * either %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_OBFF_SIGNAL_L0.
 *
 * If your device can benefit from receiving all messages, even at the
 * power cost of bringing the link back up from a low power state, use
 * %PCI_EXP_OBFF_SIGNAL_ALWAYS.  Otherwise, use %PCI_OBFF_SIGNAL_L0 (the
 * preferred type).
 *
 * RETURNS:
 * Zero on success, appropriate error number on failure.
 */
int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
{
        u32 cap;
        u16 ctrl;
        int ret;

        pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
        if (!(cap & PCI_EXP_OBFF_MASK))
                return -ENOTSUPP; /* no OBFF support at all */

        /* Make sure the topology supports OBFF as well */
        if (dev->bus->self) {
                ret = pci_enable_obff(dev->bus->self, type);
                if (ret)
                        return ret;
        }

        pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl);
        if (cap & PCI_EXP_OBFF_WAKE)
                ctrl |= PCI_EXP_OBFF_WAKE_EN;
        else {
                switch (type) {
                case PCI_EXP_OBFF_SIGNAL_L0:
                        if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
                                ctrl |= PCI_EXP_OBFF_MSGA_EN;
                        break;
                case PCI_EXP_OBFF_SIGNAL_ALWAYS:
                        ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
                        ctrl |= PCI_EXP_OBFF_MSGB_EN;
                        break;
                default:
                        WARN(1, "bad OBFF signal type\n");
                        return -ENOTSUPP;
                }
        }
        pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, ctrl);

        return 0;
}
EXPORT_SYMBOL(pci_enable_obff);

/**
 * pci_disable_obff - disable optimized buffer flush/fill
 * @dev: PCI device
 *
 * Disable OBFF on @dev.
 */
void pci_disable_obff(struct pci_dev *dev)
{
        pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_OBFF_WAKE_EN);
}
EXPORT_SYMBOL(pci_disable_obff);
2197 * pci_ltr_supported - check whether a device supports LTR
2201 * True if @dev supports latency tolerance reporting, false otherwise.
2203 static bool pci_ltr_supported(struct pci_dev
*dev
)
2207 pcie_capability_read_dword(dev
, PCI_EXP_DEVCAP2
, &cap
);
2209 return cap
& PCI_EXP_DEVCAP2_LTR
;

/**
 * pci_enable_ltr - enable latency tolerance reporting
 * @dev: PCI device
 *
 * Enable LTR on @dev if possible, which means enabling it first on
 * upstream ports.
 *
 * RETURNS:
 * Zero on success, errno on failure.
 */
int pci_enable_ltr(struct pci_dev *dev)
{
	int ret;

	/* Only primary function can enable/disable LTR */
	if (PCI_FUNC(dev->devfn) != 0)
		return -EINVAL;

	if (!pci_ltr_supported(dev))
		return -ENOTSUPP;

	/* Enable upstream ports first */
	if (dev->bus->self) {
		ret = pci_enable_ltr(dev->bus->self);
		if (ret)
			return ret;
	}

	return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
}
EXPORT_SYMBOL(pci_enable_ltr);

/**
 * pci_disable_ltr - disable latency tolerance reporting
 * @dev: PCI device
 */
void pci_disable_ltr(struct pci_dev *dev)
{
	/* Only primary function can enable/disable LTR */
	if (PCI_FUNC(dev->devfn) != 0)
		return;

	if (!pci_ltr_supported(dev))
		return;

	pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
}
EXPORT_SYMBOL(pci_disable_ltr);

static int __pci_ltr_scale(int *val)
{
	int scale = 0;

	while (*val > 1023) {
		*val = (*val + 31) / 32;
		scale++;
	}
	return scale;
}

/**
 * pci_set_ltr - set LTR latency values
 * @dev: PCI device
 * @snoop_lat_ns: snoop latency in nanoseconds
 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
 *
 * Figure out the scale and set the LTR values accordingly.
 */
int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
{
	int pos, ret, snoop_scale, nosnoop_scale;
	u16 val;

	if (!pci_ltr_supported(dev))
		return -ENOTSUPP;

	snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
	nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);

	if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
	    nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
		return -EINVAL;

	if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
	    (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
		return -EINVAL;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!pos)
		return -ENOTSUPP;

	val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
	if (ret)
		return -EIO;

	val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
	if (ret)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL(pci_set_ltr);
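
/*
 * Example (editor's illustrative sketch): a driver that can tolerate
 * roughly 1ms of snoop latency and 3ms of no-snoop latency could
 * advertise that after enabling LTR.  __pci_ltr_scale() above turns
 * 1,000,000ns into value 977 with scale code 2 (1024ns units), well
 * within the field limits checked by pci_set_ltr().
 *
 *	if (!pci_enable_ltr(pdev))
 *		pci_set_ltr(pdev, 1000000, 3000000);
 */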

static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

/**
 * pci_enable_acs - enable ACS if hardware supports it
 * @dev: the PCI device
 */
void pci_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	if (!pci_acs_enable)
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}

/**
 * pci_acs_enabled - test ACS against required flags for a given device
 * @pdev: device to test
 * @acs_flags: required PCI ACS flags
 *
 * Return true if the device supports the provided flags.  Automatically
 * filters out flags that are not implemented on multifunction devices.
 */
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
{
	int pos, ret;
	u16 ctrl;

	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
	if (ret >= 0)
		return ret > 0;

	if (!pci_is_pcie(pdev))
		return false;

	/* Filter out flags not applicable to multifunction */
	if (pdev->multifunction)
		acs_flags &= (PCI_ACS_RR | PCI_ACS_CR |
			      PCI_ACS_EC | PCI_ACS_DT);

	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM ||
	    pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
	    pdev->multifunction) {
		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
		if (!pos)
			return false;

		pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
		if ((ctrl & acs_flags) != acs_flags)
			return false;
	}

	return true;
}

/**
 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
 * @start: starting downstream device
 * @end: ending upstream device or NULL to search to the root bus
 * @acs_flags: required flags
 *
 * Walk up a device tree from start to end testing PCI ACS support.  If
 * any step along the way does not support the required flags, return false.
 */
bool pci_acs_path_enabled(struct pci_dev *start,
			  struct pci_dev *end, u16 acs_flags)
{
	struct pci_dev *pdev, *parent = start;

	do {
		pdev = parent;

		if (!pci_acs_enabled(pdev, acs_flags))
			return false;

		if (pci_is_root_bus(pdev->bus))
			return (end == NULL);

		parent = pdev->bus->self;
	} while (pdev != end);

	return true;
}
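
/*
 * Example (editor's illustrative sketch): an IOMMU-group style check
 * might require that peer-to-peer traffic from a device is always
 * redirected upstream before deciding the device can be isolated:
 *
 *	if (pci_acs_path_enabled(pdev, NULL,
 *				 PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF))
 *		... every bridge up to the root enforces redirection ...
 */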

/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
 * the PCI Express Base Specification, Revision 2.1)
 */
u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
{
	int slot;

	if (pci_ari_enabled(dev->bus))
		slot = 0;
	else
		slot = PCI_SLOT(dev->devfn);

	return (((pin - 1) + slot) % 4) + 1;
}
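
/*
 * Worked example (editor's note): a device in slot 2 using INTB (pin 2)
 * behind one bridge maps to (((2 - 1) + 2) % 4) + 1 = 4, i.e. INTD on
 * the bridge's primary side.
 */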

int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pin = dev->pin;
	if (!pin)
		return -1;

	while (!pci_is_root_bus(dev->bus)) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}

/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
 * bridges all the way up to a PCI root bus.
 */
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
	u8 pin = *pinp;

	while (!pci_is_root_bus(dev->bus)) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*pinp = pin;
	return PCI_SLOT(dev->devfn);
}

/**
 * pci_release_region - Release a PCI BAR
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));

	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask &= ~(1 << bar);
}

/**
 * __pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
static int __pci_request_region(struct pci_dev *pdev, int bar,
				const char *res_name, int exclusive)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!__request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name,
					exclusive))
			goto err_out;
	}

	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask |= 1 << bar;

	return 0;

err_out:
	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
		 &pdev->resource[bar]);
	return -EBUSY;
}

/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, 0);
}

/**
 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 *
 * The key difference that _exclusive makes is that userspace is
 * explicitly not allowed to map the resource via /dev/mem or
 * sysfs.
 */
int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
}
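
/*
 * Example (editor's illustrative sketch): the usual probe-time sequence
 * is to enable the device, reserve the BAR, then map it.  "foo" names
 * are hypothetical and error unwinding is abbreviated.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		void __iomem *regs;
 *		int rc;
 *
 *		rc = pci_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *		rc = pci_request_region(pdev, 0, "foo");
 *		if (rc)
 *			goto err_disable;
 *		regs = pci_ioremap_bar(pdev, 0);
 *		if (!regs) {
 *			rc = -ENOMEM;
 *			goto err_release;
 *		}
 *		...
 *	}
 */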

/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			pci_release_region(pdev, i);
}

static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name, int excl)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			if (__pci_request_region(pdev, i, res_name, excl))
				goto err_out;
	return 0;

err_out:
	while (--i >= 0)
		if (bars & (1 << i))
			pci_release_region(pdev, i);

	return -EBUSY;
}

/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name, 0);
}

int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
				 int bars, const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name,
			IORESOURCE_EXCLUSIVE);
}

/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_regions(struct pci_dev *pdev)
{
	pci_release_selected_regions(pdev, (1 << 6) - 1);
}

/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}

/**
 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * pci_request_regions_exclusive() will mark the region so that
 * /dev/mem and the sysfs MMIO access will not be allowed.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions_exclusive(pdev,
					((1 << 6) - 1), res_name);
}

static void __pci_set_master(struct pci_dev *dev, bool enable)
{
	u16 old_cmd, cmd;

	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
	if (enable)
		cmd = old_cmd | PCI_COMMAND_MASTER;
	else
		cmd = old_cmd & ~PCI_COMMAND_MASTER;
	if (cmd != old_cmd) {
		dev_dbg(&dev->dev, "%s bus mastering\n",
			enable ? "enabling" : "disabling");
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	dev->is_busmaster = enable;
}

/**
 * pcibios_setup - process "pci=" kernel boot arguments
 * @str: string used to pass in "pci=" kernel boot arguments
 *
 * Process kernel boot arguments.  This is the default implementation.
 * Architecture specific implementations can override this as necessary.
 */
char * __weak __init pcibios_setup(char *str)
{
	return str;
}

/**
 * pcibios_set_master - enable PCI bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables PCI bus-mastering for the device.  This is the default
 * implementation.  Architecture specific implementations can override
 * this if necessary.
 */
void __weak pcibios_set_master(struct pci_dev *dev)
{
	u8 lat;

	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
	if (pci_is_pcie(dev))
		return;

	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
	if (lat < 16)
		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
	else if (lat > pcibios_max_latency)
		lat = pcibios_max_latency;
	else
		return;
	dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void pci_set_master(struct pci_dev *dev)
{
	__pci_set_master(dev, true);
	pcibios_set_master(dev);
}

/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
void pci_clear_master(struct pci_dev *dev)
{
	__pci_set_master(dev, false);
}
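
/*
 * Example (editor's illustrative sketch): a DMA-capable driver typically
 * turns on bus mastering once its descriptor rings are set up and turns
 * it off before tearing them down:
 *
 *	pci_set_master(pdev);
 *	... set up and run DMA ...
 *	pci_clear_master(pdev);
 */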

/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_set_cacheline_size(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
		   "supported\n", pci_cache_line_size << 2);

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(pci_set_cacheline_size);

#ifdef PCI_DISABLE_MWI
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}

int pci_try_set_mwi(struct pci_dev *dev)
{
	return 0;
}

void pci_clear_mwi(struct pci_dev *dev)
{
}

#else

/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

	rc = pci_set_cacheline_size(dev);
	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}

/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_try_set_mwi(struct pci_dev *dev)
{
	int rc = pci_set_mwi(dev);
	return rc;
}
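
/*
 * Example (editor's illustrative sketch): MWI is purely an optimization
 * on conventional PCI, so most callers use the _try_ variant and ignore
 * the result rather than failing probe over it:
 *
 *	(void)pci_try_set_mwi(pdev);
 */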

/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}
#endif /* ! PCI_DISABLE_MWI */

/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device dev
 */
void
pci_intx(struct pci_dev *pdev, int enable)
{
	u16 pci_command, new;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

	if (enable)
		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
	else
		new = pci_command | PCI_COMMAND_INTX_DISABLE;

	if (new != pci_command) {
		struct pci_devres *dr;

		pci_write_config_word(pdev, PCI_COMMAND, new);

		dr = find_pci_dr(pdev);
		if (dr && !dr->restore_intx) {
			dr->restore_intx = 1;
			dr->orig_intx = !enable;
		}
	}
}

/**
 * pci_intx_mask_supported - probe for INTx masking support
 * @dev: the PCI device to operate on
 *
 * Check if the device dev supports INTx masking via the config space
 * command word.
 */
bool pci_intx_mask_supported(struct pci_dev *dev)
{
	bool mask_supported = false;
	u16 orig, new;

	if (dev->broken_intx_masking)
		return false;

	pci_cfg_access_lock(dev);

	pci_read_config_word(dev, PCI_COMMAND, &orig);
	pci_write_config_word(dev, PCI_COMMAND,
			      orig ^ PCI_COMMAND_INTX_DISABLE);
	pci_read_config_word(dev, PCI_COMMAND, &new);

	/*
	 * There's no way to protect against hardware bugs or detect them
	 * reliably, but as long as we know what the value should be, let's
	 * go ahead and check it.
	 */
	if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
		dev_err(&dev->dev, "Command register changed from "
			"0x%x to 0x%x: driver or hardware bug?\n", orig, new);
	} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
		mask_supported = true;
		/* invert the transformation */
		pci_write_config_word(dev, PCI_COMMAND, orig);
	}

	pci_cfg_access_unlock(dev);

	return mask_supported;
}
EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
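
/*
 * Example (editor's illustrative sketch): drivers that service INTx
 * from a masked state (uio/vfio style) typically verify support once at
 * probe time and bail out if the device can't do it:
 *
 *	if (!pci_intx_mask_supported(pdev))
 *		return -ENODEV;
 */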

static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
{
	struct pci_bus *bus = dev->bus;
	bool mask_updated = true;
	u32 cmd_status_dword;
	u16 origcmd, newcmd;
	unsigned long flags;
	bool irq_pending;

	/*
	 * We do a single dword read to retrieve both command and status.
	 * Document assumptions that make this possible.
	 */
	BUILD_BUG_ON(PCI_COMMAND % 4);
	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);

	raw_spin_lock_irqsave(&pci_lock, flags);

	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);

	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;

	/*
	 * Check interrupt status register to see whether our device
	 * triggered the interrupt (when masking) or the next IRQ is
	 * already pending (when unmasking).
	 */
	if (mask != irq_pending) {
		mask_updated = false;
		goto done;
	}

	origcmd = cmd_status_dword;
	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
	if (mask)
		newcmd |= PCI_COMMAND_INTX_DISABLE;
	if (newcmd != origcmd)
		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);

done:
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return mask_updated;
}

/**
 * pci_check_and_mask_intx - mask INTx on pending interrupt
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, mask it and
 * return true in that case.  False is returned if no interrupt was
 * pending.
 */
bool pci_check_and_mask_intx(struct pci_dev *dev)
{
	return pci_check_and_set_intx_mask(dev, true);
}
EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);

/**
 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, unmask it if not
 * and return true.  False is returned and the mask remains active if
 * there was still an interrupt pending.
 */
bool pci_check_and_unmask_intx(struct pci_dev *dev)
{
	return pci_check_and_set_intx_mask(dev, false);
}
EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
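
/*
 * Example (editor's illustrative sketch): a handler for a shared INTx
 * line can use the check-and-mask helper both to claim the interrupt
 * and to silence it until the device has been serviced.  "foo_irq" and
 * "struct foo" are made-up names.
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo *priv = data;
 *
 *		if (!pci_check_and_mask_intx(priv->pdev))
 *			return IRQ_NONE;	... not our interrupt ...
 *		... acknowledge the device, then ...
 *		pci_check_and_unmask_intx(priv->pdev);
 *		return IRQ_HANDLED;
 *	}
 */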

/**
 * pci_msi_off - disables any msi or msix capabilities
 * @dev: the PCI device to operate on
 *
 * If you want to use msi see pci_enable_msi and friends.
 * This is a lower level primitive that allows us to disable
 * msi operation at the device level.
 */
void pci_msi_off(struct pci_dev *dev)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}
EXPORT_SYMBOL_GPL(pci_msi_off);

int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
	return dma_set_max_seg_size(&dev->dev, size);
}
EXPORT_SYMBOL(pci_set_dma_max_seg_size);

int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
{
	return dma_set_seg_boundary(&dev->dev, mask);
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);

static int pcie_flr(struct pci_dev *dev, int probe)
{
	int i;
	u32 cap;
	u16 status;

	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
	if (!(cap & PCI_EXP_DEVCAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
		if (!(status & PCI_EXP_DEVSTA_TRPND))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);

	msleep(100);

	return 0;
}

static int pci_af_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u8 cap;
	u8 status;

	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
	if (!pos)
		return -ENOTTY;

	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
		if (!(status & PCI_AF_STATUS_TP))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
	msleep(100);

	return 0;
}

/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: If set, only check if the device can be reset this way.
 *
 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
 * unset, it will be reinitialized internally when going from PCI_D3hot to
 * PCI_D0.  If that's the case and the device is not in a low-power state
 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3_delay field has a different value).
 * Moreover, only devices in D0 can be reset by this function.
 */
static int pci_pm_reset(struct pci_dev *dev, int probe)
{
	u16 csr;

	if (!dev->pm_cap)
		return -ENOTTY;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
		return -ENOTTY;

	if (probe)
		return 0;

	if (dev->current_state != PCI_D0)
		return -EINVAL;

	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D3hot;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D0;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	return 0;
}

static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
	u16 ctrl;
	struct pci_dev *pdev;

	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
		return -ENOTTY;

	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev != dev)
			return -ENOTTY;

	if (probe)
		return 0;

	pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	return 0;
}

static int __pci_dev_reset(struct pci_dev *dev, int probe)
{
	int rc;

	might_sleep();

	rc = pci_dev_specific_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pcie_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_af_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_pm_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_parent_bus_reset(dev, probe);
done:
	return rc;
}

static int pci_dev_reset(struct pci_dev *dev, int probe)
{
	int rc;

	if (!probe) {
		pci_cfg_access_lock(dev);
		/* block PM suspend, driver probe, etc. */
		device_lock(&dev->dev);
	}

	rc = __pci_dev_reset(dev, probe);

	if (!probe) {
		device_unlock(&dev->dev);
		pci_cfg_access_unlock(dev);
	}
	return rc;
}

/**
 * __pci_reset_function - reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int __pci_reset_function(struct pci_dev *dev)
{
	return pci_dev_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function);

/**
 * __pci_reset_function_locked - reset a PCI device function while holding
 * the @dev mutex lock.
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused and the caller is holding
 * the device mutex lock when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int __pci_reset_function_locked(struct pci_dev *dev)
{
	return __pci_dev_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function_locked);

/**
 * pci_probe_reset_function - check whether the device can be safely reset
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * Returns 0 if the device function can be reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_probe_reset_function(struct pci_dev *dev)
{
	return pci_dev_reset(dev, 1);
}

/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  This function differs
 * from __pci_reset_function in that it saves and restores device state
 * over the reset.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_reset_function(struct pci_dev *dev)
{
	int rc;

	rc = pci_dev_reset(dev, 1);
	if (rc)
		return rc;

	pci_save_state(dev);

	/*
	 * both INTx and MSI are disabled after the Interrupt Disable bit
	 * is set and the Bus Master bit is cleared.
	 */
	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	rc = pci_dev_reset(dev, 0);

	pci_restore_state(dev);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
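
/*
 * Example (editor's illustrative sketch): since pci_reset_function()
 * saves and restores config space around the reset, a driver can use it
 * to recover a wedged device without redoing BAR setup:
 *
 *	if (pci_reset_function(pdev))
 *		dev_err(&pdev->dev, "function reset not supported\n");
 */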

/**
 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum designed memory read count in bytes
 *    or appropriate error value.
 */
int pcix_get_max_mmrbc(struct pci_dev *dev)
{
	int cap;
	u32 stat;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
}
EXPORT_SYMBOL(pcix_get_max_mmrbc);

/**
 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum memory read count in bytes
 *    or appropriate error value.
 */
int pcix_get_mmrbc(struct pci_dev *dev)
{
	int cap;
	u16 cmd;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
}
EXPORT_SYMBOL(pcix_get_mmrbc);

/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *    valid values are 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read byte count, some bridges have
 * errata that prevent this.
 */
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
	int cap;
	u32 stat, v, o;
	u16 cmd;

	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
		return -EINVAL;

	v = ffs(mmrbc) - 10;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
		return -E2BIG;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
	if (o != v) {
		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
			return -EIO;

		cmd &= ~PCI_X_CMD_MAX_READ;
		cmd |= v << 2;
		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL(pcix_set_mmrbc);

/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns maximum memory read request in bytes
 *    or appropriate error value.
 */
int pcie_get_readrq(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
}
EXPORT_SYMBOL(pcie_get_readrq);

/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read request in bytes
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
	u16 v;

	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
		return -EINVAL;

	/*
	 * If using the "performance" PCIe config, we clamp the
	 * read rq size to the max packet size to prevent the
	 * host bridge generating requests larger than we can
	 * cope with
	 */
	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		int mps = pcie_get_mps(dev);

		if (mps < 0)
			return mps;
		if (mps < rq)
			rq = mps;
	}

	v = (ffs(rq) - 8) << 12;

	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						  PCI_EXP_DEVCTL_READRQ, v);
}
EXPORT_SYMBOL(pcie_set_readrq);
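
/*
 * Example (editor's illustrative sketch): drivers occasionally bound the
 * read request size to match device-side buffering; values must be a
 * power of two between 128 and 4096 bytes:
 *
 *	if (pcie_get_readrq(pdev) > 512)
 *		pcie_set_readrq(pdev, 512);
 */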

/**
 * pcie_get_mps - get PCI Express maximum payload size
 * @dev: PCI device to query
 *
 * Returns maximum payload size in bytes
 *    or appropriate error value.
 */
int pcie_get_mps(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
}

/**
 * pcie_set_mps - set PCI Express maximum payload size
 * @dev: PCI device to query
 * @mps: maximum payload size in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum payload size
 */
int pcie_set_mps(struct pci_dev *dev, int mps)
{
	u16 v;

	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
		return -EINVAL;

	v = ffs(mps) - 8;
	if (v > dev->pcie_mpss)
		return -EINVAL;
	v <<= 5;

	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						  PCI_EXP_DEVCTL_PAYLOAD, v);
}

/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes bar mask from the type of resource.
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
	int i, bars = 0;
	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (pci_resource_flags(dev, i) & flags)
			bars |= (1 << i);
	return bars;
}
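
/*
 * Example (editor's illustrative sketch): pci_select_bars() pairs
 * naturally with the selected-regions helpers above to reserve only the
 * memory BARs of a device; "foo" is a hypothetical owner name.
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	rc = pci_request_selected_regions(pdev, bars, "foo");
 */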

/**
 * pci_resource_bar - get position of the BAR associated with a resource
 * @dev: the PCI device
 * @resno: the resource number
 * @type: the BAR type to be filled in
 *
 * Returns BAR position in config space, or 0 if the BAR is invalid.
 */
int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
{
	int reg;

	if (resno < PCI_ROM_RESOURCE) {
		*type = pci_bar_unknown;
		return PCI_BASE_ADDRESS_0 + 4 * resno;
	} else if (resno == PCI_ROM_RESOURCE) {
		*type = pci_bar_mem32;
		return dev->rom_base_reg;
	} else if (resno < PCI_BRIDGE_RESOURCES) {
		/* device specific resource */
		reg = pci_iov_resource_bar(dev, resno, type);
		if (reg)
			return reg;
	}

	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
	return 0;
}

/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;	/* NULL disables */
}

static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits,
						flags);
	return 0;
}

/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: traverse ancestors and change bridges
 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
	if (rc)
		return rc;

	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode)
			cmd |= command_bits;
		else
			cmd &= ~command_bits;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}

	return 0;
}

#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
	int seg, bus, slot, func, align_order, count;
	resource_size_t align = 0;
	char *p;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
							p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}
		if (sscanf(p, "%x:%x:%x.%x%n",
			&seg, &bus, &slot, &func, &count) != 4) {
			seg = 0;
			if (sscanf(p, "%x:%x.%x%n",
					&bus, &slot, &func, &count) != 3) {
				/* Invalid format */
				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
					p);
				break;
			}
		}
		p += count;
		if (seg == pci_domain_nr(dev->bus) &&
			bus == dev->bus->number &&
			slot == PCI_SLOT(dev->devfn) &&
			func == PCI_FUNC(dev->devfn)) {
			if (align_order == -1)
				align = PAGE_SIZE;
			else
				align = 1 << align_order;
			/* Found */
			break;
		}
		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
	spin_unlock(&resource_alignment_lock);
	return align;
}
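
/*
 * Example (editor's note): with the format parsed above, booting with
 *
 *	pci=resource_alignment=20@0000:00:1f.0
 *
 * requests 2^20 (1MB) alignment for the device at domain 0000, bus 00,
 * slot 1f, function 0; omitting the "20@" prefix falls back to
 * PAGE_SIZE alignment.
 */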

/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align, size;
	u16 command;

	/* check if specified PCI is target device to reassign */
	align = pci_specified_resource_alignment(dev);
	if (!align)
		return;

	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		dev_warn(&dev->dev,
			"Can't reassign resources to host bridge.\n");
		return;
	}

	dev_info(&dev->dev,
		"Disabling memory decoding and releasing memory resources.\n");
	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
		r = &dev->resource[i];
		if (!(r->flags & IORESOURCE_MEM))
			continue;
		size = resource_size(r);
		if (size < align) {
			size = align;
			dev_info(&dev->dev,
				"Rounding up size of resource #%d to %#llx.\n",
				i, (unsigned long long)size);
		}
		r->end = size - 1;
		r->start = 0;
	}
	/* Need to disable bridge's resource window,
	 * to enable the kernel to reassign new resource
	 * window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}

static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
{
	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
	spin_lock(&resource_alignment_lock);
	strncpy(resource_alignment_param, buf, count);
	resource_alignment_param[count] = '\0';
	spin_unlock(&resource_alignment_lock);
	return count;
}

static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
{
	size_t count;

	spin_lock(&resource_alignment_lock);
	count = snprintf(buf, size, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);

	return count;
}

static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
{
	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}

static ssize_t pci_resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	return pci_set_resource_alignment_param(buf, count);
}

BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
					pci_resource_alignment_store);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
					&bus_attr_resource_alignment);
}

late_initcall(pci_resource_alignment_sysfs_init);

static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff).  This is the default implementation.  Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
							strlen(str + 19));
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);

EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_request_region_exclusive);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_clear_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
);