/*
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000	/* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	msleep(delay);
}

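/*
 * Illustrative sketch (not part of the original file): pci_dev_d3_sleep()
 * honors the larger of dev->d3_delay and the global pci_pm_d3_delay, so a
 * device that needs more than the spec-mandated delay after a D3hot -> D0
 * transition can have its delay raised from a fixup quirk.  The vendor and
 * device IDs below are placeholders, not real hardware.
 */
#if 0	/* example only */
static void quirk_slow_d3_transition(struct pci_dev *dev)
{
	dev->d3_delay = 120;	/* milliseconds */
}
DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_slow_d3_transition);
#endif
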
#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_nocache(pci_resource_start(pdev, bar),
			       pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif

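/*
 * Illustrative sketch (not part of the original file): a typical probe()
 * path maps a memory BAR with pci_ioremap_bar() after enabling the device
 * and claiming its regions.  The function name, driver name and choice of
 * BAR 0 are assumptions for the example.
 */
#if 0	/* example only */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int rc;

	rc = pci_enable_device_mem(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, "example");
	if (rc)
		goto err_disable;

	regs = pci_ioremap_bar(pdev, 0);	/* BAR 0 must be a MEM BAR */
	if (!regs) {
		rc = -ENOMEM;
		goto err_release;
	}
	/* ... use regs ... */
	return 0;

err_release:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return rc;
}
#endif
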
#define PCI_FIND_CAP_TTL	48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM	Power Management
 *  %PCI_CAP_ID_AGP	Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD	Vital Product Data
 *  %PCI_CAP_ID_SLOTID	Slot Identification
 *  %PCI_CAP_ID_MSI	Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP	CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX	PCI-X
 *  %PCI_CAP_ID_EXP	PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}

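/*
 * Illustrative sketch (not part of the original file): a driver can use
 * pci_find_capability() to locate a capability and then read its registers
 * relative to the returned offset.  The Power Management capability and the
 * helper name are used here purely as an example.
 */
#if 0	/* example only */
static void example_dump_pm_cap(struct pci_dev *pdev)
{
	int pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
	u16 pmc;

	if (!pos)
		return;		/* device has no PM capability */

	pci_read_config_word(pdev, pos + PCI_PM_PMC, &pmc);
	dev_info(&pdev->dev, "PM capability at %#x, PMC=%#06x\n", pos, pmc);
}
#endif
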
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR	Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC	Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN	Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR	Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

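/*
 * Illustrative sketch (not part of the original file): locating the AER
 * extended capability with pci_find_ext_capability() and reading its
 * uncorrectable-error status register.  The helper name is an assumption
 * for the example.
 */
#if 0	/* example only */
static void example_dump_aer_status(struct pci_dev *pdev)
{
	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	u32 status;

	if (!pos)
		return;		/* no AER capability */

	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_STATUS, &status);
	dev_info(&pdev->dev, "AER at %#x, uncorrectable status %#010x\n",
		 pos, status);
}
#endif
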
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource
 * region of parent bus the given region is contained in.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (res->start && resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}

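/*
 * Illustrative sketch (not part of the original file): waiting for a PCIe
 * function's Transaction Pending bit to clear before issuing a function
 * level reset.  The offsets follow the PCIe spec; the wrapper name is an
 * assumption for the example.
 */
#if 0	/* example only */
static int example_wait_for_transactions(struct pci_dev *dev)
{
	if (!pci_is_pcie(dev))
		return 1;

	/* Poll PCI_EXP_DEVSTA until the TRPND bit drops (up to ~700 ms). */
	return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
				    PCI_EXP_DEVSTA_TRPND);
}
#endif
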
/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}

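/*
 * Illustrative sketch (not part of the original file): the general shape of
 * a platform PM backend registered through pci_set_platform_pm().  In the
 * kernel this role is filled by the ACPI glue; the acme_* callback names
 * here are placeholders, and only the hooks checked above are shown.
 */
#if 0	/* example only */
static struct pci_platform_pm_ops acme_pci_platform_pm = {
	.is_manageable	= acme_pci_power_manageable,
	.set_state	= acme_pci_set_power_state,
	.choose_state	= acme_pci_choose_state,
	.sleep_wake	= acme_pci_sleep_wake,
	.run_wake	= acme_pci_run_wake,
};

static int __init acme_pci_pm_init(void)
{
	return pci_set_platform_pm(&acme_pci_platform_pm);
}
#endif
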
/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * Can enter D0 from any state, but we can only go deeper
	 * to sleep if we're already in a low power state
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	    || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		    && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			 "currently in D%d\n", dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		/*
		 * Configuration space is not accessible for device in
		 * D3cold, so just keep or set D3cold for safety
		 */
		if (dev->current_state == PCI_D3cold)
			return;
		if (state == PCI_D3cold) {
			dev->current_state = PCI_D3cold;
			return;
		}
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_set_power_state(dev, PCI_D0);

	pci_raw_set_power_state(dev, PCI_D0);
	pci_update_current_state(dev, PCI_D0);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
static void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0) {
		pci_platform_power_transition(dev, PCI_D0);
		/*
		 * Mandatory power management transition delays, see
		 * PCI Express Base Specification Revision 2.0 Section
		 * 6.6.1: Conventional Reset.  Do not delay for
		 * devices powered on/off by corresponding bridge,
		 * because we have already delayed for the bridge.
		 */
		if (dev->runtime_d3cold) {
			msleep(dev->d3cold_delay);
			/*
			 * When powering on a bridge from D3cold, the
			 * whole hierarchy may be powered on into
			 * D0uninitialized state, resume them to give
			 * them a chance to suspend again
			 */
			pci_wakeup_bus(dev->subordinate);
		}
	}
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * __pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int ret;

	if (state <= PCI_D0)
		return -EINVAL;
	ret = pci_platform_power_transition(dev, state);
	/* Powering off the bridge may power off the whole hierarchy */
	if (!ret && state == PCI_D3cold)
		__pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
	return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;
	/*
	 * When aspm_policy is "powersave" this call ensures
	 * that ASPM is configured.
	 */
	if (!error && dev->bus->self)
		pcie_aspm_powersave_config_link(dev->bus->self);

	return error;
}

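/*
 * Illustrative sketch (not part of the original file): a legacy
 * (non-dev_pm_ops) resume hook typically brings the device back to D0 with
 * pci_set_power_state(), restores config space and re-enables it.  The
 * function name is an assumption for the example.
 */
#if 0	/* example only */
static int example_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}
#endif
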
/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!dev->pm_cap)
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);

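/*
 * Illustrative sketch (not part of the original file): a legacy
 * (non-dev_pm_ops) suspend hook usually saves config space, disables the
 * device and then enters the state returned by pci_choose_state().  The
 * function name is an assumption for the example.
 */
#if 0	/* example only */
static int example_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
}
#endif
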
#define PCI_EXP_SAVE_REGS	7


static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}


static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}


/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	if ((i = pci_save_vc_state(dev)) != 0)
		return i;
	return 0;
}

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		dev_dbg(&pdev->dev, "restoring config space at offset "
			"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_ats_state(dev);
	pci_restore_vc_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
static int pci_load_saved_state(struct pci_dev *dev,
				struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);

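/*
 * Illustrative sketch (not part of the original file): pci_store_saved_state()
 * and pci_load_and_free_saved_state() let a caller stash a snapshot of a
 * device's saved state and reapply it later, for example around a reset.
 * The function name is an assumption for the example.
 */
#if 0	/* example only */
static int example_snapshot_and_reset(struct pci_dev *pdev)
{
	struct pci_saved_state *snap;
	int ret;

	pci_save_state(pdev);
	snap = pci_store_saved_state(pdev);

	/* ... reset the device here ... */

	ret = pci_load_and_free_saved_state(pdev, &snap);
	if (!ret)
		pci_restore_state(pdev);
	return ret;
}
#endif
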
8a9d5609
BH
1188int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1189{
1190 return pci_enable_resources(dev, bars);
1191}
1192
38cc1302
HS
1193static int do_pci_enable_device(struct pci_dev *dev, int bars)
1194{
1195 int err;
1e2571a7
BH
1196 u16 cmd;
1197 u8 pin;
38cc1302
HS
1198
1199 err = pci_set_power_state(dev, PCI_D0);
1200 if (err < 0 && err != -EIO)
1201 return err;
1202 err = pcibios_enable_device(dev, bars);
1203 if (err < 0)
1204 return err;
1205 pci_fixup_device(pci_fixup_enable, dev);
1206
866d5417
BH
1207 if (dev->msi_enabled || dev->msix_enabled)
1208 return 0;
1209
1e2571a7
BH
1210 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1211 if (pin) {
1212 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1213 if (cmd & PCI_COMMAND_INTX_DISABLE)
1214 pci_write_config_word(dev, PCI_COMMAND,
1215 cmd & ~PCI_COMMAND_INTX_DISABLE);
1216 }
1217
38cc1302
HS
1218 return 0;
1219}
1220
1221/**
0b62e13b 1222 * pci_reenable_device - Resume abandoned device
38cc1302
HS
1223 * @dev: PCI device to be resumed
1224 *
1225 * Note this function is a backend of pci_default_resume and is not supposed
1226 * to be called by normal code, write proper resume handler and use it instead.
1227 */
0b62e13b 1228int pci_reenable_device(struct pci_dev *dev)
38cc1302 1229{
296ccb08 1230 if (pci_is_enabled(dev))
38cc1302
HS
1231 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1232 return 0;
1233}
1234
928bea96
YL
1235static void pci_enable_bridge(struct pci_dev *dev)
1236{
79272138 1237 struct pci_dev *bridge;
928bea96
YL
1238 int retval;
1239
79272138
BH
1240 bridge = pci_upstream_bridge(dev);
1241 if (bridge)
1242 pci_enable_bridge(bridge);
928bea96 1243
cf3e1feb 1244 if (pci_is_enabled(dev)) {
fbeeb822 1245 if (!dev->is_busmaster)
cf3e1feb 1246 pci_set_master(dev);
928bea96 1247 return;
cf3e1feb
YL
1248 }
1249
928bea96
YL
1250 retval = pci_enable_device(dev);
1251 if (retval)
1252 dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
1253 retval);
1254 pci_set_master(dev);
1255}
1256
b4b4fbba 1257static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1da177e4 1258{
79272138 1259 struct pci_dev *bridge;
1da177e4 1260 int err;
b718989d 1261 int i, bars = 0;
1da177e4 1262
97c145f7
JB
1263 /*
1264 * Power state could be unknown at this point, either due to a fresh
1265 * boot or a device removal call. So get the current power state
1266 * so that things like MSI message writing will behave as expected
1267 * (e.g. if the device really is in D0 at enable time).
1268 */
1269 if (dev->pm_cap) {
1270 u16 pmcsr;
1271 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1272 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1273 }
1274
cc7ba39b 1275 if (atomic_inc_return(&dev->enable_cnt) > 1)
9fb625c3
HS
1276 return 0; /* already enabled */
1277
79272138
BH
1278 bridge = pci_upstream_bridge(dev);
1279 if (bridge)
1280 pci_enable_bridge(bridge);
928bea96 1281
497f16f2
YL
1282 /* only skip sriov related */
1283 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1284 if (dev->resource[i].flags & flags)
1285 bars |= (1 << i);
1286 for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
b718989d
BH
1287 if (dev->resource[i].flags & flags)
1288 bars |= (1 << i);
1289
38cc1302 1290 err = do_pci_enable_device(dev, bars);
95a62965 1291 if (err < 0)
38cc1302 1292 atomic_dec(&dev->enable_cnt);
9fb625c3 1293 return err;
1da177e4
LT
1294}
1295
b718989d
BH
1296/**
1297 * pci_enable_device_io - Initialize a device for use with IO space
1298 * @dev: PCI device to be initialized
1299 *
1300 * Initialize device before it's used by a driver. Ask low-level code
1301 * to enable I/O resources. Wake up the device if it was suspended.
1302 * Beware, this function can fail.
1303 */
1304int pci_enable_device_io(struct pci_dev *dev)
1305{
b4b4fbba 1306 return pci_enable_device_flags(dev, IORESOURCE_IO);
b718989d
BH
1307}
1308
1309/**
1310 * pci_enable_device_mem - Initialize a device for use with Memory space
1311 * @dev: PCI device to be initialized
1312 *
1313 * Initialize device before it's used by a driver. Ask low-level code
1314 * to enable Memory resources. Wake up the device if it was suspended.
1315 * Beware, this function can fail.
1316 */
1317int pci_enable_device_mem(struct pci_dev *dev)
1318{
b4b4fbba 1319 return pci_enable_device_flags(dev, IORESOURCE_MEM);
b718989d
BH
1320}
1321
bae94d02
IPG
1322/**
1323 * pci_enable_device - Initialize device before it's used by a driver.
1324 * @dev: PCI device to be initialized
1325 *
1326 * Initialize device before it's used by a driver. Ask low-level code
1327 * to enable I/O and memory. Wake up the device if it was suspended.
1328 * Beware, this function can fail.
1329 *
1330 * Note we don't actually enable the device many times if we call
1331 * this function repeatedly (we just increment the count).
1332 */
1333int pci_enable_device(struct pci_dev *dev)
1334{
b4b4fbba 1335 return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
bae94d02
IPG
1336}
1337
9ac7849e
TH
1338/*
1339 * Managed PCI resources. This manages device on/off, intx/msi/msix
1340 * on/off and BAR regions. pci_dev itself records msi/msix status, so
1341 * there's no need to track it separately. pci_devres is initialized
1342 * when a device is enabled using managed PCI device enable interface.
1343 */
1344struct pci_devres {
7f375f32
TH
1345 unsigned int enabled:1;
1346 unsigned int pinned:1;
9ac7849e
TH
1347 unsigned int orig_intx:1;
1348 unsigned int restore_intx:1;
1349 u32 region_mask;
1350};
1351
1352static void pcim_release(struct device *gendev, void *res)
1353{
1354 struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
1355 struct pci_devres *this = res;
1356 int i;
1357
1358 if (dev->msi_enabled)
1359 pci_disable_msi(dev);
1360 if (dev->msix_enabled)
1361 pci_disable_msix(dev);
1362
1363 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1364 if (this->region_mask & (1 << i))
1365 pci_release_region(dev, i);
1366
1367 if (this->restore_intx)
1368 pci_intx(dev, this->orig_intx);
1369
7f375f32 1370 if (this->enabled && !this->pinned)
9ac7849e
TH
1371 pci_disable_device(dev);
1372}
1373
1374static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
1375{
1376 struct pci_devres *dr, *new_dr;
1377
1378 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1379 if (dr)
1380 return dr;
1381
1382 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1383 if (!new_dr)
1384 return NULL;
1385 return devres_get(&pdev->dev, new_dr, NULL, NULL);
1386}
1387
1388static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
1389{
1390 if (pci_is_managed(pdev))
1391 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1392 return NULL;
1393}
1394
1395/**
1396 * pcim_enable_device - Managed pci_enable_device()
1397 * @pdev: PCI device to be initialized
1398 *
1399 * Managed pci_enable_device().
1400 */
1401int pcim_enable_device(struct pci_dev *pdev)
1402{
1403 struct pci_devres *dr;
1404 int rc;
1405
1406 dr = get_pci_dr(pdev);
1407 if (unlikely(!dr))
1408 return -ENOMEM;
b95d58ea
TH
1409 if (dr->enabled)
1410 return 0;
9ac7849e
TH
1411
1412 rc = pci_enable_device(pdev);
1413 if (!rc) {
1414 pdev->is_managed = 1;
7f375f32 1415 dr->enabled = 1;
9ac7849e
TH
1416 }
1417 return rc;
1418}
1419
1420/**
1421 * pcim_pin_device - Pin managed PCI device
1422 * @pdev: PCI device to pin
1423 *
1424 * Pin managed PCI device @pdev. Pinned device won't be disabled on
1425 * driver detach. @pdev must have been enabled with
1426 * pcim_enable_device().
1427 */
1428void pcim_pin_device(struct pci_dev *pdev)
1429{
1430 struct pci_devres *dr;
1431
1432 dr = find_pci_dr(pdev);
7f375f32 1433 WARN_ON(!dr || !dr->enabled);
9ac7849e 1434 if (dr)
7f375f32 1435 dr->pinned = 1;
9ac7849e
TH
1436}
1437
eca0d467
MG
1438/*
1439 * pcibios_add_device - provide arch specific hooks when adding device dev
1440 * @dev: the PCI device being added
1441 *
1442 * Permits the platform to provide architecture specific functionality when
1443 * devices are added. This is the default implementation. Architecture
1444 * implementations can override this.
1445 */
1446int __weak pcibios_add_device (struct pci_dev *dev)
1447{
1448 return 0;
1449}
1450
6ae32c53
SO
1451/**
1452 * pcibios_release_device - provide arch specific hooks when releasing device dev
1453 * @dev: the PCI device being released
1454 *
1455 * Permits the platform to provide architecture specific functionality when
1456 * devices are released. This is the default implementation. Architecture
1457 * implementations can override this.
1458 */
1459void __weak pcibios_release_device(struct pci_dev *dev) {}
1460
1da177e4
LT
1461/**
1462 * pcibios_disable_device - disable arch specific PCI resources for device dev
1463 * @dev: the PCI device to disable
1464 *
1465 * Disables architecture specific PCI resources for the device. This
1466 * is the default implementation. Architecture implementations can
1467 * override this.
1468 */
d6d88c83 1469void __weak pcibios_disable_device (struct pci_dev *dev) {}
1da177e4 1470
a43ae58c
HG
1471/**
1472 * pcibios_penalize_isa_irq - penalize an ISA IRQ
1473 * @irq: ISA IRQ to penalize
1474 * @active: IRQ active or not
1475 *
1476 * Permits the platform to provide architecture-specific functionality when
1477 * penalizing ISA IRQs. This is the default implementation. Architecture
1478 * implementations can override this.
1479 */
1480void __weak pcibios_penalize_isa_irq(int irq, int active) {}
1481
fa58d305
RW
1482static void do_pci_disable_device(struct pci_dev *dev)
1483{
1484 u16 pci_command;
1485
1486 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1487 if (pci_command & PCI_COMMAND_MASTER) {
1488 pci_command &= ~PCI_COMMAND_MASTER;
1489 pci_write_config_word(dev, PCI_COMMAND, pci_command);
1490 }
1491
1492 pcibios_disable_device(dev);
1493}
1494
1495/**
1496 * pci_disable_enabled_device - Disable device without updating enable_cnt
1497 * @dev: PCI device to disable
1498 *
1499 * NOTE: This function is a backend of PCI power management routines and is
1500 * not supposed to be called drivers.
1501 */
1502void pci_disable_enabled_device(struct pci_dev *dev)
1503{
296ccb08 1504 if (pci_is_enabled(dev))
fa58d305
RW
1505 do_pci_disable_device(dev);
1506}
1507
1da177e4
LT
1508/**
1509 * pci_disable_device - Disable PCI device after use
1510 * @dev: PCI device to be disabled
1511 *
1512 * Signal to the system that the PCI device is not in use by the system
1513 * anymore. This only involves disabling PCI bus-mastering, if active.
bae94d02
IPG
1514 *
1515 * Note we don't actually disable the device until all callers of
ee6583f6 1516 * pci_enable_device() have called pci_disable_device().
1da177e4
LT
1517 */
1518void
1519pci_disable_device(struct pci_dev *dev)
1520{
9ac7849e 1521 struct pci_devres *dr;
99dc804d 1522
9ac7849e
TH
1523 dr = find_pci_dr(dev);
1524 if (dr)
7f375f32 1525 dr->enabled = 0;
9ac7849e 1526
fd6dceab
KK
1527 dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
1528 "disabling already-disabled device");
1529
cc7ba39b 1530 if (atomic_dec_return(&dev->enable_cnt) != 0)
bae94d02
IPG
1531 return;
1532
fa58d305 1533 do_pci_disable_device(dev);
1da177e4 1534
fa58d305 1535 dev->is_busmaster = 0;
1da177e4
LT
1536}
1537
f7bdd12d
BK
1538/**
1539 * pcibios_set_pcie_reset_state - set reset state for device dev
45e829ea 1540 * @dev: the PCIe device reset
f7bdd12d
BK
1541 * @state: Reset state to enter into
1542 *
1543 *
45e829ea 1544 * Sets the PCIe reset state for the device. This is the default
f7bdd12d
BK
1545 * implementation. Architecture implementations can override this.
1546 */
d6d88c83
BH
1547int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
1548 enum pcie_reset_state state)
f7bdd12d
BK
1549{
1550 return -EINVAL;
1551}
1552
1553/**
1554 * pci_set_pcie_reset_state - set reset state for device dev
45e829ea 1555 * @dev: the PCIe device reset
f7bdd12d
BK
1556 * @state: Reset state to enter into
1557 *
1558 *
1559 * Sets the PCI reset state for the device.
1560 */
1561int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1562{
1563 return pcibios_set_pcie_reset_state(dev, state);
1564}
1565
58ff4633
RW
1566/**
1567 * pci_check_pme_status - Check if given device has generated PME.
1568 * @dev: Device to check.
1569 *
1570 * Check the PME status of the device and if set, clear it and clear PME enable
1571 * (if set). Return 'true' if PME status and PME enable were both set or
1572 * 'false' otherwise.
1573 */
1574bool pci_check_pme_status(struct pci_dev *dev)
1575{
1576 int pmcsr_pos;
1577 u16 pmcsr;
1578 bool ret = false;
1579
1580 if (!dev->pm_cap)
1581 return false;
1582
1583 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1584 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1585 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1586 return false;
1587
1588 /* Clear PME status. */
1589 pmcsr |= PCI_PM_CTRL_PME_STATUS;
1590 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1591 /* Disable PME to avoid interrupt flood. */
1592 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1593 ret = true;
1594 }
1595
1596 pci_write_config_word(dev, pmcsr_pos, pmcsr);
1597
1598 return ret;
1599}
1600
b67ea761
RW
1601/**
1602 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1603 * @dev: Device to handle.
379021d5 1604 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
b67ea761
RW
1605 *
1606 * Check if @dev has generated PME and queue a resume request for it in that
1607 * case.
1608 */
379021d5 1609static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
b67ea761 1610{
379021d5
RW
1611 if (pme_poll_reset && dev->pme_poll)
1612 dev->pme_poll = false;
1613
c125e96f 1614 if (pci_check_pme_status(dev)) {
c125e96f 1615 pci_wakeup_event(dev);
0f953bf6 1616 pm_request_resume(&dev->dev);
c125e96f 1617 }
b67ea761
RW
1618 return 0;
1619}
1620
1621/**
1622 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1623 * @bus: Top bus of the subtree to walk.
1624 */
1625void pci_pme_wakeup_bus(struct pci_bus *bus)
1626{
1627 if (bus)
379021d5 1628 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
b67ea761
RW
1629}
1630
448bd857 1631
eb9d0fe4
RW
1632/**
1633 * pci_pme_capable - check the capability of PCI device to generate PME#
1634 * @dev: PCI device to handle.
eb9d0fe4
RW
1635 * @state: PCI state from which device will issue PME#.
1636 */
e5899e1b 1637bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
eb9d0fe4 1638{
337001b6 1639 if (!dev->pm_cap)
eb9d0fe4
RW
1640 return false;
1641
337001b6 1642 return !!(dev->pme_support & (1 << state));
eb9d0fe4
RW
1643}
1644
df17e62e
MG
1645static void pci_pme_list_scan(struct work_struct *work)
1646{
379021d5 1647 struct pci_pme_device *pme_dev, *n;
df17e62e
MG
1648
1649 mutex_lock(&pci_pme_list_mutex);
ce300008
BH
1650 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1651 if (pme_dev->dev->pme_poll) {
1652 struct pci_dev *bridge;
1653
1654 bridge = pme_dev->dev->bus->self;
1655 /*
1656 * If bridge is in low power state, the
1657 * configuration space of subordinate devices
1658 * may be not accessible
1659 */
1660 if (bridge && bridge->current_state != PCI_D0)
1661 continue;
1662 pci_pme_wakeup(pme_dev->dev, NULL);
1663 } else {
1664 list_del(&pme_dev->list);
1665 kfree(pme_dev);
379021d5 1666 }
df17e62e 1667 }
ce300008
BH
1668 if (!list_empty(&pci_pme_list))
1669 schedule_delayed_work(&pci_pme_work,
1670 msecs_to_jiffies(PME_TIMEOUT));
df17e62e
MG
1671 mutex_unlock(&pci_pme_list_mutex);
1672}
1673
eb9d0fe4
RW
1674/**
1675 * pci_pme_active - enable or disable PCI device's PME# function
1676 * @dev: PCI device to handle.
eb9d0fe4
RW
1677 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1678 *
1679 * The caller must verify that the device is capable of generating PME# before
1680 * calling this function with @enable equal to 'true'.
1681 */
5a6c9b60 1682void pci_pme_active(struct pci_dev *dev, bool enable)
eb9d0fe4
RW
1683{
1684 u16 pmcsr;
1685
ffaddbe8 1686 if (!dev->pme_support)
eb9d0fe4
RW
1687 return;
1688
337001b6 1689 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
eb9d0fe4
RW
1690 /* Clear PME_Status by writing 1 to it and enable PME# */
1691 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1692 if (!enable)
1693 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1694
337001b6 1695 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
eb9d0fe4 1696
6e965e0d
HY
1697 /*
1698 * PCI (as opposed to PCIe) PME requires that the device have
1699 * its PME# line hooked up correctly. Not all hardware vendors
1700 * do this, so the PME never gets delivered and the device
1701 * remains asleep. The easiest way around this is to
1702 * periodically walk the list of suspended devices and check
1703 * whether any have their PME flag set. The assumption is that
1704 * we'll wake up often enough anyway that this won't be a huge
1705 * hit, and the power savings from the devices will still be a
1706 * win.
1707 *
1708 * Although PCIe uses in-band PME message instead of PME# line
1709 * to report PME, PME does not work for some PCIe devices in
1710 * reality. For example, there are devices that set their PME
1711 * status bits, but don't really bother to send a PME message;
1712 * there are PCI Express Root Ports that don't bother to
1713 * trigger interrupts when they receive PME messages from the
1714 * devices below. So PME poll is used for PCIe devices too.
1715 */
df17e62e 1716
379021d5 1717 if (dev->pme_poll) {
df17e62e
MG
1718 struct pci_pme_device *pme_dev;
1719 if (enable) {
1720 pme_dev = kmalloc(sizeof(struct pci_pme_device),
1721 GFP_KERNEL);
0394cb19
BH
1722 if (!pme_dev) {
1723 dev_warn(&dev->dev, "can't enable PME#\n");
1724 return;
1725 }
df17e62e
MG
1726 pme_dev->dev = dev;
1727 mutex_lock(&pci_pme_list_mutex);
1728 list_add(&pme_dev->list, &pci_pme_list);
1729 if (list_is_singular(&pci_pme_list))
1730 schedule_delayed_work(&pci_pme_work,
1731 msecs_to_jiffies(PME_TIMEOUT));
1732 mutex_unlock(&pci_pme_list_mutex);
1733 } else {
1734 mutex_lock(&pci_pme_list_mutex);
1735 list_for_each_entry(pme_dev, &pci_pme_list, list) {
1736 if (pme_dev->dev == dev) {
1737 list_del(&pme_dev->list);
1738 kfree(pme_dev);
1739 break;
1740 }
1741 }
1742 mutex_unlock(&pci_pme_list_mutex);
1743 }
1744 }
1745
85b8582d 1746 dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
eb9d0fe4
RW
1747}
1748
1da177e4 1749/**
6cbf8214 1750 * __pci_enable_wake - enable PCI device as wakeup event source
075c1771
DB
1751 * @dev: PCI device affected
1752 * @state: PCI state from which device will issue wakeup events
6cbf8214 1753 * @runtime: True if the events are to be generated at run time
075c1771
DB
1754 * @enable: True to enable event generation; false to disable
1755 *
1756 * This enables the device as a wakeup event source, or disables it.
1757 * When such events involve platform-specific hooks, those hooks are
1758 * called automatically by this routine.
1759 *
1760 * Devices with legacy power management (no standard PCI PM capabilities)
eb9d0fe4 1761 * always require such platform hooks.
075c1771 1762 *
eb9d0fe4
RW
1763 * RETURN VALUE:
1764 * 0 is returned on success.
1765 * -EINVAL is returned if the device is not supposed to wake up the system.
1766 * A platform-dependent error code is returned if both the platform and
1767 * the native mechanism fail to enable the generation of wake-up events.
1da177e4 1768 */
6cbf8214
RW
1769int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1770 bool runtime, bool enable)
1da177e4 1771{
5bcc2fb4 1772 int ret = 0;
075c1771 1773
6cbf8214 1774 if (enable && !runtime && !device_may_wakeup(&dev->dev))
eb9d0fe4 1775 return -EINVAL;
1da177e4 1776
e80bb09d
RW
1777 /* Don't do the same thing twice in a row for one device. */
1778 if (!!enable == !!dev->wakeup_prepared)
1779 return 0;
1780
eb9d0fe4
RW
1781 /*
1782 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1783 * Anderson we should be doing PME# wake enable followed by ACPI wake
1784 * enable. To disable wake-up we call the platform first, for symmetry.
075c1771 1785 */
1da177e4 1786
5bcc2fb4
RW
1787 if (enable) {
1788 int error;
1da177e4 1789
5bcc2fb4
RW
1790 if (pci_pme_capable(dev, state))
1791 pci_pme_active(dev, true);
1792 else
1793 ret = 1;
6cbf8214
RW
1794 error = runtime ? platform_pci_run_wake(dev, true) :
1795 platform_pci_sleep_wake(dev, true);
5bcc2fb4
RW
1796 if (ret)
1797 ret = error;
e80bb09d
RW
1798 if (!ret)
1799 dev->wakeup_prepared = true;
5bcc2fb4 1800 } else {
6cbf8214
RW
1801 if (runtime)
1802 platform_pci_run_wake(dev, false);
1803 else
1804 platform_pci_sleep_wake(dev, false);
5bcc2fb4 1805 pci_pme_active(dev, false);
e80bb09d 1806 dev->wakeup_prepared = false;
5bcc2fb4 1807 }
1da177e4 1808
5bcc2fb4 1809 return ret;
eb9d0fe4 1810}
6cbf8214 1811EXPORT_SYMBOL(__pci_enable_wake);
1da177e4 1812
0235c4fc
RW
1813/**
1814 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1815 * @dev: PCI device to prepare
1816 * @enable: True to enable wake-up event generation; false to disable
1817 *
1818 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1819 * and this function allows them to set that up cleanly - pci_enable_wake()
1820 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1821 * ordering constraints.
1822 *
1823 * This function only returns error code if the device is not capable of
1824 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1825 * enable wake-up power for it.
1826 */
1827int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1828{
1829 return pci_pme_capable(dev, PCI_D3cold) ?
1830 pci_enable_wake(dev, PCI_D3cold, enable) :
1831 pci_enable_wake(dev, PCI_D3hot, enable);
1832}
1833
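/*
 * Example (illustrative sketch, not part of the upstream file): a legacy-PM
 * driver's suspend hook could arm wake-up from D3 with the helper above.
 * The "foo_suspend" name and the use of device_may_wakeup() to decide
 * whether to arm wake-up are assumptions made only for this example.
 */
static int __maybe_unused foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}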
404cc2d8 1834/**
37139074
JB
1835 * pci_target_state - find an appropriate low power state for a given PCI dev
1836 * @dev: PCI device
1837 *
1838 * Use underlying platform code to find a supported low power state for @dev.
1839 * If the platform can't manage @dev, return the deepest state from which it
1840 * can generate wake events, based on any available PME info.
404cc2d8 1841 */
0b950f0f 1842static pci_power_t pci_target_state(struct pci_dev *dev)
404cc2d8
RW
1843{
1844 pci_power_t target_state = PCI_D3hot;
404cc2d8
RW
1845
1846 if (platform_pci_power_manageable(dev)) {
1847 /*
1848 * Call the platform to choose the target state of the device
1849 * and enable wake-up from this state if supported.
1850 */
1851 pci_power_t state = platform_pci_choose_state(dev);
1852
1853 switch (state) {
1854 case PCI_POWER_ERROR:
1855 case PCI_UNKNOWN:
1856 break;
1857 case PCI_D1:
1858 case PCI_D2:
1859 if (pci_no_d1d2(dev))
1860 break;
1861 default:
1862 target_state = state;
404cc2d8 1863 }
d2abdf62
RW
1864 } else if (!dev->pm_cap) {
1865 target_state = PCI_D0;
404cc2d8
RW
1866 } else if (device_may_wakeup(&dev->dev)) {
1867 /*
1868 * Find the deepest state from which the device can generate
1869 * wake-up events, make it the target state and enable device
1870 * to generate PME#.
1871 */
337001b6
RW
1872 if (dev->pme_support) {
1873 while (target_state
1874 && !(dev->pme_support & (1 << target_state)))
1875 target_state--;
404cc2d8
RW
1876 }
1877 }
1878
e5899e1b
RW
1879 return target_state;
1880}
1881
1882/**
1883 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1884 * @dev: Device to handle.
1885 *
1886 * Choose the power state appropriate for the device depending on whether
1887 * it can wake up the system and/or is power manageable by the platform
1888 * (PCI_D3hot is the default) and put the device into that state.
1889 */
1890int pci_prepare_to_sleep(struct pci_dev *dev)
1891{
1892 pci_power_t target_state = pci_target_state(dev);
1893 int error;
1894
1895 if (target_state == PCI_POWER_ERROR)
1896 return -EIO;
1897
448bd857
HY
1898 /* D3cold during system suspend/hibernate is not supported */
1899 if (target_state > PCI_D3hot)
1900 target_state = PCI_D3hot;
1901
8efb8c76 1902 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
c157dfa3 1903
404cc2d8
RW
1904 error = pci_set_power_state(dev, target_state);
1905
1906 if (error)
1907 pci_enable_wake(dev, target_state, false);
1908
1909 return error;
1910}
1911
1912/**
443bd1c4 1913 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
404cc2d8
RW
1914 * @dev: Device to handle.
1915 *
88393161 1916 * Disable device's system wake-up capability and put it into D0.
404cc2d8
RW
1917 */
1918int pci_back_from_sleep(struct pci_dev *dev)
1919{
1920 pci_enable_wake(dev, PCI_D0, false);
1921 return pci_set_power_state(dev, PCI_D0);
1922}
1923
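/*
 * Example (illustrative sketch, not part of the upstream file): how a
 * legacy-PM driver could pair pci_prepare_to_sleep() and
 * pci_back_from_sleep() across suspend/resume.  The "foo_*" names are
 * assumptions for the example only.
 */
static int __maybe_unused foo_legacy_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	return pci_prepare_to_sleep(pdev);	/* pick target state, arm wake-up */
}

static int __maybe_unused foo_legacy_resume(struct pci_dev *pdev)
{
	int ret = pci_back_from_sleep(pdev);	/* disable wake-up, go back to D0 */

	pci_restore_state(pdev);
	return ret;
}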
6cbf8214
RW
1924/**
1925 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1926 * @dev: PCI device being suspended.
1927 *
1928 * Prepare @dev to generate wake-up events at run time and put it into a low
1929 * power state.
1930 */
1931int pci_finish_runtime_suspend(struct pci_dev *dev)
1932{
1933 pci_power_t target_state = pci_target_state(dev);
1934 int error;
1935
1936 if (target_state == PCI_POWER_ERROR)
1937 return -EIO;
1938
448bd857
HY
1939 dev->runtime_d3cold = target_state == PCI_D3cold;
1940
6cbf8214
RW
1941 __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1942
1943 error = pci_set_power_state(dev, target_state);
1944
448bd857 1945 if (error) {
6cbf8214 1946 __pci_enable_wake(dev, target_state, true, false);
448bd857
HY
1947 dev->runtime_d3cold = false;
1948 }
6cbf8214
RW
1949
1950 return error;
1951}
1952
b67ea761
RW
1953/**
1954 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1955 * @dev: Device to check.
1956 *
f7625980 1957 * Return true if the device itself is capable of generating wake-up events
b67ea761
RW
1958 * (through the platform or using the native PCIe PME) or if the device supports
1959 * PME and one of its upstream bridges can generate wake-up events.
1960 */
1961bool pci_dev_run_wake(struct pci_dev *dev)
1962{
1963 struct pci_bus *bus = dev->bus;
1964
1965 if (device_run_wake(&dev->dev))
1966 return true;
1967
1968 if (!dev->pme_support)
1969 return false;
1970
1971 while (bus->parent) {
1972 struct pci_dev *bridge = bus->self;
1973
1974 if (device_run_wake(&bridge->dev))
1975 return true;
1976
1977 bus = bus->parent;
1978 }
1979
1980 /* We have reached the root bus. */
1981 if (bus->bridge)
1982 return device_run_wake(bus->bridge);
1983
1984 return false;
1985}
1986EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1987
b3c32c4f
HY
1988void pci_config_pm_runtime_get(struct pci_dev *pdev)
1989{
1990 struct device *dev = &pdev->dev;
1991 struct device *parent = dev->parent;
1992
1993 if (parent)
1994 pm_runtime_get_sync(parent);
1995 pm_runtime_get_noresume(dev);
1996 /*
1997 * pdev->current_state is set to PCI_D3cold during suspending,
1998 * so wait until suspending completes
1999 */
2000 pm_runtime_barrier(dev);
2001 /*
2002 * Only need to resume devices in D3cold, because config
2003 * registers are still accessible for devices suspended but
2004 * not in D3cold.
2005 */
2006 if (pdev->current_state == PCI_D3cold)
2007 pm_runtime_resume(dev);
2008}
2009
2010void pci_config_pm_runtime_put(struct pci_dev *pdev)
2011{
2012 struct device *dev = &pdev->dev;
2013 struct device *parent = dev->parent;
2014
2015 pm_runtime_put(dev);
2016 if (parent)
2017 pm_runtime_put_sync(parent);
2018}
2019
eb9d0fe4
RW
2020/**
2021 * pci_pm_init - Initialize PM functions of given PCI device
2022 * @dev: PCI device to handle.
2023 */
2024void pci_pm_init(struct pci_dev *dev)
2025{
2026 int pm;
2027 u16 pmc;
1da177e4 2028
bb910a70 2029 pm_runtime_forbid(&dev->dev);
967577b0
HY
2030 pm_runtime_set_active(&dev->dev);
2031 pm_runtime_enable(&dev->dev);
a1e4d72c 2032 device_enable_async_suspend(&dev->dev);
e80bb09d 2033 dev->wakeup_prepared = false;
bb910a70 2034
337001b6 2035 dev->pm_cap = 0;
ffaddbe8 2036 dev->pme_support = 0;
337001b6 2037
eb9d0fe4
RW
2038 /* find PCI PM capability in list */
2039 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
2040 if (!pm)
50246dd4 2041 return;
eb9d0fe4
RW
2042 /* Check device's ability to generate PME# */
2043 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
075c1771 2044
eb9d0fe4
RW
2045 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
2046 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
2047 pmc & PCI_PM_CAP_VER_MASK);
50246dd4 2048 return;
eb9d0fe4
RW
2049 }
2050
337001b6 2051 dev->pm_cap = pm;
1ae861e6 2052 dev->d3_delay = PCI_PM_D3_WAIT;
448bd857 2053 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
4f9c1397 2054 dev->d3cold_allowed = true;
337001b6
RW
2055
2056 dev->d1_support = false;
2057 dev->d2_support = false;
2058 if (!pci_no_d1d2(dev)) {
c9ed77ee 2059 if (pmc & PCI_PM_CAP_D1)
337001b6 2060 dev->d1_support = true;
c9ed77ee 2061 if (pmc & PCI_PM_CAP_D2)
337001b6 2062 dev->d2_support = true;
c9ed77ee
BH
2063
2064 if (dev->d1_support || dev->d2_support)
2065 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
ec84f126
JB
2066 dev->d1_support ? " D1" : "",
2067 dev->d2_support ? " D2" : "");
337001b6
RW
2068 }
2069
2070 pmc &= PCI_PM_CAP_PME_MASK;
2071 if (pmc) {
10c3d71d
BH
2072 dev_printk(KERN_DEBUG, &dev->dev,
2073 "PME# supported from%s%s%s%s%s\n",
c9ed77ee
BH
2074 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
2075 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
2076 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
2077 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
2078 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
337001b6 2079 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
379021d5 2080 dev->pme_poll = true;
eb9d0fe4
RW
2081 /*
2082 * Make device's PM flags reflect the wake-up capability, but
2083 * let the user space enable it to wake up the system as needed.
2084 */
2085 device_set_wakeup_capable(&dev->dev, true);
eb9d0fe4 2086 /* Disable the PME# generation functionality */
337001b6 2087 pci_pme_active(dev, false);
eb9d0fe4 2088 }
1da177e4
LT
2089}
2090
34a4876e
YL
2091static void pci_add_saved_cap(struct pci_dev *pci_dev,
2092 struct pci_cap_saved_state *new_cap)
2093{
2094 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
2095}
2096
63f4898a 2097/**
fd0f7f73
AW
2098 * _pci_add_cap_save_buffer - allocate buffer for saving given
2099 * capability registers
63f4898a
RW
2100 * @dev: the PCI device
2101 * @cap: the capability to allocate the buffer for
fd0f7f73 2102 * @extended: Standard or Extended capability ID
63f4898a
RW
2103 * @size: requested size of the buffer
2104 */
fd0f7f73
AW
2105static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
2106 bool extended, unsigned int size)
63f4898a
RW
2107{
2108 int pos;
2109 struct pci_cap_saved_state *save_state;
2110
fd0f7f73
AW
2111 if (extended)
2112 pos = pci_find_ext_capability(dev, cap);
2113 else
2114 pos = pci_find_capability(dev, cap);
2115
63f4898a
RW
2116 if (pos <= 0)
2117 return 0;
2118
2119 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
2120 if (!save_state)
2121 return -ENOMEM;
2122
24a4742f 2123 save_state->cap.cap_nr = cap;
fd0f7f73 2124 save_state->cap.cap_extended = extended;
24a4742f 2125 save_state->cap.size = size;
63f4898a
RW
2126 pci_add_saved_cap(dev, save_state);
2127
2128 return 0;
2129}
2130
fd0f7f73
AW
2131int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
2132{
2133 return _pci_add_cap_save_buffer(dev, cap, false, size);
2134}
2135
2136int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
2137{
2138 return _pci_add_cap_save_buffer(dev, cap, true, size);
2139}
2140
63f4898a
RW
2141/**
2142 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
2143 * @dev: the PCI device
2144 */
2145void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2146{
2147 int error;
2148
89858517
YZ
2149 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
2150 PCI_EXP_SAVE_REGS * sizeof(u16));
63f4898a
RW
2151 if (error)
2152 dev_err(&dev->dev,
2153 "unable to preallocate PCI Express save buffer\n");
2154
2155 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
2156 if (error)
2157 dev_err(&dev->dev,
2158 "unable to preallocate PCI-X save buffer\n");
425c1b22
AW
2159
2160 pci_allocate_vc_save_buffers(dev);
63f4898a
RW
2161}
2162
f796841e
YL
2163void pci_free_cap_save_buffers(struct pci_dev *dev)
2164{
2165 struct pci_cap_saved_state *tmp;
b67bfe0d 2166 struct hlist_node *n;
f796841e 2167
b67bfe0d 2168 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
f796841e
YL
2169 kfree(tmp);
2170}
2171
58c3a727 2172/**
31ab2476 2173 * pci_configure_ari - enable or disable ARI forwarding
58c3a727 2174 * @dev: the PCI device
b0cc6020
YW
2175 *
2176 * If @dev and its upstream bridge both support ARI, enable ARI in the
2177 * bridge. Otherwise, disable ARI in the bridge.
58c3a727 2178 */
31ab2476 2179void pci_configure_ari(struct pci_dev *dev)
58c3a727 2180{
58c3a727 2181 u32 cap;
8113587c 2182 struct pci_dev *bridge;
58c3a727 2183
6748dcc2 2184 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
58c3a727
YZ
2185 return;
2186
8113587c 2187 bridge = dev->bus->self;
cb97ae34 2188 if (!bridge)
8113587c
ZY
2189 return;
2190
59875ae4 2191 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
58c3a727
YZ
2192 if (!(cap & PCI_EXP_DEVCAP2_ARI))
2193 return;
2194
b0cc6020
YW
2195 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
2196 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
2197 PCI_EXP_DEVCTL2_ARI);
2198 bridge->ari_enabled = 1;
2199 } else {
2200 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
2201 PCI_EXP_DEVCTL2_ARI);
2202 bridge->ari_enabled = 0;
2203 }
58c3a727
YZ
2204}
2205
5d990b62
CW
2206static int pci_acs_enable;
2207
2208/**
2209 * pci_request_acs - ask for ACS to be enabled if supported
2210 */
2211void pci_request_acs(void)
2212{
2213 pci_acs_enable = 1;
2214}
2215
ae21ee65 2216/**
2c744244 2217 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
ae21ee65
AK
2218 * @dev: the PCI device
2219 */
2c744244 2220static int pci_std_enable_acs(struct pci_dev *dev)
ae21ee65
AK
2221{
2222 int pos;
2223 u16 cap;
2224 u16 ctrl;
2225
ae21ee65
AK
2226 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2227 if (!pos)
2c744244 2228 return -ENODEV;
ae21ee65
AK
2229
2230 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2231 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2232
2233 /* Source Validation */
2234 ctrl |= (cap & PCI_ACS_SV);
2235
2236 /* P2P Request Redirect */
2237 ctrl |= (cap & PCI_ACS_RR);
2238
2239 /* P2P Completion Redirect */
2240 ctrl |= (cap & PCI_ACS_CR);
2241
2242 /* Upstream Forwarding */
2243 ctrl |= (cap & PCI_ACS_UF);
2244
2245 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2c744244
AW
2246
2247 return 0;
2248}
2249
2250/**
2251 * pci_enable_acs - enable ACS if hardware supports it
2252 * @dev: the PCI device
2253 */
2254void pci_enable_acs(struct pci_dev *dev)
2255{
2256 if (!pci_acs_enable)
2257 return;
2258
2259 if (!pci_std_enable_acs(dev))
2260 return;
2261
2262 pci_dev_specific_enable_acs(dev);
ae21ee65
AK
2263}
2264
0a67119f
AW
2265static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
2266{
2267 int pos;
83db7e0b 2268 u16 cap, ctrl;
0a67119f
AW
2269
2270 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2271 if (!pos)
2272 return false;
2273
83db7e0b
AW
2274 /*
2275 * Except for egress control, capabilities are either required
2276 * or only required if controllable. Features missing from the
2277 * capability field can therefore be assumed to be hard-wired enabled.
2278 */
2279 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
2280 acs_flags &= (cap | PCI_ACS_EC);
2281
0a67119f
AW
2282 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2283 return (ctrl & acs_flags) == acs_flags;
2284}
2285
ad805758
AW
2286/**
2287 * pci_acs_enabled - test ACS against required flags for a given device
2288 * @pdev: device to test
2289 * @acs_flags: required PCI ACS flags
2290 *
2291 * Return true if the device supports the provided flags. Automatically
2292 * filters out flags that are not implemented on multifunction devices.
0a67119f
AW
2293 *
2294 * Note that this interface checks the effective ACS capabilities of the
2295 * device rather than the actual capabilities. For instance, most single
2296 * function endpoints are not required to support ACS because they have no
2297 * opportunity for peer-to-peer access. We therefore return 'true'
2298 * regardless of whether the device exposes an ACS capability. This makes
2299 * it much easier for callers of this function to ignore the actual type
2300 * or topology of the device when testing ACS support.
ad805758
AW
2301 */
2302bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2303{
0a67119f 2304 int ret;
ad805758
AW
2305
2306 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2307 if (ret >= 0)
2308 return ret > 0;
2309
0a67119f
AW
2310 /*
2311 * Conventional PCI and PCI-X devices never support ACS, either
2312 * effectively or actually. The shared bus topology implies that
2313 * any device on the bus can receive or snoop DMA.
2314 */
ad805758
AW
2315 if (!pci_is_pcie(pdev))
2316 return false;
2317
0a67119f
AW
2318 switch (pci_pcie_type(pdev)) {
2319 /*
2320 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
f7625980 2321 * but since their primary interface is PCI/X, we conservatively
0a67119f
AW
2322 * handle them as we would a non-PCIe device.
2323 */
2324 case PCI_EXP_TYPE_PCIE_BRIDGE:
2325 /*
2326 * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
2327 * applicable... must never implement an ACS Extended Capability...".
2328 * This seems arbitrary, but we take a conservative interpretation
2329 * of this statement.
2330 */
2331 case PCI_EXP_TYPE_PCI_BRIDGE:
2332 case PCI_EXP_TYPE_RC_EC:
2333 return false;
2334 /*
2335 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
2336 * implement ACS in order to indicate their peer-to-peer capabilities,
2337 * regardless of whether they are single- or multi-function devices.
2338 */
2339 case PCI_EXP_TYPE_DOWNSTREAM:
2340 case PCI_EXP_TYPE_ROOT_PORT:
2341 return pci_acs_flags_enabled(pdev, acs_flags);
2342 /*
2343 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
2344 * implemented by the remaining PCIe types to indicate peer-to-peer
f7625980 2345 * capabilities, but only when they are part of a multifunction
0a67119f
AW
2346 * device. The footnote for section 6.12 indicates the specific
2347 * PCIe types included here.
2348 */
2349 case PCI_EXP_TYPE_ENDPOINT:
2350 case PCI_EXP_TYPE_UPSTREAM:
2351 case PCI_EXP_TYPE_LEG_END:
2352 case PCI_EXP_TYPE_RC_END:
2353 if (!pdev->multifunction)
2354 break;
2355
0a67119f 2356 return pci_acs_flags_enabled(pdev, acs_flags);
ad805758
AW
2357 }
2358
0a67119f 2359 /*
f7625980 2360 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
0a67119f
AW
2361 * to single function devices with the exception of downstream ports.
2362 */
ad805758
AW
2363 return true;
2364}
2365
2366/**
2367 * pci_acs_path_enable - test ACS flags from start to end in a hierarchy
2368 * @start: starting downstream device
2369 * @end: ending upstream device or NULL to search to the root bus
2370 * @acs_flags: required flags
2371 *
2372 * Walk up a device tree from start to end testing PCI ACS support. If
2373 * any step along the way does not support the required flags, return false.
2374 */
2375bool pci_acs_path_enabled(struct pci_dev *start,
2376 struct pci_dev *end, u16 acs_flags)
2377{
2378 struct pci_dev *pdev, *parent = start;
2379
2380 do {
2381 pdev = parent;
2382
2383 if (!pci_acs_enabled(pdev, acs_flags))
2384 return false;
2385
2386 if (pci_is_root_bus(pdev->bus))
2387 return (end == NULL);
2388
2389 parent = pdev->bus->self;
2390 } while (pdev != end);
2391
2392 return true;
2393}
2394
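/*
 * Example (illustrative): checking whether DMA from @pdev is isolated all
 * the way up to the root complex, e.g. before treating the device as
 * safely assignable.  The particular set of ACS flags used here is an
 * assumption; callers pick the capabilities they actually care about.
 */
static bool __maybe_unused foo_dev_is_isolated(struct pci_dev *pdev)
{
	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;

	/* NULL end: walk all the way up to the root bus */
	return pci_acs_path_enabled(pdev, NULL, flags);
}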
57c2cf71
BH
2395/**
2396 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2397 * @dev: the PCI device
bb5c2de2 2398 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
57c2cf71
BH
2399 *
2400 * Perform INTx swizzling for a device behind one level of bridge. This is
2401 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
46b952a3
MW
2402 * behind bridges on add-in cards. For devices with ARI enabled, the slot
2403 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2404 * the PCI Express Base Specification, Revision 2.1)
57c2cf71 2405 */
3df425f3 2406u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
57c2cf71 2407{
46b952a3
MW
2408 int slot;
2409
2410 if (pci_ari_enabled(dev->bus))
2411 slot = 0;
2412 else
2413 slot = PCI_SLOT(dev->devfn);
2414
2415 return (((pin - 1) + slot) % 4) + 1;
57c2cf71
BH
2416}
2417
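/*
 * Worked example (illustrative): a device in slot 3 using INTB (pin 2)
 * behind one bridge is seen by that bridge as
 *	(((2 - 1) + 3) % 4) + 1 = 1, i.e. INTA.
 */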
1da177e4
LT
2418int
2419pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2420{
2421 u8 pin;
2422
514d207d 2423 pin = dev->pin;
1da177e4
LT
2424 if (!pin)
2425 return -1;
878f2e50 2426
8784fd4d 2427 while (!pci_is_root_bus(dev->bus)) {
57c2cf71 2428 pin = pci_swizzle_interrupt_pin(dev, pin);
1da177e4
LT
2429 dev = dev->bus->self;
2430 }
2431 *bridge = dev;
2432 return pin;
2433}
2434
68feac87
BH
2435/**
2436 * pci_common_swizzle - swizzle INTx all the way to root bridge
2437 * @dev: the PCI device
2438 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2439 *
2440 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
2441 * bridges all the way up to a PCI root bus.
2442 */
2443u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2444{
2445 u8 pin = *pinp;
2446
1eb39487 2447 while (!pci_is_root_bus(dev->bus)) {
68feac87
BH
2448 pin = pci_swizzle_interrupt_pin(dev, pin);
2449 dev = dev->bus->self;
2450 }
2451 *pinp = pin;
2452 return PCI_SLOT(dev->devfn);
2453}
2454
1da177e4
LT
2455/**
2456 * pci_release_region - Release a PCI BAR
2457 * @pdev: PCI device whose resources were previously reserved by pci_request_region
2458 * @bar: BAR to release
2459 *
2460 * Releases the PCI I/O and memory resources previously reserved by a
2461 * successful call to pci_request_region. Call this function only
2462 * after all use of the PCI regions has ceased.
2463 */
2464void pci_release_region(struct pci_dev *pdev, int bar)
2465{
9ac7849e
TH
2466 struct pci_devres *dr;
2467
1da177e4
LT
2468 if (pci_resource_len(pdev, bar) == 0)
2469 return;
2470 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2471 release_region(pci_resource_start(pdev, bar),
2472 pci_resource_len(pdev, bar));
2473 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2474 release_mem_region(pci_resource_start(pdev, bar),
2475 pci_resource_len(pdev, bar));
9ac7849e
TH
2476
2477 dr = find_pci_dr(pdev);
2478 if (dr)
2479 dr->region_mask &= ~(1 << bar);
1da177e4
LT
2480}
2481
2482/**
f5ddcac4 2483 * __pci_request_region - Reserve PCI I/O and memory resource
1da177e4
LT
2484 * @pdev: PCI device whose resources are to be reserved
2485 * @bar: BAR to be reserved
2486 * @res_name: Name to be associated with resource.
f5ddcac4 2487 * @exclusive: whether the region access is exclusive or not
1da177e4
LT
2488 *
2489 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2490 * being reserved by owner @res_name. Do not access any
2491 * address inside the PCI regions unless this call returns
2492 * successfully.
2493 *
f5ddcac4
RD
2494 * If @exclusive is set, then the region is marked so that userspace
2495 * is explicitly not allowed to map the resource via /dev/mem or
f7625980 2496 * sysfs MMIO access.
f5ddcac4 2497 *
1da177e4
LT
2498 * Returns 0 on success, or %EBUSY on error. A warning
2499 * message is also printed on failure.
2500 */
e8de1481
AV
2501static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2502 int exclusive)
1da177e4 2503{
9ac7849e
TH
2504 struct pci_devres *dr;
2505
1da177e4
LT
2506 if (pci_resource_len(pdev, bar) == 0)
2507 return 0;
f7625980 2508
1da177e4
LT
2509 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2510 if (!request_region(pci_resource_start(pdev, bar),
2511 pci_resource_len(pdev, bar), res_name))
2512 goto err_out;
2513 }
2514 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
e8de1481
AV
2515 if (!__request_mem_region(pci_resource_start(pdev, bar),
2516 pci_resource_len(pdev, bar), res_name,
2517 exclusive))
1da177e4
LT
2518 goto err_out;
2519 }
9ac7849e
TH
2520
2521 dr = find_pci_dr(pdev);
2522 if (dr)
2523 dr->region_mask |= 1 << bar;
2524
1da177e4
LT
2525 return 0;
2526
2527err_out:
c7dabef8 2528 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
096e6f67 2529 &pdev->resource[bar]);
1da177e4
LT
2530 return -EBUSY;
2531}
2532
e8de1481 2533/**
f5ddcac4 2534 * pci_request_region - Reserve PCI I/O and memory resource
e8de1481
AV
2535 * @pdev: PCI device whose resources are to be reserved
2536 * @bar: BAR to be reserved
f5ddcac4 2537 * @res_name: Name to be associated with resource
e8de1481 2538 *
f5ddcac4 2539 * Mark the PCI region associated with PCI device @pdev BAR @bar as
e8de1481
AV
2540 * being reserved by owner @res_name. Do not access any
2541 * address inside the PCI regions unless this call returns
2542 * successfully.
2543 *
2544 * Returns 0 on success, or %EBUSY on error. A warning
2545 * message is also printed on failure.
2546 */
2547int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2548{
2549 return __pci_request_region(pdev, bar, res_name, 0);
2550}
2551
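/*
 * Example (illustrative sketch, not part of the upstream file): typical use
 * of pci_request_region() from a driver probe path.  The "foo" resource
 * name, the choice of BAR 0 and the error handling shown are assumptions
 * made only for this example.
 */
static int __maybe_unused foo_probe_bar0(struct pci_dev *pdev)
{
	void __iomem *regs;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_region(pdev, 0, "foo");
	if (rc)
		goto err_disable;

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		rc = -ENOMEM;
		goto err_release;
	}
	/* ... use "regs", stash it in driver private data ... */
	return 0;

err_release:
	pci_release_region(pdev, 0);
err_disable:
	pci_disable_device(pdev);
	return rc;
}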
2552/**
2553 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
2554 * @pdev: PCI device whose resources are to be reserved
2555 * @bar: BAR to be reserved
2556 * @res_name: Name to be associated with resource.
2557 *
2558 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2559 * being reserved by owner @res_name. Do not access any
2560 * address inside the PCI regions unless this call returns
2561 * successfully.
2562 *
2563 * Returns 0 on success, or %EBUSY on error. A warning
2564 * message is also printed on failure.
2565 *
2566 * The key difference that _exclusive makes is that userspace is
2567 * explicitly not allowed to map the resource via /dev/mem or
f7625980 2568 * sysfs.
e8de1481
AV
2569 */
2570int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2571{
2572 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2573}
c87deff7
HS
2574/**
2575 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2576 * @pdev: PCI device whose resources were previously reserved
2577 * @bars: Bitmask of BARs to be released
2578 *
2579 * Release selected PCI I/O and memory resources previously reserved.
2580 * Call this function only after all use of the PCI regions has ceased.
2581 */
2582void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2583{
2584 int i;
2585
2586 for (i = 0; i < 6; i++)
2587 if (bars & (1 << i))
2588 pci_release_region(pdev, i);
2589}
2590
9738abed 2591static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
e8de1481 2592 const char *res_name, int excl)
c87deff7
HS
2593{
2594 int i;
2595
2596 for (i = 0; i < 6; i++)
2597 if (bars & (1 << i))
e8de1481 2598 if (__pci_request_region(pdev, i, res_name, excl))
c87deff7
HS
2599 goto err_out;
2600 return 0;
2601
2602err_out:
2603 while (--i >= 0)
2604 if (bars & (1 << i))
2605 pci_release_region(pdev, i);
2606
2607 return -EBUSY;
2608}
1da177e4 2609
e8de1481
AV
2610
2611/**
2612 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2613 * @pdev: PCI device whose resources are to be reserved
2614 * @bars: Bitmask of BARs to be requested
2615 * @res_name: Name to be associated with resource
2616 */
2617int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2618 const char *res_name)
2619{
2620 return __pci_request_selected_regions(pdev, bars, res_name, 0);
2621}
2622
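/*
 * Example (illustrative): requesting only the memory BARs of a device by
 * combining pci_select_bars() with pci_request_selected_regions().  The
 * "foo" resource name is an assumption for the example.
 */
static int __maybe_unused foo_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "foo");
}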
2623int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2624 int bars, const char *res_name)
2625{
2626 return __pci_request_selected_regions(pdev, bars, res_name,
2627 IORESOURCE_EXCLUSIVE);
2628}
2629
1da177e4
LT
2630/**
2631 * pci_release_regions - Release reserved PCI I/O and memory resources
2632 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
2633 *
2634 * Releases all PCI I/O and memory resources previously reserved by a
2635 * successful call to pci_request_regions. Call this function only
2636 * after all use of the PCI regions has ceased.
2637 */
2638
2639void pci_release_regions(struct pci_dev *pdev)
2640{
c87deff7 2641 pci_release_selected_regions(pdev, (1 << 6) - 1);
1da177e4
LT
2642}
2643
2644/**
2645 * pci_request_regions - Reserve PCI I/O and memory resources
2646 * @pdev: PCI device whose resources are to be reserved
2647 * @res_name: Name to be associated with resource.
2648 *
2649 * Mark all PCI regions associated with PCI device @pdev as
2650 * being reserved by owner @res_name. Do not access any
2651 * address inside the PCI regions unless this call returns
2652 * successfully.
2653 *
2654 * Returns 0 on success, or %EBUSY on error. A warning
2655 * message is also printed on failure.
2656 */
3c990e92 2657int pci_request_regions(struct pci_dev *pdev, const char *res_name)
1da177e4 2658{
c87deff7 2659 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
1da177e4
LT
2660}
2661
e8de1481
AV
2662/**
2663 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2664 * @pdev: PCI device whose resources are to be reserved
2665 * @res_name: Name to be associated with resource.
2666 *
2667 * Mark all PCI regions associated with PCI device @pdev as
2668 * being reserved by owner @res_name. Do not access any
2669 * address inside the PCI regions unless this call returns
2670 * successfully.
2671 *
2672 * pci_request_regions_exclusive() will mark the region so that
f7625980 2673 * /dev/mem and the sysfs MMIO access will not be allowed.
e8de1481
AV
2674 *
2675 * Returns 0 on success, or %EBUSY on error. A warning
2676 * message is also printed on failure.
2677 */
2678int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2679{
2680 return pci_request_selected_regions_exclusive(pdev,
2681 ((1 << 6) - 1), res_name);
2682}
2683
6a479079
BH
2684static void __pci_set_master(struct pci_dev *dev, bool enable)
2685{
2686 u16 old_cmd, cmd;
2687
2688 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2689 if (enable)
2690 cmd = old_cmd | PCI_COMMAND_MASTER;
2691 else
2692 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2693 if (cmd != old_cmd) {
2694 dev_dbg(&dev->dev, "%s bus mastering\n",
2695 enable ? "enabling" : "disabling");
2696 pci_write_config_word(dev, PCI_COMMAND, cmd);
2697 }
2698 dev->is_busmaster = enable;
2699}
e8de1481 2700
2b6f2c35
MS
2701/**
2702 * pcibios_setup - process "pci=" kernel boot arguments
2703 * @str: string used to pass in "pci=" kernel boot arguments
2704 *
2705 * Process kernel boot arguments. This is the default implementation.
2706 * Architecture specific implementations can override this as necessary.
2707 */
2708char * __weak __init pcibios_setup(char *str)
2709{
2710 return str;
2711}
2712
96c55900
MS
2713/**
2714 * pcibios_set_master - enable PCI bus-mastering for device dev
2715 * @dev: the PCI device to enable
2716 *
2717 * Enables PCI bus-mastering for the device. This is the default
2718 * implementation. Architecture specific implementations can override
2719 * this if necessary.
2720 */
2721void __weak pcibios_set_master(struct pci_dev *dev)
2722{
2723 u8 lat;
2724
f676678f
MS
2725 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2726 if (pci_is_pcie(dev))
2727 return;
2728
96c55900
MS
2729 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2730 if (lat < 16)
2731 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2732 else if (lat > pcibios_max_latency)
2733 lat = pcibios_max_latency;
2734 else
2735 return;
a006482b 2736
96c55900
MS
2737 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2738}
2739
1da177e4
LT
2740/**
2741 * pci_set_master - enables bus-mastering for device dev
2742 * @dev: the PCI device to enable
2743 *
2744 * Enables bus-mastering on the device and calls pcibios_set_master()
2745 * to do the needed arch specific settings.
2746 */
6a479079 2747void pci_set_master(struct pci_dev *dev)
1da177e4 2748{
6a479079 2749 __pci_set_master(dev, true);
1da177e4
LT
2750 pcibios_set_master(dev);
2751}
2752
6a479079
BH
2753/**
2754 * pci_clear_master - disables bus-mastering for device dev
2755 * @dev: the PCI device to disable
2756 */
2757void pci_clear_master(struct pci_dev *dev)
2758{
2759 __pci_set_master(dev, false);
2760}
2761
1da177e4 2762/**
edb2d97e
MW
2763 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2764 * @dev: the PCI device for which MWI is to be enabled
1da177e4 2765 *
edb2d97e
MW
2766 * Helper function for pci_set_mwi.
2767 * Originally copied from drivers/net/acenic.c.
1da177e4
LT
2768 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2769 *
2770 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2771 */
15ea76d4 2772int pci_set_cacheline_size(struct pci_dev *dev)
1da177e4
LT
2773{
2774 u8 cacheline_size;
2775
2776 if (!pci_cache_line_size)
15ea76d4 2777 return -EINVAL;
1da177e4
LT
2778
2779 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2780 equal to or a multiple of the right value. */
2781 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2782 if (cacheline_size >= pci_cache_line_size &&
2783 (cacheline_size % pci_cache_line_size) == 0)
2784 return 0;
2785
2786 /* Write the correct value. */
2787 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2788 /* Read it back. */
2789 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2790 if (cacheline_size == pci_cache_line_size)
2791 return 0;
2792
80ccba11
BH
2793 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2794 "supported\n", pci_cache_line_size << 2);
1da177e4
LT
2795
2796 return -EINVAL;
2797}
15ea76d4
TH
2798EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2799
2800#ifdef PCI_DISABLE_MWI
2801int pci_set_mwi(struct pci_dev *dev)
2802{
2803 return 0;
2804}
2805
2806int pci_try_set_mwi(struct pci_dev *dev)
2807{
2808 return 0;
2809}
2810
2811void pci_clear_mwi(struct pci_dev *dev)
2812{
2813}
2814
2815#else
1da177e4
LT
2816
2817/**
2818 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2819 * @dev: the PCI device for which MWI is enabled
2820 *
694625c0 2821 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
1da177e4
LT
2822 *
2823 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2824 */
2825int
2826pci_set_mwi(struct pci_dev *dev)
2827{
2828 int rc;
2829 u16 cmd;
2830
edb2d97e 2831 rc = pci_set_cacheline_size(dev);
1da177e4
LT
2832 if (rc)
2833 return rc;
2834
2835 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2836 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
80ccba11 2837 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
1da177e4
LT
2838 cmd |= PCI_COMMAND_INVALIDATE;
2839 pci_write_config_word(dev, PCI_COMMAND, cmd);
2840 }
f7625980 2841
1da177e4
LT
2842 return 0;
2843}
2844
694625c0
RD
2845/**
2846 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2847 * @dev: the PCI device for which MWI is enabled
2848 *
2849 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2850 * Callers are not required to check the return value.
2851 *
2852 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2853 */
2854int pci_try_set_mwi(struct pci_dev *dev)
2855{
2856 int rc = pci_set_mwi(dev);
2857 return rc;
2858}
2859
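/*
 * Example (illustrative): since pci_try_set_mwi() is allowed to fail
 * without consequence, a probe path can simply call it and move on.  The
 * helper name is an assumption for the example.
 */
static void __maybe_unused foo_enable_optional_mwi(struct pci_dev *pdev)
{
	pci_try_set_mwi(pdev);	/* best effort; no need to check the result */
}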
1da177e4
LT
2860/**
2861 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2862 * @dev: the PCI device to disable
2863 *
2864 * Disables PCI Memory-Write-Invalidate transaction on the device
2865 */
2866void
2867pci_clear_mwi(struct pci_dev *dev)
2868{
2869 u16 cmd;
2870
2871 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2872 if (cmd & PCI_COMMAND_INVALIDATE) {
2873 cmd &= ~PCI_COMMAND_INVALIDATE;
2874 pci_write_config_word(dev, PCI_COMMAND, cmd);
2875 }
2876}
edb2d97e 2877#endif /* ! PCI_DISABLE_MWI */
1da177e4 2878
a04ce0ff
BR
2879/**
2880 * pci_intx - enables/disables PCI INTx for device dev
8f7020d3
RD
2881 * @pdev: the PCI device to operate on
2882 * @enable: boolean: whether to enable or disable PCI INTx
a04ce0ff
BR
2883 *
2884 * Enables/disables PCI INTx for device dev
2885 */
2886void
2887pci_intx(struct pci_dev *pdev, int enable)
2888{
2889 u16 pci_command, new;
2890
2891 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2892
2893 if (enable) {
2894 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2895 } else {
2896 new = pci_command | PCI_COMMAND_INTX_DISABLE;
2897 }
2898
2899 if (new != pci_command) {
9ac7849e
TH
2900 struct pci_devres *dr;
2901
2fd9d74b 2902 pci_write_config_word(pdev, PCI_COMMAND, new);
9ac7849e
TH
2903
2904 dr = find_pci_dr(pdev);
2905 if (dr && !dr->restore_intx) {
2906 dr->restore_intx = 1;
2907 dr->orig_intx = !enable;
2908 }
a04ce0ff
BR
2909 }
2910}
2911
a2e27787
JK
2912/**
2913 * pci_intx_mask_supported - probe for INTx masking support
6e9292c5 2914 * @dev: the PCI device to operate on
a2e27787
JK
2915 *
2916 * Check if the device dev supports INTx masking via the config space
2917 * command word.
2918 */
2919bool pci_intx_mask_supported(struct pci_dev *dev)
2920{
2921 bool mask_supported = false;
2922 u16 orig, new;
2923
fbebb9fd
BH
2924 if (dev->broken_intx_masking)
2925 return false;
2926
a2e27787
JK
2927 pci_cfg_access_lock(dev);
2928
2929 pci_read_config_word(dev, PCI_COMMAND, &orig);
2930 pci_write_config_word(dev, PCI_COMMAND,
2931 orig ^ PCI_COMMAND_INTX_DISABLE);
2932 pci_read_config_word(dev, PCI_COMMAND, &new);
2933
2934 /*
2935 * There's no way to protect against hardware bugs or detect them
2936 * reliably, but as long as we know what the value should be, let's
2937 * go ahead and check it.
2938 */
2939 if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
2940 dev_err(&dev->dev, "Command register changed from "
2941 "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
2942 } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
2943 mask_supported = true;
2944 pci_write_config_word(dev, PCI_COMMAND, orig);
2945 }
2946
2947 pci_cfg_access_unlock(dev);
2948 return mask_supported;
2949}
2950EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
2951
2952static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
2953{
2954 struct pci_bus *bus = dev->bus;
2955 bool mask_updated = true;
2956 u32 cmd_status_dword;
2957 u16 origcmd, newcmd;
2958 unsigned long flags;
2959 bool irq_pending;
2960
2961 /*
2962 * We do a single dword read to retrieve both command and status.
2963 * Document assumptions that make this possible.
2964 */
2965 BUILD_BUG_ON(PCI_COMMAND % 4);
2966 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
2967
2968 raw_spin_lock_irqsave(&pci_lock, flags);
2969
2970 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
2971
2972 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
2973
2974 /*
2975 * Check interrupt status register to see whether our device
2976 * triggered the interrupt (when masking) or the next IRQ is
2977 * already pending (when unmasking).
2978 */
2979 if (mask != irq_pending) {
2980 mask_updated = false;
2981 goto done;
2982 }
2983
2984 origcmd = cmd_status_dword;
2985 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
2986 if (mask)
2987 newcmd |= PCI_COMMAND_INTX_DISABLE;
2988 if (newcmd != origcmd)
2989 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
2990
2991done:
2992 raw_spin_unlock_irqrestore(&pci_lock, flags);
2993
2994 return mask_updated;
2995}
2996
2997/**
2998 * pci_check_and_mask_intx - mask INTx on pending interrupt
6e9292c5 2999 * @dev: the PCI device to operate on
a2e27787
JK
3000 *
3001 * Check if the device dev has its INTx line asserted, mask it and
3002 * return true in that case. False is returned if no interrupt was
3003 * pending.
3004 */
3005bool pci_check_and_mask_intx(struct pci_dev *dev)
3006{
3007 return pci_check_and_set_intx_mask(dev, true);
3008}
3009EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3010
3011/**
ebd50b93 3012 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
6e9292c5 3013 * @dev: the PCI device to operate on
a2e27787
JK
3014 *
3015 * Check if the device dev has its INTx line asserted, unmask it if not
3016 * and return true. False is returned and the mask remains active if
3017 * there was still an interrupt pending.
3018 */
3019bool pci_check_and_unmask_intx(struct pci_dev *dev)
3020{
3021 return pci_check_and_set_intx_mask(dev, false);
3022}
3023EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
3024
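/*
 * Example (illustrative sketch, not part of the upstream file): the two
 * helpers above are intended for drivers (uio/vfio style) that service a
 * shared INTx line and defer the real handling.  The handler name and the
 * deferral mechanism hinted at below are assumptions for the example.
 */
static irqreturn_t __maybe_unused foo_intx_handler(int irq, void *data)
{
	struct pci_dev *pdev = data;

	/* Mask only if our device actually asserted the line */
	if (!pci_check_and_mask_intx(pdev))
		return IRQ_NONE;

	/*
	 * ... notify the real consumer, which later calls
	 * pci_check_and_unmask_intx() to re-enable the interrupt ...
	 */
	return IRQ_HANDLED;
}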
f5f2b131 3025/**
da27f4b3 3026 * pci_msi_off - disables any MSI or MSI-X capabilities
8d7d86e9 3027 * @dev: the PCI device to operate on
f5f2b131 3028 *
da27f4b3
BH
3029 * If you want to use MSI, see pci_enable_msi() and friends.
3030 * This is a lower-level primitive that allows us to disable
3031 * MSI operation at the device level.
f5f2b131
EB
3032 */
3033void pci_msi_off(struct pci_dev *dev)
3034{
3035 int pos;
3036 u16 control;
3037
da27f4b3
BH
3038 /*
3039 * This looks like it could go in msi.c, but we need it even when
3040 * CONFIG_PCI_MSI=n. For the same reason, we can't use
3041 * dev->msi_cap or dev->msix_cap here.
3042 */
f5f2b131
EB
3043 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
3044 if (pos) {
3045 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
3046 control &= ~PCI_MSI_FLAGS_ENABLE;
3047 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
3048 }
3049 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
3050 if (pos) {
3051 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
3052 control &= ~PCI_MSIX_FLAGS_ENABLE;
3053 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3054 }
3055}
b03214d5 3056EXPORT_SYMBOL_GPL(pci_msi_off);
f5f2b131 3057
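/*
 * Example (illustrative): a driver that relies on legacy INTx can quiesce
 * MSI/MSI-X with the helper above before requesting its interrupt.  The
 * helper name is an assumption for the example.
 */
static void __maybe_unused foo_force_intx(struct pci_dev *pdev)
{
	pci_msi_off(pdev);	/* clear any MSI/MSI-X enable bits */
	pci_intx(pdev, 1);	/* make sure INTx is not disabled */
}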
4d57cdfa
FT
3058int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3059{
3060 return dma_set_max_seg_size(&dev->dev, size);
3061}
3062EXPORT_SYMBOL(pci_set_dma_max_seg_size);
4d57cdfa 3063
59fc67de
FT
3064int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3065{
3066 return dma_set_seg_boundary(&dev->dev, mask);
3067}
3068EXPORT_SYMBOL(pci_set_dma_seg_boundary);
59fc67de 3069
3775a209
CL
3070/**
3071 * pci_wait_for_pending_transaction - waits for pending transaction
3072 * @dev: the PCI device to operate on
3073 *
3074 * Return 0 if a transaction is pending, 1 otherwise.
3075 */
3076int pci_wait_for_pending_transaction(struct pci_dev *dev)
8dd7f803 3077{
157e876f
AW
3078 if (!pci_is_pcie(dev))
3079 return 1;
8c1c699f 3080
157e876f 3081 return pci_wait_for_pending(dev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND);
3775a209
CL
3082}
3083EXPORT_SYMBOL(pci_wait_for_pending_transaction);
3084
3085static int pcie_flr(struct pci_dev *dev, int probe)
3086{
3087 u32 cap;
3088
3089 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
3090 if (!(cap & PCI_EXP_DEVCAP_FLR))
3091 return -ENOTTY;
3092
3093 if (probe)
3094 return 0;
3095
3096 if (!pci_wait_for_pending_transaction(dev))
3097 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
8c1c699f 3098
59875ae4 3099 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
04b55c47 3100
8c1c699f 3101 msleep(100);
8dd7f803 3102
8dd7f803
SY
3103 return 0;
3104}
d91cdc74 3105
8c1c699f 3106static int pci_af_flr(struct pci_dev *dev, int probe)
1ca88797 3107{
8c1c699f 3108 int pos;
1ca88797
SY
3109 u8 cap;
3110
8c1c699f
YZ
3111 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3112 if (!pos)
1ca88797 3113 return -ENOTTY;
8c1c699f
YZ
3114
3115 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
1ca88797
SY
3116 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3117 return -ENOTTY;
3118
3119 if (probe)
3120 return 0;
3121
1ca88797 3122 /* Wait for Transaction Pending bit clean */
157e876f
AW
3123 if (pci_wait_for_pending(dev, PCI_AF_STATUS, PCI_AF_STATUS_TP))
3124 goto clear;
5fe5db05 3125
8c1c699f
YZ
3126 dev_err(&dev->dev, "transaction is not cleared; "
3127 "proceeding with reset anyway\n");
5fe5db05 3128
8c1c699f
YZ
3129clear:
3130 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
1ca88797 3131 msleep(100);
8c1c699f 3132
1ca88797
SY
3133 return 0;
3134}
3135
83d74e03
RW
3136/**
3137 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3138 * @dev: Device to reset.
3139 * @probe: If set, only check if the device can be reset this way.
3140 *
3141 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3142 * unset, it will be reinitialized internally when going from PCI_D3hot to
3143 * PCI_D0. If that's the case and the device is not in a low-power state
3144 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3145 *
3146 * NOTE: This causes the caller to sleep for twice the device power transition
3147 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
f7625980 3148 * by default (i.e. unless the @dev's d3_delay field has a different value).
83d74e03
RW
3149 * Moreover, only devices in D0 can be reset by this function.
3150 */
f85876ba 3151static int pci_pm_reset(struct pci_dev *dev, int probe)
d91cdc74 3152{
f85876ba
YZ
3153 u16 csr;
3154
3155 if (!dev->pm_cap)
3156 return -ENOTTY;
d91cdc74 3157
f85876ba
YZ
3158 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3159 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3160 return -ENOTTY;
d91cdc74 3161
f85876ba
YZ
3162 if (probe)
3163 return 0;
1ca88797 3164
f85876ba
YZ
3165 if (dev->current_state != PCI_D0)
3166 return -EINVAL;
3167
3168 csr &= ~PCI_PM_CTRL_STATE_MASK;
3169 csr |= PCI_D3hot;
3170 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 3171 pci_dev_d3_sleep(dev);
f85876ba
YZ
3172
3173 csr &= ~PCI_PM_CTRL_STATE_MASK;
3174 csr |= PCI_D0;
3175 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 3176 pci_dev_d3_sleep(dev);
f85876ba
YZ
3177
3178 return 0;
3179}
3180
64e8674f
AW
3181/**
3182 * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
3183 * @dev: Bridge device
3184 *
3185 * Use the bridge control register to assert reset on the secondary bus.
3186 * Devices on the secondary bus are left in power-on state.
3187 */
3188void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
c12ff1df
YZ
3189{
3190 u16 ctrl;
64e8674f
AW
3191
3192 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
3193 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3194 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
de0c548c
AW
3195 /*
3196 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
f7625980 3197 * this to 2ms to ensure that we meet the minimum requirement.
de0c548c
AW
3198 */
3199 msleep(2);
64e8674f
AW
3200
3201 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3202 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
de0c548c
AW
3203
3204 /*
3205 * Trhfa for conventional PCI is 2^25 clock cycles.
3206 * Assuming a minimum 33MHz clock this results in a 1s
3207 * delay before we can consider subordinate devices to
3208 * be re-initialized. PCIe has some ways to shorten this,
3209 * but we don't make use of them yet.
3210 */
3211 ssleep(1);
64e8674f
AW
3212}
3213EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
3214
3215static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3216{
c12ff1df
YZ
3217 struct pci_dev *pdev;
3218
654b75e0 3219 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
c12ff1df
YZ
3220 return -ENOTTY;
3221
3222 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3223 if (pdev != dev)
3224 return -ENOTTY;
3225
3226 if (probe)
3227 return 0;
3228
64e8674f 3229 pci_reset_bridge_secondary_bus(dev->bus->self);
c12ff1df
YZ
3230
3231 return 0;
3232}
3233
608c3881
AW
3234static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
3235{
3236 int rc = -ENOTTY;
3237
3238 if (!hotplug || !try_module_get(hotplug->ops->owner))
3239 return rc;
3240
3241 if (hotplug->ops->reset_slot)
3242 rc = hotplug->ops->reset_slot(hotplug, probe);
3243
3244 module_put(hotplug->ops->owner);
3245
3246 return rc;
3247}
3248
3249static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
3250{
3251 struct pci_dev *pdev;
3252
3253 if (dev->subordinate || !dev->slot)
3254 return -ENOTTY;
3255
3256 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3257 if (pdev != dev && pdev->slot == dev->slot)
3258 return -ENOTTY;
3259
3260 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
3261}
3262
977f857c 3263static int __pci_dev_reset(struct pci_dev *dev, int probe)
d91cdc74 3264{
8c1c699f
YZ
3265 int rc;
3266
3267 might_sleep();
3268
b9c3b266
DC
3269 rc = pci_dev_specific_reset(dev, probe);
3270 if (rc != -ENOTTY)
3271 goto done;
3272
8c1c699f
YZ
3273 rc = pcie_flr(dev, probe);
3274 if (rc != -ENOTTY)
3275 goto done;
d91cdc74 3276
8c1c699f 3277 rc = pci_af_flr(dev, probe);
f85876ba
YZ
3278 if (rc != -ENOTTY)
3279 goto done;
3280
3281 rc = pci_pm_reset(dev, probe);
c12ff1df
YZ
3282 if (rc != -ENOTTY)
3283 goto done;
3284
608c3881
AW
3285 rc = pci_dev_reset_slot_function(dev, probe);
3286 if (rc != -ENOTTY)
3287 goto done;
3288
c12ff1df 3289 rc = pci_parent_bus_reset(dev, probe);
8c1c699f 3290done:
977f857c
KRW
3291 return rc;
3292}
3293
77cb985a
AW
3294static void pci_dev_lock(struct pci_dev *dev)
3295{
3296 pci_cfg_access_lock(dev);
3297 /* block PM suspend, driver probe, etc. */
3298 device_lock(&dev->dev);
3299}
3300
61cf16d8
AW
3301/* Return 1 on successful lock, 0 on contention */
3302static int pci_dev_trylock(struct pci_dev *dev)
3303{
3304 if (pci_cfg_access_trylock(dev)) {
3305 if (device_trylock(&dev->dev))
3306 return 1;
3307 pci_cfg_access_unlock(dev);
3308 }
3309
3310 return 0;
3311}
3312
77cb985a
AW
3313static void pci_dev_unlock(struct pci_dev *dev)
3314{
3315 device_unlock(&dev->dev);
3316 pci_cfg_access_unlock(dev);
3317}
3318
3ebe7f9f
KB
3319/**
3320 * pci_reset_notify - notify device driver of reset
3321 * @dev: device to be notified of reset
3322 * @prepare: 'true' if device is about to be reset; 'false' if reset attempt
3323 * completed
3324 *
3325 * Must be called prior to device access being disabled and after device
3326 * access is restored.
3327 */
3328static void pci_reset_notify(struct pci_dev *dev, bool prepare)
3329{
3330 const struct pci_error_handlers *err_handler =
3331 dev->driver ? dev->driver->err_handler : NULL;
3332 if (err_handler && err_handler->reset_notify)
3333 err_handler->reset_notify(dev, prepare);
3334}
3335
77cb985a
AW
3336static void pci_dev_save_and_disable(struct pci_dev *dev)
3337{
3ebe7f9f
KB
3338 pci_reset_notify(dev, true);
3339
a6cbaade
AW
3340 /*
3341 * Wake-up device prior to save. PM registers default to D0 after
3342 * reset and a simple register restore doesn't reliably return
3343 * to a non-D0 state anyway.
3344 */
3345 pci_set_power_state(dev, PCI_D0);
3346
77cb985a
AW
3347 pci_save_state(dev);
3348 /*
3349 * Disable the device by clearing the Command register, except for
3350 * INTx-disable which is set. This not only disables MMIO and I/O port
3351 * BARs, but also prevents the device from being Bus Master, preventing
3352 * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
3353 * compliant devices, INTx-disable prevents legacy interrupts.
3354 */
3355 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3356}
3357
3358static void pci_dev_restore(struct pci_dev *dev)
3359{
3360 pci_restore_state(dev);
3ebe7f9f 3361 pci_reset_notify(dev, false);
77cb985a
AW
3362}
3363
977f857c
KRW
3364static int pci_dev_reset(struct pci_dev *dev, int probe)
3365{
3366 int rc;
3367
77cb985a
AW
3368 if (!probe)
3369 pci_dev_lock(dev);
977f857c
KRW
3370
3371 rc = __pci_dev_reset(dev, probe);
3372
77cb985a
AW
3373 if (!probe)
3374 pci_dev_unlock(dev);
3375
8c1c699f 3376 return rc;
d91cdc74 3377}
3ebe7f9f 3378
d91cdc74 3379/**
8c1c699f
YZ
3380 * __pci_reset_function - reset a PCI device function
3381 * @dev: PCI device to reset
d91cdc74
SY
3382 *
3383 * Some devices allow an individual function to be reset without affecting
3384 * other functions in the same device. The PCI device must be responsive
3385 * to PCI config space in order to use this function.
3386 *
3387 * The device function is presumed to be unused when this function is called.
3388 * Resetting the device will make the contents of PCI configuration space
3389 * random, so any caller of this must be prepared to reinitialise the
3390 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3391 * etc.
3392 *
8c1c699f 3393 * Returns 0 if the device function was successfully reset or negative if the
d91cdc74
SY
3394 * device doesn't support resetting a single function.
3395 */
8c1c699f 3396int __pci_reset_function(struct pci_dev *dev)
d91cdc74 3397{
8c1c699f 3398 return pci_dev_reset(dev, 0);
d91cdc74 3399}
8c1c699f 3400EXPORT_SYMBOL_GPL(__pci_reset_function);
8dd7f803 3401
6fbf9e7a
KRW
3402/**
3403 * __pci_reset_function_locked - reset a PCI device function while holding
3404 * the @dev mutex lock.
3405 * @dev: PCI device to reset
3406 *
3407 * Some devices allow an individual function to be reset without affecting
3408 * other functions in the same device. The PCI device must be responsive
3409 * to PCI config space in order to use this function.
3410 *
3411 * The device function is presumed to be unused and the caller is holding
3412 * the device mutex lock when this function is called.
3413 * Resetting the device will make the contents of PCI configuration space
3414 * random, so any caller of this must be prepared to reinitialise the
3415 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3416 * etc.
3417 *
3418 * Returns 0 if the device function was successfully reset or negative if the
3419 * device doesn't support resetting a single function.
3420 */
3421int __pci_reset_function_locked(struct pci_dev *dev)
3422{
977f857c 3423 return __pci_dev_reset(dev, 0);
6fbf9e7a
KRW
3424}
3425EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3426
711d5779
MT
3427/**
3428 * pci_probe_reset_function - check whether the device can be safely reset
3429 * @dev: PCI device to reset
3430 *
3431 * Some devices allow an individual function to be reset without affecting
3432 * other functions in the same device. The PCI device must be responsive
3433 * to PCI config space in order to use this function.
3434 *
3435 * Returns 0 if the device function can be reset or negative if the
3436 * device doesn't support resetting a single function.
3437 */
3438int pci_probe_reset_function(struct pci_dev *dev)
3439{
3440 return pci_dev_reset(dev, 1);
3441}
3442
/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  This function differs
 * from __pci_reset_function() in that it saves and restores device state
 * over the reset.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_reset_function(struct pci_dev *dev)
{
	int rc;

	rc = pci_dev_reset(dev, 1);
	if (rc)
		return rc;

	pci_dev_save_and_disable(dev);

	rc = pci_dev_reset(dev, 0);

	pci_dev_restore(dev);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);

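/*
 * Editorial note (not part of pci.c): a minimal sketch of how a driver might
 * use the probe/reset pair above.  The device pointer "pdev" and the
 * reinitialisation helper "mydrv_reinit()" are hypothetical; after
 * pci_reset_function() returns, config space has been restored by the PCI
 * core, but device-specific state must still be rebuilt by the caller.
 */
static int mydrv_recover(struct pci_dev *pdev)
{
	int rc;

	if (pci_probe_reset_function(pdev))
		return -ENOTTY;		/* no per-function reset available */

	rc = pci_reset_function(pdev);	/* quiesce, reset, restore config */
	if (rc)
		return rc;

	return mydrv_reinit(pdev);	/* hypothetical driver re-init */
}
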
/**
 * pci_try_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Same as above, except return -EAGAIN if unable to lock device.
 */
int pci_try_reset_function(struct pci_dev *dev)
{
	int rc;

	rc = pci_dev_reset(dev, 1);
	if (rc)
		return rc;

	pci_dev_save_and_disable(dev);

	if (pci_dev_trylock(dev)) {
		rc = __pci_dev_reset(dev, 0);
		pci_dev_unlock(dev);
	} else
		rc = -EAGAIN;

	pci_dev_restore(dev);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_try_reset_function);

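/*
 * Editorial note (not part of pci.c): pci_try_reset_function() is the
 * non-blocking variant; a caller that must not sleep indefinitely on the
 * device lock can retry with a bounded back-off.  A hedged sketch, with
 * hypothetical names and retry policy:
 */
static int mydrv_try_recover(struct pci_dev *pdev)
{
	int tries = 3;
	int rc;

	do {
		rc = pci_try_reset_function(pdev);
		if (rc != -EAGAIN)
			return rc;
		msleep(10);	/* back off while the device lock is contended */
	} while (--tries);

	return -EAGAIN;
}
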
/* Lock devices from the top of the tree down */
static void pci_bus_lock(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		pci_dev_lock(dev);
		if (dev->subordinate)
			pci_bus_lock(dev->subordinate);
	}
}

/* Unlock devices from the bottom of the tree up */
static void pci_bus_unlock(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->subordinate)
			pci_bus_unlock(dev->subordinate);
		pci_dev_unlock(dev);
	}
}

/* Return 1 on successful lock, 0 on contention */
static int pci_bus_trylock(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (!pci_dev_trylock(dev))
			goto unlock;
		if (dev->subordinate) {
			if (!pci_bus_trylock(dev->subordinate)) {
				pci_dev_unlock(dev);
				goto unlock;
			}
		}
	}
	return 1;

unlock:
	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
		if (dev->subordinate)
			pci_bus_unlock(dev->subordinate);
		pci_dev_unlock(dev);
	}
	return 0;
}

/* Lock devices from the top of the tree down */
static void pci_slot_lock(struct pci_slot *slot)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		pci_dev_lock(dev);
		if (dev->subordinate)
			pci_bus_lock(dev->subordinate);
	}
}

/* Unlock devices from the bottom of the tree up */
static void pci_slot_unlock(struct pci_slot *slot)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		if (dev->subordinate)
			pci_bus_unlock(dev->subordinate);
		pci_dev_unlock(dev);
	}
}

/* Return 1 on successful lock, 0 on contention */
static int pci_slot_trylock(struct pci_slot *slot)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		if (!pci_dev_trylock(dev))
			goto unlock;
		if (dev->subordinate) {
			if (!pci_bus_trylock(dev->subordinate)) {
				pci_dev_unlock(dev);
				goto unlock;
			}
		}
	}
	return 1;

unlock:
	list_for_each_entry_continue_reverse(dev,
					     &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		if (dev->subordinate)
			pci_bus_unlock(dev->subordinate);
		pci_dev_unlock(dev);
	}
	return 0;
}

/* Save and disable devices from the top of the tree down */
static void pci_bus_save_and_disable(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		pci_dev_save_and_disable(dev);
		if (dev->subordinate)
			pci_bus_save_and_disable(dev->subordinate);
	}
}

/*
 * Restore devices from top of the tree down - parent bridges need to be
 * restored before we can get to subordinate devices.
 */
static void pci_bus_restore(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		pci_dev_restore(dev);
		if (dev->subordinate)
			pci_bus_restore(dev->subordinate);
	}
}

/* Save and disable devices from the top of the tree down */
static void pci_slot_save_and_disable(struct pci_slot *slot)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		pci_dev_save_and_disable(dev);
		if (dev->subordinate)
			pci_bus_save_and_disable(dev->subordinate);
	}
}

/*
 * Restore devices from top of the tree down - parent bridges need to be
 * restored before we can get to subordinate devices.
 */
static void pci_slot_restore(struct pci_slot *slot)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		pci_dev_restore(dev);
		if (dev->subordinate)
			pci_bus_restore(dev->subordinate);
	}
}

static int pci_slot_reset(struct pci_slot *slot, int probe)
{
	int rc;

	if (!slot)
		return -ENOTTY;

	if (!probe)
		pci_slot_lock(slot);

	might_sleep();

	rc = pci_reset_hotplug_slot(slot->hotplug, probe);

	if (!probe)
		pci_slot_unlock(slot);

	return rc;
}

/**
 * pci_probe_reset_slot - probe whether a PCI slot can be reset
 * @slot: PCI slot to probe
 *
 * Return 0 if slot can be reset, negative if a slot reset is not supported.
 */
int pci_probe_reset_slot(struct pci_slot *slot)
{
	return pci_slot_reset(slot, 1);
}
EXPORT_SYMBOL_GPL(pci_probe_reset_slot);

/**
 * pci_reset_slot - reset a PCI slot
 * @slot: PCI slot to reset
 *
 * A PCI bus may host multiple slots, each slot may support a reset mechanism
 * independent of other slots.  For instance, some slots may support slot power
 * control.  In the case of a 1:1 bus to slot architecture, this function may
 * wrap the bus reset to avoid spurious slot-related events such as hotplug.
 * Generally a slot reset should be attempted before a bus reset.  All of the
 * functions of the slot and any subordinate buses behind the slot are reset
 * through this function.  PCI config space of all devices in the slot and
 * behind the slot is saved before and restored after reset.
 *
 * Return 0 on success, non-zero on error.
 */
int pci_reset_slot(struct pci_slot *slot)
{
	int rc;

	rc = pci_slot_reset(slot, 1);
	if (rc)
		return rc;

	pci_slot_save_and_disable(slot);

	rc = pci_slot_reset(slot, 0);

	pci_slot_restore(slot);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_slot);

/**
 * pci_try_reset_slot - Try to reset a PCI slot
 * @slot: PCI slot to reset
 *
 * Same as above except return -EAGAIN if the slot cannot be locked
 */
int pci_try_reset_slot(struct pci_slot *slot)
{
	int rc;

	rc = pci_slot_reset(slot, 1);
	if (rc)
		return rc;

	pci_slot_save_and_disable(slot);

	if (pci_slot_trylock(slot)) {
		might_sleep();
		rc = pci_reset_hotplug_slot(slot->hotplug, 0);
		pci_slot_unlock(slot);
	} else
		rc = -EAGAIN;

	pci_slot_restore(slot);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_try_reset_slot);

static int pci_bus_reset(struct pci_bus *bus, int probe)
{
	if (!bus->self)
		return -ENOTTY;

	if (probe)
		return 0;

	pci_bus_lock(bus);

	might_sleep();

	pci_reset_bridge_secondary_bus(bus->self);

	pci_bus_unlock(bus);

	return 0;
}

/**
 * pci_probe_reset_bus - probe whether a PCI bus can be reset
 * @bus: PCI bus to probe
 *
 * Return 0 if bus can be reset, negative if a bus reset is not supported.
 */
int pci_probe_reset_bus(struct pci_bus *bus)
{
	return pci_bus_reset(bus, 1);
}
EXPORT_SYMBOL_GPL(pci_probe_reset_bus);

/**
 * pci_reset_bus - reset a PCI bus
 * @bus: top level PCI bus to reset
 *
 * Do a bus reset on the given bus and any subordinate buses, saving
 * and restoring state of all devices.
 *
 * Return 0 on success, non-zero on error.
 */
int pci_reset_bus(struct pci_bus *bus)
{
	int rc;

	rc = pci_bus_reset(bus, 1);
	if (rc)
		return rc;

	pci_bus_save_and_disable(bus);

	rc = pci_bus_reset(bus, 0);

	pci_bus_restore(bus);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_bus);

/**
 * pci_try_reset_bus - Try to reset a PCI bus
 * @bus: top level PCI bus to reset
 *
 * Same as above except return -EAGAIN if the bus cannot be locked
 */
int pci_try_reset_bus(struct pci_bus *bus)
{
	int rc;

	rc = pci_bus_reset(bus, 1);
	if (rc)
		return rc;

	pci_bus_save_and_disable(bus);

	if (pci_bus_trylock(bus)) {
		might_sleep();
		pci_reset_bridge_secondary_bus(bus->self);
		pci_bus_unlock(bus);
	} else
		rc = -EAGAIN;

	pci_bus_restore(bus);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_try_reset_bus);

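/*
 * Editorial note (not part of pci.c): the slot and bus reset entry points are
 * typically used by code such as VFIO when a whole group of functions must be
 * reset together.  A hedged sketch preferring the narrower slot reset and
 * falling back to a bus reset (pdev and the policy are hypothetical):
 */
static int mydrv_reset_topology(struct pci_dev *pdev)
{
	if (pdev->slot && !pci_probe_reset_slot(pdev->slot))
		return pci_reset_slot(pdev->slot);

	if (!pci_probe_reset_bus(pdev->bus))
		return pci_reset_bus(pdev->bus);

	return -ENOTTY;		/* neither reset mechanism is available */
}
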
/**
 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum designed memory read count in bytes
 *    or appropriate error value.
 */
int pcix_get_max_mmrbc(struct pci_dev *dev)
{
	int cap;
	u32 stat;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
}
EXPORT_SYMBOL(pcix_get_max_mmrbc);

/**
 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum memory read count in bytes
 *    or appropriate error value.
 */
int pcix_get_mmrbc(struct pci_dev *dev)
{
	int cap;
	u16 cmd;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
}
EXPORT_SYMBOL(pcix_get_mmrbc);

/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *    valid values are 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read byte count; some bridges have errata
 * that prevent this.
 */
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
	int cap;
	u32 stat, v, o;
	u16 cmd;

	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
		return -EINVAL;

	v = ffs(mmrbc) - 10;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
		return -E2BIG;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
	if (o != v) {
		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
			return -EIO;

		cmd &= ~PCI_X_CMD_MAX_READ;
		cmd |= v << 2;
		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL(pcix_set_mmrbc);

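/*
 * Editorial note (not part of pci.c): MMRBC is encoded as a power of two
 * between 512 and 4096 bytes.  A hedged sketch of a caller clamping a
 * requested (already power-of-two) value to what the device was designed
 * for (names are hypothetical):
 */
static int mydrv_set_mmrbc(struct pci_dev *pdev, int wanted)
{
	int max = pcix_get_max_mmrbc(pdev);

	if (max < 0)
		return max;		/* not a PCI-X device */
	if (wanted > max)
		wanted = max;

	return pcix_set_mmrbc(pdev, wanted);
}
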
/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns maximum memory read request in bytes
 *    or appropriate error value.
 */
int pcie_get_readrq(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
}
EXPORT_SYMBOL(pcie_get_readrq);

/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read request in bytes
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
	u16 v;

	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
		return -EINVAL;

	/*
	 * If using the "performance" PCIe config, we clamp the
	 * read rq size to the max packet size to prevent the
	 * host bridge generating requests larger than we can
	 * cope with
	 */
	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		int mps = pcie_get_mps(dev);

		if (mps < rq)
			rq = mps;
	}

	v = (ffs(rq) - 8) << 12;

	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						  PCI_EXP_DEVCTL_READRQ, v);
}
EXPORT_SYMBOL(pcie_set_readrq);

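/*
 * Editorial note (not part of pci.c): a hedged sketch of a driver tuning the
 * read request size during probe (pdev is hypothetical).  Valid values are
 * powers of two from 128 to 4096 bytes; under PCIE_BUS_PERFORMANCE the core
 * may clamp the request to the current maximum payload size, as shown above.
 */
static void mydrv_tune_readrq(struct pci_dev *pdev)
{
	if (pcie_get_readrq(pdev) < 512)
		pcie_set_readrq(pdev, 512);	/* ask for at least 512-byte reads */
}
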
/**
 * pcie_get_mps - get PCI Express maximum payload size
 * @dev: PCI device to query
 *
 * Returns maximum payload size in bytes
 */
int pcie_get_mps(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
}
EXPORT_SYMBOL(pcie_get_mps);

/**
 * pcie_set_mps - set PCI Express maximum payload size
 * @dev: PCI device to query
 * @mps: maximum payload size in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum payload size
 */
int pcie_set_mps(struct pci_dev *dev, int mps)
{
	u16 v;

	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
		return -EINVAL;

	v = ffs(mps) - 8;
	if (v > dev->pcie_mpss)
		return -EINVAL;
	v <<= 5;

	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						  PCI_EXP_DEVCTL_PAYLOAD, v);
}
EXPORT_SYMBOL(pcie_set_mps);

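/*
 * Editorial note (not part of pci.c): the payload size is bounded by
 * dev->pcie_mpss (the supported MPS from the Device Capabilities register)
 * and is normally negotiated by the PCI core, so drivers rarely call
 * pcie_set_mps() directly.  A hedged sketch (pdev is hypothetical):
 */
static int mydrv_set_mps(struct pci_dev *pdev, int mps)
{
	int supported = 128 << pdev->pcie_mpss;	/* device's advertised maximum */

	if (mps > supported)
		mps = supported;

	return pcie_set_mps(pdev, mps);
}
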
/**
 * pcie_get_minimum_link - determine minimum link settings of a PCI device
 * @dev: PCI device to query
 * @speed: storage for minimum speed
 * @width: storage for minimum width
 *
 * This function will walk up the PCI device chain and determine the minimum
 * link width and speed of the device.
 */
int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
			  enum pcie_link_width *width)
{
	int ret;

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	while (dev) {
		u16 lnksta;
		enum pci_bus_speed next_speed;
		enum pcie_link_width next_width;

		ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
		if (ret)
			return ret;

		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
			PCI_EXP_LNKSTA_NLW_SHIFT;

		if (next_speed < *speed)
			*speed = next_speed;

		if (next_width < *width)
			*width = next_width;

		dev = dev->bus->self;
	}

	return 0;
}
EXPORT_SYMBOL(pcie_get_minimum_link);

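/*
 * Editorial note (not part of pci.c): a hedged sketch of using
 * pcie_get_minimum_link() to warn when a device sits behind a slower link
 * than it expects, similar to what some NIC drivers do.  The function name
 * and thresholds are hypothetical:
 */
static void mydrv_check_link(struct pci_dev *pdev)
{
	enum pci_bus_speed speed;
	enum pcie_link_width width;

	if (pcie_get_minimum_link(pdev, &speed, &width))
		return;

	if (speed < PCIE_SPEED_5_0GT || width < PCIE_LNK_X8)
		dev_warn(&pdev->dev,
			 "running below the expected PCIe bandwidth\n");
}
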
/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes a BAR mask from the given resource type.
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
	int i, bars = 0;

	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (pci_resource_flags(dev, i) & flags)
			bars |= (1 << i);
	return bars;
}

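/*
 * Editorial note (not part of pci.c): pci_select_bars() is typically paired
 * with pci_request_selected_regions().  A hedged sketch claiming only the
 * MMIO BARs of a device (pdev and the owner string are hypothetical):
 */
static int mydrv_request_mmio(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "mydrv");
}
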
/**
 * pci_resource_bar - get position of the BAR associated with a resource
 * @dev: the PCI device
 * @resno: the resource number
 * @type: the BAR type to be filled in
 *
 * Returns BAR position in config space, or 0 if the BAR is invalid.
 */
int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
{
	int reg;

	if (resno < PCI_ROM_RESOURCE) {
		*type = pci_bar_unknown;
		return PCI_BASE_ADDRESS_0 + 4 * resno;
	} else if (resno == PCI_ROM_RESOURCE) {
		*type = pci_bar_mem32;
		return dev->rom_base_reg;
	} else if (resno < PCI_BRIDGE_RESOURCES) {
		/* device specific resource */
		reg = pci_iov_resource_bar(dev, resno, type);
		if (reg)
			return reg;
	}

	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
	return 0;
}

/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;	/* NULL disables */
}

static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
				  unsigned int command_bits, u32 flags)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits,
					  flags);
	return 0;
}

/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: traverse ancestors and change bridges
 *	CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
		(command_bits & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY)));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
	if (rc)
		return rc;

	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode == true)
			cmd |= command_bits;
		else
			cmd &= ~command_bits;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode == true)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}

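/*
 * Editorial note (not part of pci.c): pci_set_vga_state() is mainly used by
 * the VGA arbiter.  A hedged sketch routing legacy VGA I/O and memory cycles
 * to one device, including the bridges above it (pdev is hypothetical):
 */
static int mydrv_claim_vga(struct pci_dev *pdev)
{
	return pci_set_vga_state(pdev, true,
				 PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
				 PCI_VGA_STATE_CHANGE_DECODES |
				 PCI_VGA_STATE_CHANGE_BRIDGE);
}
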
bool pci_device_is_present(struct pci_dev *pdev)
{
	u32 v;

	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
}
EXPORT_SYMBOL_GPL(pci_device_is_present);

#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
	int seg, bus, slot, func, align_order, count;
	resource_size_t align = 0;
	char *p;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}
		if (sscanf(p, "%x:%x:%x.%x%n",
			   &seg, &bus, &slot, &func, &count) != 4) {
			seg = 0;
			if (sscanf(p, "%x:%x.%x%n",
				   &bus, &slot, &func, &count) != 3) {
				/* Invalid format */
				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
				       p);
				break;
			}
		}
		p += count;
		if (seg == pci_domain_nr(dev->bus) &&
		    bus == dev->bus->number &&
		    slot == PCI_SLOT(dev->devfn) &&
		    func == PCI_FUNC(dev->devfn)) {
			if (align_order == -1)
				align = PAGE_SIZE;
			else
				align = 1 << align_order;
			/* Found */
			break;
		}
		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
	spin_unlock(&resource_alignment_lock);
	return align;
}

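/*
 * Editorial note (not part of pci.c): the text parsed above comes from the
 * "pci=resource_alignment=" boot parameter or the matching sysfs attribute.
 * Entries are separated by ';' or ',' and take the form
 * [<order>@][<domain>:]<bus>:<slot>.<func>, e.g. (hypothetical devices):
 *
 *	pci=resource_alignment=20@0000:01:00.0;02:03.1
 *
 * which requests 2^20-byte (1 MiB) alignment for 0000:01:00.0 and the
 * default PAGE_SIZE alignment for 02:03.1.
 */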
/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align, size;
	u16 command;

	/* check if specified PCI is target device to reassign */
	align = pci_specified_resource_alignment(dev);
	if (!align)
		return;

	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		dev_warn(&dev->dev,
			 "Can't reassign resources to host bridge.\n");
		return;
	}

	dev_info(&dev->dev,
		 "Disabling memory decoding and releasing memory resources.\n");
	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
		r = &dev->resource[i];
		if (!(r->flags & IORESOURCE_MEM))
			continue;
		size = resource_size(r);
		if (size < align) {
			size = align;
			dev_info(&dev->dev,
				 "Rounding up size of resource #%d to %#llx.\n",
				 i, (unsigned long long)size);
		}
		r->flags |= IORESOURCE_UNSET;
		r->end = size - 1;
		r->start = 0;
	}
	/*
	 * Need to disable bridge's resource window,
	 * to enable the kernel to reassign new resource
	 * window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->flags |= IORESOURCE_UNSET;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}

static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
{
	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
	spin_lock(&resource_alignment_lock);
	strncpy(resource_alignment_param, buf, count);
	resource_alignment_param[count] = '\0';
	spin_unlock(&resource_alignment_lock);
	return count;
}

static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
{
	size_t count;

	spin_lock(&resource_alignment_lock);
	count = snprintf(buf, size, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);
	return count;
}

static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
{
	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}

static ssize_t pci_resource_alignment_store(struct bus_type *bus,
					    const char *buf, size_t count)
{
	return pci_set_resource_alignment_param(buf, count);
}

BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
	 pci_resource_alignment_store);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
			       &bus_attr_resource_alignment);
}

late_initcall(pci_resource_alignment_sysfs_init);

static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff).  This is the default implementation.  Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
							strlen(str + 19));
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);

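/*
 * Editorial note (not part of pci.c): pci_setup() parses the "pci=" kernel
 * command-line option, with sub-options separated by commas, e.g.
 * (illustrative combination only):
 *
 *	pci=nomsi,pcie_bus_safe,hpmemsize=8M,resource_alignment=20@0000:01:00.0
 *
 * Each sub-option is first offered to the architecture via pcibios_setup();
 * whatever remains is parsed by the chain above, and anything still
 * unrecognised is reported as "PCI: Unknown option".
 */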
EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_request_region_exclusive);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_clear_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);