PCI: do not call pci_set_power_state with PCI_D3cold
drivers/pci/pci.c
1da177e4 1/*
1da177e4
LT
2 * PCI Bus Services, see include/linux/pci.h for further explanation.
3 *
4 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
5 * David Mosberger-Tang
6 *
7 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
8 */
9
10#include <linux/kernel.h>
11#include <linux/delay.h>
12#include <linux/init.h>
13#include <linux/pci.h>
075c1771 14#include <linux/pm.h>
5a0e3ad6 15#include <linux/slab.h>
1da177e4
LT
16#include <linux/module.h>
17#include <linux/spinlock.h>
4e57b681 18#include <linux/string.h>
229f5afd 19#include <linux/log2.h>
7d715a6c 20#include <linux/pci-aspm.h>
c300bd2f 21#include <linux/pm_wakeup.h>
8dd7f803 22#include <linux/interrupt.h>
32a9a682 23#include <linux/device.h>
b67ea761 24#include <linux/pm_runtime.h>
284f5f9d 25#include <asm-generic/pci-bridge.h>
32a9a682 26#include <asm/setup.h>
bc56b9e0 27#include "pci.h"
1da177e4 28
00240c38
AS
29const char *pci_power_names[] = {
30 "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
31};
32EXPORT_SYMBOL_GPL(pci_power_names);
33
93177a74
RW
34int isa_dma_bridge_buggy;
35EXPORT_SYMBOL(isa_dma_bridge_buggy);
36
37int pci_pci_problems;
38EXPORT_SYMBOL(pci_pci_problems);
39
1ae861e6
RW
40unsigned int pci_pm_d3_delay;
41
df17e62e
MG
42static void pci_pme_list_scan(struct work_struct *work);
43
44static LIST_HEAD(pci_pme_list);
45static DEFINE_MUTEX(pci_pme_list_mutex);
46static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
47
48struct pci_pme_device {
49 struct list_head list;
50 struct pci_dev *dev;
51};
52
53#define PME_TIMEOUT 1000 /* How long between PME checks */
54
1ae861e6
RW
55static void pci_dev_d3_sleep(struct pci_dev *dev)
56{
57 unsigned int delay = dev->d3_delay;
58
59 if (delay < pci_pm_d3_delay)
60 delay = pci_pm_d3_delay;
61
62 msleep(delay);
63}
1da177e4 64
32a2eea7
JG
65#ifdef CONFIG_PCI_DOMAINS
66int pci_domains_supported = 1;
67#endif
68
4516a618
AN
69#define DEFAULT_CARDBUS_IO_SIZE (256)
70#define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024)
71/* pci=cbmemsize=nnM,cbiosize=nn can override this */
72unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
73unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
74
28760489
EB
75#define DEFAULT_HOTPLUG_IO_SIZE (256)
76#define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)
77/* pci=hpmemsize=nnM,hpiosize=nn can override this */
78unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
79unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
80
5f39e670 81enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
b03e7495 82
ac1aa47b
JB
83/*
84 * The default CLS is used if arch didn't set CLS explicitly and not
85 * all pci devices agree on the same value. Arch can override either
86 * the dfl or actual value as it sees fit. Don't forget this is
87 * measured in 32-bit words, not bytes.
88 */
98e724c7 89u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
ac1aa47b
JB
90u8 pci_cache_line_size;
91
96c55900
MS
92/*
93 * If we set up a device for bus mastering, we need to check the latency
94 * timer as certain BIOSes forget to set it properly.
95 */
96unsigned int pcibios_max_latency = 255;
97
6748dcc2
RW
98/* If set, the PCIe ARI capability will not be used. */
99static bool pcie_ari_disabled;
100
1da177e4
LT
101/**
102 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
103 * @bus: pointer to PCI bus structure to search
104 *
105 * Given a PCI bus, returns the highest PCI bus number present in the set
106 * including the given PCI bus and its list of child PCI buses.
107 */
96bde06a 108unsigned char pci_bus_max_busnr(struct pci_bus* bus)
1da177e4
LT
109{
110 struct list_head *tmp;
111 unsigned char max, n;
112
b82db5ce 113 max = bus->subordinate;
1da177e4
LT
114 list_for_each(tmp, &bus->children) {
115 n = pci_bus_max_busnr(pci_bus_b(tmp));
116 if(n > max)
117 max = n;
118 }
119 return max;
120}
b82db5ce 121EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
1da177e4 122
1684f5dd
AM
123#ifdef CONFIG_HAS_IOMEM
124void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
125{
126 /*
127 * Make sure the BAR is actually a memory resource, not an IO resource
128 */
129 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
130 WARN_ON(1);
131 return NULL;
132 }
133 return ioremap_nocache(pci_resource_start(pdev, bar),
134 pci_resource_len(pdev, bar));
135}
136EXPORT_SYMBOL_GPL(pci_ioremap_bar);
137#endif
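/*
 * Usage sketch (illustrative, not part of this file): mapping a memory BAR
 * with pci_ioremap_bar() from hypothetical driver code.  The function and
 * variable names below are assumptions for illustration only.
 */
#if 0
static void __iomem *foo_map_registers(struct pci_dev *pdev)
{
	void __iomem *regs;

	/* BAR 0 must be a memory BAR, or pci_ioremap_bar() returns NULL */
	regs = pci_ioremap_bar(pdev, 0);
	if (!regs)
		dev_err(&pdev->dev, "failed to map BAR 0\n");

	return regs;	/* unmap later with iounmap() */
}
#endif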
138
b82db5ce 139#if 0
1da177e4
LT
140/**
141 * pci_max_busnr - returns maximum PCI bus number
142 *
143 * Returns the highest PCI bus number present in the system global list of
144 * PCI buses.
145 */
146unsigned char __devinit
147pci_max_busnr(void)
148{
149 struct pci_bus *bus = NULL;
150 unsigned char max, n;
151
152 max = 0;
153 while ((bus = pci_find_next_bus(bus)) != NULL) {
154 n = pci_bus_max_busnr(bus);
155 if(n > max)
156 max = n;
157 }
158 return max;
159}
160
54c762fe
AB
161#endif /* 0 */
162
687d5fe3
ME
163#define PCI_FIND_CAP_TTL 48
164
165static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
166 u8 pos, int cap, int *ttl)
24a4e377
RD
167{
168 u8 id;
24a4e377 169
687d5fe3 170 while ((*ttl)--) {
24a4e377
RD
171 pci_bus_read_config_byte(bus, devfn, pos, &pos);
172 if (pos < 0x40)
173 break;
174 pos &= ~3;
175 pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
176 &id);
177 if (id == 0xff)
178 break;
179 if (id == cap)
180 return pos;
181 pos += PCI_CAP_LIST_NEXT;
182 }
183 return 0;
184}
185
687d5fe3
ME
186static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
187 u8 pos, int cap)
188{
189 int ttl = PCI_FIND_CAP_TTL;
190
191 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
192}
193
24a4e377
RD
194int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
195{
196 return __pci_find_next_cap(dev->bus, dev->devfn,
197 pos + PCI_CAP_LIST_NEXT, cap);
198}
199EXPORT_SYMBOL_GPL(pci_find_next_capability);
200
d3bac118
ME
201static int __pci_bus_find_cap_start(struct pci_bus *bus,
202 unsigned int devfn, u8 hdr_type)
1da177e4
LT
203{
204 u16 status;
1da177e4
LT
205
206 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
207 if (!(status & PCI_STATUS_CAP_LIST))
208 return 0;
209
210 switch (hdr_type) {
211 case PCI_HEADER_TYPE_NORMAL:
212 case PCI_HEADER_TYPE_BRIDGE:
d3bac118 213 return PCI_CAPABILITY_LIST;
1da177e4 214 case PCI_HEADER_TYPE_CARDBUS:
d3bac118 215 return PCI_CB_CAPABILITY_LIST;
1da177e4
LT
216 default:
217 return 0;
218 }
d3bac118
ME
219
220 return 0;
1da177e4
LT
221}
222
223/**
224 * pci_find_capability - query for devices' capabilities
225 * @dev: PCI device to query
226 * @cap: capability code
227 *
228 * Tell if a device supports a given PCI capability.
229 * Returns the address of the requested capability structure within the
230 * device's PCI configuration space or 0 in case the device does not
231 * support it. Possible values for @cap:
232 *
233 * %PCI_CAP_ID_PM Power Management
234 * %PCI_CAP_ID_AGP Accelerated Graphics Port
235 * %PCI_CAP_ID_VPD Vital Product Data
236 * %PCI_CAP_ID_SLOTID Slot Identification
237 * %PCI_CAP_ID_MSI Message Signalled Interrupts
238 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
239 * %PCI_CAP_ID_PCIX PCI-X
240 * %PCI_CAP_ID_EXP PCI Express
241 */
242int pci_find_capability(struct pci_dev *dev, int cap)
243{
d3bac118
ME
244 int pos;
245
246 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
247 if (pos)
248 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
249
250 return pos;
1da177e4
LT
251}
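/*
 * Usage sketch (illustrative, not part of this file): locating the Power
 * Management capability and reading its PMC register.  foo_show_pm_caps is
 * a hypothetical helper, not an existing kernel function.
 */
#if 0
static void foo_show_pm_caps(struct pci_dev *pdev)
{
	int pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
	u16 pmc;

	if (!pm)
		return;	/* no PM capability in the capability list */

	pci_read_config_word(pdev, pm + PCI_PM_PMC, &pmc);
	dev_info(&pdev->dev, "PM cap version %u\n", pmc & PCI_PM_CAP_VER_MASK);
}
#endif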
252
253/**
254 * pci_bus_find_capability - query for devices' capabilities
255 * @bus: the PCI bus to query
256 * @devfn: PCI device to query
257 * @cap: capability code
258 *
259 * Like pci_find_capability() but works for pci devices that do not have a
260 * pci_dev structure set up yet.
261 *
262 * Returns the address of the requested capability structure within the
263 * device's PCI configuration space or 0 in case the device does not
264 * support it.
265 */
266int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
267{
d3bac118 268 int pos;
1da177e4
LT
269 u8 hdr_type;
270
271 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
272
d3bac118
ME
273 pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
274 if (pos)
275 pos = __pci_find_next_cap(bus, devfn, pos, cap);
276
277 return pos;
1da177e4
LT
278}
279
280/**
281 * pci_find_ext_capability - Find an extended capability
282 * @dev: PCI device to query
283 * @cap: capability code
284 *
285 * Returns the address of the requested extended capability structure
286 * within the device's PCI configuration space or 0 if the device does
287 * not support it. Possible values for @cap:
288 *
289 * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting
290 * %PCI_EXT_CAP_ID_VC Virtual Channel
291 * %PCI_EXT_CAP_ID_DSN Device Serial Number
292 * %PCI_EXT_CAP_ID_PWR Power Budgeting
293 */
294int pci_find_ext_capability(struct pci_dev *dev, int cap)
295{
296 u32 header;
557848c3
ZY
297 int ttl;
298 int pos = PCI_CFG_SPACE_SIZE;
1da177e4 299
557848c3
ZY
300 /* minimum 8 bytes per capability */
301 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
302
303 if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
1da177e4
LT
304 return 0;
305
306 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
307 return 0;
308
309 /*
310 * If we have no capabilities, this is indicated by cap ID,
311 * cap version and next pointer all being 0.
312 */
313 if (header == 0)
314 return 0;
315
316 while (ttl-- > 0) {
317 if (PCI_EXT_CAP_ID(header) == cap)
318 return pos;
319
320 pos = PCI_EXT_CAP_NEXT(header);
557848c3 321 if (pos < PCI_CFG_SPACE_SIZE)
1da177e4
LT
322 break;
323
324 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
325 break;
326 }
327
328 return 0;
329}
3a720d72 330EXPORT_SYMBOL_GPL(pci_find_ext_capability);
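/*
 * Usage sketch (illustrative, not part of this file): probing for the
 * Advanced Error Reporting extended capability.  foo_has_aer is a
 * hypothetical helper name.
 */
#if 0
static bool foo_has_aer(struct pci_dev *pdev)
{
	/* a non-zero return is the config space offset of the AER capability */
	return pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR) != 0;
}
#endif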
1da177e4 331
cf4c43dd
JB
332/**
333 * pci_bus_find_ext_capability - find an extended capability
334 * @bus: the PCI bus to query
335 * @devfn: PCI device to query
336 * @cap: capability code
337 *
338 * Like pci_find_ext_capability() but works for pci devices that do not have a
339 * pci_dev structure set up yet.
340 *
341 * Returns the address of the requested capability structure within the
342 * device's PCI configuration space or 0 in case the device does not
343 * support it.
344 */
345int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
346 int cap)
347{
348 u32 header;
349 int ttl;
350 int pos = PCI_CFG_SPACE_SIZE;
351
352 /* minimum 8 bytes per capability */
353 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
354
355 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
356 return 0;
357 if (header == 0xffffffff || header == 0)
358 return 0;
359
360 while (ttl-- > 0) {
361 if (PCI_EXT_CAP_ID(header) == cap)
362 return pos;
363
364 pos = PCI_EXT_CAP_NEXT(header);
365 if (pos < PCI_CFG_SPACE_SIZE)
366 break;
367
368 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
369 break;
370 }
371
372 return 0;
373}
374
687d5fe3
ME
375static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
376{
377 int rc, ttl = PCI_FIND_CAP_TTL;
378 u8 cap, mask;
379
380 if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
381 mask = HT_3BIT_CAP_MASK;
382 else
383 mask = HT_5BIT_CAP_MASK;
384
385 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
386 PCI_CAP_ID_HT, &ttl);
387 while (pos) {
388 rc = pci_read_config_byte(dev, pos + 3, &cap);
389 if (rc != PCIBIOS_SUCCESSFUL)
390 return 0;
391
392 if ((cap & mask) == ht_cap)
393 return pos;
394
47a4d5be
BG
395 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
396 pos + PCI_CAP_LIST_NEXT,
687d5fe3
ME
397 PCI_CAP_ID_HT, &ttl);
398 }
399
400 return 0;
401}
402/**
403 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
404 * @dev: PCI device to query
405 * @pos: Position from which to continue searching
406 * @ht_cap: Hypertransport capability code
407 *
408 * To be used in conjunction with pci_find_ht_capability() to search for
409 * all capabilities matching @ht_cap. @pos should always be a value returned
410 * from pci_find_ht_capability().
411 *
412 * NB. To be 100% safe against broken PCI devices, the caller should take
413 * steps to avoid an infinite loop.
414 */
415int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
416{
417 return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
418}
419EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
420
421/**
422 * pci_find_ht_capability - query a device's Hypertransport capabilities
423 * @dev: PCI device to query
424 * @ht_cap: Hypertransport capability code
425 *
426 * Tell if a device supports a given Hypertransport capability.
427 * Returns an address within the device's PCI configuration space
428 * or 0 in case the device does not support the requested capability.
429 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
430 * which has a Hypertransport capability matching @ht_cap.
431 */
432int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
433{
434 int pos;
435
436 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
437 if (pos)
438 pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
439
440 return pos;
441}
442EXPORT_SYMBOL_GPL(pci_find_ht_capability);
443
1da177e4
LT
444/**
445 * pci_find_parent_resource - return resource region of parent bus of given region
446 * @dev: PCI device structure contains resources to be searched
447 * @res: child resource record for which parent is sought
448 *
449 * For given resource region of given device, return the resource
450 * region of parent bus the given region is contained in or where
451 * it should be allocated from.
452 */
453struct resource *
454pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
455{
456 const struct pci_bus *bus = dev->bus;
457 int i;
89a74ecc 458 struct resource *best = NULL, *r;
1da177e4 459
89a74ecc 460 pci_bus_for_each_resource(bus, r, i) {
1da177e4
LT
461 if (!r)
462 continue;
463 if (res->start && !(res->start >= r->start && res->end <= r->end))
464 continue; /* Not contained */
465 if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
466 continue; /* Wrong type */
467 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
468 return r; /* Exact match */
8c8def26
LT
469 /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
470 if (r->flags & IORESOURCE_PREFETCH)
471 continue;
472 /* .. but we can put a prefetchable resource inside a non-prefetchable one */
473 if (!best)
474 best = r;
1da177e4
LT
475 }
476 return best;
477}
478
064b53db
JL
479/**
480 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
481 * @dev: PCI device to have its BARs restored
482 *
483 * Restore the BAR values for a given device, so as to make it
484 * accessible by its driver.
485 */
ad668599 486static void
064b53db
JL
487pci_restore_bars(struct pci_dev *dev)
488{
bc5f5a82 489 int i;
064b53db 490
bc5f5a82 491 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
14add80b 492 pci_update_resource(dev, i);
064b53db
JL
493}
494
961d9120
RW
495static struct pci_platform_pm_ops *pci_platform_pm;
496
497int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
498{
eb9d0fe4
RW
499 if (!ops->is_manageable || !ops->set_state || !ops->choose_state
500 || !ops->sleep_wake || !ops->can_wakeup)
961d9120
RW
501 return -EINVAL;
502 pci_platform_pm = ops;
503 return 0;
504}
505
506static inline bool platform_pci_power_manageable(struct pci_dev *dev)
507{
508 return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
509}
510
511static inline int platform_pci_set_power_state(struct pci_dev *dev,
512 pci_power_t t)
513{
514 return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
515}
516
517static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
518{
519 return pci_platform_pm ?
520 pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
521}
8f7020d3 522
eb9d0fe4
RW
523static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
524{
525 return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
526}
527
528static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
529{
530 return pci_platform_pm ?
531 pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
532}
533
b67ea761
RW
534static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
535{
536 return pci_platform_pm ?
537 pci_platform_pm->run_wake(dev, enable) : -ENODEV;
538}
539
1da177e4 540/**
44e4e66e
RW
541 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
542 * given PCI device
543 * @dev: PCI device to handle.
44e4e66e 544 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
1da177e4 545 *
44e4e66e
RW
546 * RETURN VALUE:
547 * -EINVAL if the requested state is invalid.
548 * -EIO if device does not support PCI PM or its PM capabilities register has a
549 * wrong version, or device doesn't support the requested state.
550 * 0 if device already is in the requested state.
551 * 0 if device's power state has been successfully changed.
1da177e4 552 */
f00a20ef 553static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
1da177e4 554{
337001b6 555 u16 pmcsr;
44e4e66e 556 bool need_restore = false;
1da177e4 557
4a865905
RW
558 /* Check if we're already there */
559 if (dev->current_state == state)
560 return 0;
561
337001b6 562 if (!dev->pm_cap)
cca03dec
AL
563 return -EIO;
564
44e4e66e
RW
565 if (state < PCI_D0 || state > PCI_D3hot)
566 return -EINVAL;
567
1da177e4
LT
568 /* Validate current state:
569 * Can enter D0 from any state, but we can only go deeper
570 * into a sleep state if we're already in a low power state
571 */
4a865905 572 if (state != PCI_D0 && dev->current_state <= PCI_D3cold
44e4e66e 573 && dev->current_state > state) {
80ccba11
BH
574 dev_err(&dev->dev, "invalid power transition "
575 "(from state %d to %d)\n", dev->current_state, state);
1da177e4 576 return -EINVAL;
44e4e66e 577 }
1da177e4 578
1da177e4 579 /* check if this device supports the desired state */
337001b6
RW
580 if ((state == PCI_D1 && !dev->d1_support)
581 || (state == PCI_D2 && !dev->d2_support))
3fe9d19f 582 return -EIO;
1da177e4 583
337001b6 584 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
064b53db 585
32a36585 586 /* If we're (effectively) in D3, force entire word to 0.
1da177e4
LT
587 * This doesn't affect PME_Status, disables PME_En, and
588 * sets PowerState to 0.
589 */
32a36585 590 switch (dev->current_state) {
d3535fbb
JL
591 case PCI_D0:
592 case PCI_D1:
593 case PCI_D2:
594 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
595 pmcsr |= state;
596 break;
f62795f1
RW
597 case PCI_D3hot:
598 case PCI_D3cold:
32a36585
JL
599 case PCI_UNKNOWN: /* Boot-up */
600 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
f00a20ef 601 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
44e4e66e 602 need_restore = true;
32a36585 603 /* Fall-through: force to D0 */
32a36585 604 default:
d3535fbb 605 pmcsr = 0;
32a36585 606 break;
1da177e4
LT
607 }
608
609 /* enter specified state */
337001b6 610 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1da177e4
LT
611
612 /* Mandatory power management transition delays */
613 /* see PCI PM 1.1 5.6.1 table 18 */
614 if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
1ae861e6 615 pci_dev_d3_sleep(dev);
1da177e4 616 else if (state == PCI_D2 || dev->current_state == PCI_D2)
aa8c6c93 617 udelay(PCI_PM_D2_DELAY);
1da177e4 618
e13cdbd7
RW
619 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
620 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
621 if (dev->current_state != state && printk_ratelimit())
622 dev_info(&dev->dev, "Refused to change power state, "
623 "currently in D%d\n", dev->current_state);
064b53db
JL
624
625 /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
626 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
627 * from D3hot to D0 _may_ perform an internal reset, thereby
628 * going to "D0 Uninitialized" rather than "D0 Initialized".
629 * For example, at least some versions of the 3c905B and the
630 * 3c556B exhibit this behaviour.
631 *
632 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
633 * devices in a D3hot state at boot. Consequently, we need to
634 * restore at least the BARs so that the device will be
635 * accessible to its driver.
636 */
637 if (need_restore)
638 pci_restore_bars(dev);
639
f00a20ef 640 if (dev->bus->self)
7d715a6c
SL
641 pcie_aspm_pm_state_change(dev->bus->self);
642
1da177e4
LT
643 return 0;
644}
645
44e4e66e
RW
646/**
647 * pci_update_current_state - Read PCI power state of given device from its
648 * PCI PM registers and cache it
649 * @dev: PCI device to handle.
f06fc0b6 650 * @state: State to cache in case the device doesn't have the PM capability
44e4e66e 651 */
73410429 652void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
44e4e66e 653{
337001b6 654 if (dev->pm_cap) {
44e4e66e
RW
655 u16 pmcsr;
656
337001b6 657 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
44e4e66e 658 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
f06fc0b6
RW
659 } else {
660 dev->current_state = state;
44e4e66e
RW
661 }
662}
663
0e5dd46b
RW
664/**
665 * pci_platform_power_transition - Use platform to change device power state
666 * @dev: PCI device to handle.
667 * @state: State to put the device into.
668 */
669static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
670{
671 int error;
672
673 if (platform_pci_power_manageable(dev)) {
674 error = platform_pci_set_power_state(dev, state);
675 if (!error)
676 pci_update_current_state(dev, state);
b51306c6
AH
677 /* Fall back to PCI_D0 if native PM is not supported */
678 if (!dev->pm_cap)
679 dev->current_state = PCI_D0;
0e5dd46b
RW
680 } else {
681 error = -ENODEV;
682 /* Fall back to PCI_D0 if native PM is not supported */
b3bad72e
RW
683 if (!dev->pm_cap)
684 dev->current_state = PCI_D0;
0e5dd46b
RW
685 }
686
687 return error;
688}
689
690/**
691 * __pci_start_power_transition - Start power transition of a PCI device
692 * @dev: PCI device to handle.
693 * @state: State to put the device into.
694 */
695static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
696{
697 if (state == PCI_D0)
698 pci_platform_power_transition(dev, PCI_D0);
699}
700
701/**
702 * __pci_complete_power_transition - Complete power transition of a PCI device
703 * @dev: PCI device to handle.
704 * @state: State to put the device into.
705 *
706 * This function should not be called directly by device drivers.
707 */
708int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
709{
cc2893b6 710 return state >= PCI_D0 ?
0e5dd46b
RW
711 pci_platform_power_transition(dev, state) : -EINVAL;
712}
713EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
714
44e4e66e
RW
715/**
716 * pci_set_power_state - Set the power state of a PCI device
717 * @dev: PCI device to handle.
718 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
719 *
877d0310 720 * Transition a device to a new power state, using the platform firmware and/or
44e4e66e
RW
721 * the device's PCI PM registers.
722 *
723 * RETURN VALUE:
724 * -EINVAL if the requested state is invalid.
725 * -EIO if device does not support PCI PM or its PM capabilities register has a
726 * wrong version, or device doesn't support the requested state.
727 * 0 if device already is in the requested state.
728 * 0 if device's power state has been successfully changed.
729 */
730int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
731{
337001b6 732 int error;
44e4e66e
RW
733
734 /* bound the state we're entering */
735 if (state > PCI_D3hot)
736 state = PCI_D3hot;
737 else if (state < PCI_D0)
738 state = PCI_D0;
739 else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
740 /*
741 * If the device or the parent bridge do not support PCI PM,
742 * ignore the request if we're doing anything other than putting
743 * it into D0 (which would only happen on boot).
744 */
745 return 0;
746
0e5dd46b
RW
747 __pci_start_power_transition(dev, state);
748
979b1791
AC
749 /* This device is quirked not to be put into D3, so
750 don't put it in D3 */
751 if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
752 return 0;
44e4e66e 753
f00a20ef 754 error = pci_raw_set_power_state(dev, state);
44e4e66e 755
0e5dd46b
RW
756 if (!__pci_complete_power_transition(dev, state))
757 error = 0;
1a680b7c
NC
758 /*
759 * When aspm_policy is "powersave" this call ensures
760 * that ASPM is configured.
761 */
762 if (!error && dev->bus->self)
763 pcie_aspm_powersave_config_link(dev->bus->self);
44e4e66e
RW
764
765 return error;
766}
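/*
 * Usage sketch (illustrative, not part of this file): a hypothetical
 * driver's shutdown handler putting its device into D3hot after disabling
 * it.  foo_shutdown is an assumed name for illustration.
 */
#if 0
static void foo_shutdown(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);	/* deeper states are clamped to D3hot */
}
#endif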
767
1da177e4
LT
768/**
769 * pci_choose_state - Choose the power state of a PCI device
770 * @dev: PCI device to be suspended
771 * @state: target sleep state for the whole system. This is the value
772 * that is passed to suspend() function.
773 *
774 * Returns PCI power state suitable for given device and given system
775 * message.
776 */
777
778pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
779{
ab826ca4 780 pci_power_t ret;
0f64474b 781
1da177e4
LT
782 if (!pci_find_capability(dev, PCI_CAP_ID_PM))
783 return PCI_D0;
784
961d9120
RW
785 ret = platform_pci_choose_state(dev);
786 if (ret != PCI_POWER_ERROR)
787 return ret;
ca078bae
PM
788
789 switch (state.event) {
790 case PM_EVENT_ON:
791 return PCI_D0;
792 case PM_EVENT_FREEZE:
b887d2e6
DB
793 case PM_EVENT_PRETHAW:
794 /* REVISIT both freeze and pre-thaw "should" use D0 */
ca078bae 795 case PM_EVENT_SUSPEND:
3a2d5b70 796 case PM_EVENT_HIBERNATE:
ca078bae 797 return PCI_D3hot;
1da177e4 798 default:
80ccba11
BH
799 dev_info(&dev->dev, "unrecognized suspend event %d\n",
800 state.event);
1da177e4
LT
801 BUG();
802 }
803 return PCI_D0;
804}
805
806EXPORT_SYMBOL(pci_choose_state);
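/*
 * Usage sketch (illustrative, not part of this file): the classic legacy
 * suspend pattern, letting pci_choose_state() pick the target state for
 * the given system sleep message.  foo_suspend is an assumed name.
 */
#if 0
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}
#endif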
807
89858517
YZ
808#define PCI_EXP_SAVE_REGS 7
809
1b6b8ce2
YZ
810#define pcie_cap_has_devctl(type, flags) 1
811#define pcie_cap_has_lnkctl(type, flags) \
812 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
813 (type == PCI_EXP_TYPE_ROOT_PORT || \
814 type == PCI_EXP_TYPE_ENDPOINT || \
815 type == PCI_EXP_TYPE_LEG_END))
816#define pcie_cap_has_sltctl(type, flags) \
817 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
818 ((type == PCI_EXP_TYPE_ROOT_PORT) || \
819 (type == PCI_EXP_TYPE_DOWNSTREAM && \
820 (flags & PCI_EXP_FLAGS_SLOT))))
821#define pcie_cap_has_rtctl(type, flags) \
822 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
823 (type == PCI_EXP_TYPE_ROOT_PORT || \
824 type == PCI_EXP_TYPE_RC_EC))
825#define pcie_cap_has_devctl2(type, flags) \
826 ((flags & PCI_EXP_FLAGS_VERS) > 1)
827#define pcie_cap_has_lnkctl2(type, flags) \
828 ((flags & PCI_EXP_FLAGS_VERS) > 1)
829#define pcie_cap_has_sltctl2(type, flags) \
830 ((flags & PCI_EXP_FLAGS_VERS) > 1)
831
34a4876e
YL
832static struct pci_cap_saved_state *pci_find_saved_cap(
833 struct pci_dev *pci_dev, char cap)
834{
835 struct pci_cap_saved_state *tmp;
836 struct hlist_node *pos;
837
838 hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
839 if (tmp->cap.cap_nr == cap)
840 return tmp;
841 }
842 return NULL;
843}
844
b56a5a23
MT
845static int pci_save_pcie_state(struct pci_dev *dev)
846{
847 int pos, i = 0;
848 struct pci_cap_saved_state *save_state;
849 u16 *cap;
1b6b8ce2 850 u16 flags;
b56a5a23 851
06a1cbaf
KK
852 pos = pci_pcie_cap(dev);
853 if (!pos)
b56a5a23
MT
854 return 0;
855
9f35575d 856 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
b56a5a23 857 if (!save_state) {
e496b617 858 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
b56a5a23
MT
859 return -ENOMEM;
860 }
24a4742f 861 cap = (u16 *)&save_state->cap.data[0];
b56a5a23 862
1b6b8ce2
YZ
863 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
864
865 if (pcie_cap_has_devctl(dev->pcie_type, flags))
866 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
867 if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
868 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
869 if (pcie_cap_has_sltctl(dev->pcie_type, flags))
870 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
871 if (pcie_cap_has_rtctl(dev->pcie_type, flags))
872 pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
873 if (pcie_cap_has_devctl2(dev->pcie_type, flags))
874 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
875 if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
876 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
877 if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
878 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
63f4898a 879
b56a5a23
MT
880 return 0;
881}
882
883static void pci_restore_pcie_state(struct pci_dev *dev)
884{
885 int i = 0, pos;
886 struct pci_cap_saved_state *save_state;
887 u16 *cap;
1b6b8ce2 888 u16 flags;
b56a5a23
MT
889
890 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
891 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
892 if (!save_state || pos <= 0)
893 return;
24a4742f 894 cap = (u16 *)&save_state->cap.data[0];
b56a5a23 895
1b6b8ce2
YZ
896 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
897
898 if (pcie_cap_has_devctl(dev->pcie_type, flags))
899 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
900 if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
901 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
902 if (pcie_cap_has_sltctl(dev->pcie_type, flags))
903 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
904 if (pcie_cap_has_rtctl(dev->pcie_type, flags))
905 pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
906 if (pcie_cap_has_devctl2(dev->pcie_type, flags))
907 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
908 if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
909 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
910 if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
911 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
b56a5a23
MT
912}
913
cc692a5f
SH
914
915static int pci_save_pcix_state(struct pci_dev *dev)
916{
63f4898a 917 int pos;
cc692a5f 918 struct pci_cap_saved_state *save_state;
cc692a5f
SH
919
920 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
921 if (pos <= 0)
922 return 0;
923
f34303de 924 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
cc692a5f 925 if (!save_state) {
e496b617 926 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
cc692a5f
SH
927 return -ENOMEM;
928 }
cc692a5f 929
24a4742f
AW
930 pci_read_config_word(dev, pos + PCI_X_CMD,
931 (u16 *)save_state->cap.data);
63f4898a 932
cc692a5f
SH
933 return 0;
934}
935
936static void pci_restore_pcix_state(struct pci_dev *dev)
937{
938 int i = 0, pos;
939 struct pci_cap_saved_state *save_state;
940 u16 *cap;
941
942 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
943 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
944 if (!save_state || pos <= 0)
945 return;
24a4742f 946 cap = (u16 *)&save_state->cap.data[0];
cc692a5f
SH
947
948 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
cc692a5f
SH
949}
950
951
1da177e4
LT
952/**
953 * pci_save_state - save the PCI configuration space of a device before suspending
954 * @dev: PCI device that we're dealing with
1da177e4
LT
955 */
956int
957pci_save_state(struct pci_dev *dev)
958{
959 int i;
960 /* XXX: 100% dword access ok here? */
961 for (i = 0; i < 16; i++)
9e0b5b2c 962 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
aa8c6c93 963 dev->state_saved = true;
b56a5a23
MT
964 if ((i = pci_save_pcie_state(dev)) != 0)
965 return i;
cc692a5f
SH
966 if ((i = pci_save_pcix_state(dev)) != 0)
967 return i;
1da177e4
LT
968 return 0;
969}
970
ebfc5b80
RW
971static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
972 u32 saved_val, int retry)
973{
974 u32 val;
975
976 pci_read_config_dword(pdev, offset, &val);
977 if (val == saved_val)
978 return;
979
980 for (;;) {
981 dev_dbg(&pdev->dev, "restoring config space at offset "
982 "%#x (was %#x, writing %#x)\n", offset, val, saved_val);
983 pci_write_config_dword(pdev, offset, saved_val);
984 if (retry-- <= 0)
985 return;
986
987 pci_read_config_dword(pdev, offset, &val);
988 if (val == saved_val)
989 return;
990
991 mdelay(1);
992 }
993}
994
a6cb9ee7
RW
995static void pci_restore_config_space_range(struct pci_dev *pdev,
996 int start, int end, int retry)
ebfc5b80
RW
997{
998 int index;
999
1000 for (index = end; index >= start; index--)
1001 pci_restore_config_dword(pdev, 4 * index,
1002 pdev->saved_config_space[index],
1003 retry);
1004}
1005
a6cb9ee7
RW
1006static void pci_restore_config_space(struct pci_dev *pdev)
1007{
1008 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1009 pci_restore_config_space_range(pdev, 10, 15, 0);
1010 /* Restore BARs before the command register. */
1011 pci_restore_config_space_range(pdev, 4, 9, 10);
1012 pci_restore_config_space_range(pdev, 0, 3, 0);
1013 } else {
1014 pci_restore_config_space_range(pdev, 0, 15, 0);
1015 }
1016}
1017
1da177e4
LT
1018/**
1019 * pci_restore_state - Restore the saved state of a PCI device
1020 * @dev: PCI device that we're dealing with
1da177e4 1021 */
1d3c16a8 1022void pci_restore_state(struct pci_dev *dev)
1da177e4 1023{
c82f63e4 1024 if (!dev->state_saved)
1d3c16a8 1025 return;
4b77b0a2 1026
b56a5a23
MT
1027 /* PCI Express register must be restored first */
1028 pci_restore_pcie_state(dev);
1900ca13 1029 pci_restore_ats_state(dev);
b56a5a23 1030
a6cb9ee7 1031 pci_restore_config_space(dev);
ebfc5b80 1032
cc692a5f 1033 pci_restore_pcix_state(dev);
41017f0c 1034 pci_restore_msi_state(dev);
8c5cdb6a 1035 pci_restore_iov_state(dev);
8fed4b65 1036
4b77b0a2 1037 dev->state_saved = false;
1da177e4
LT
1038}
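/*
 * Usage sketch (illustrative, not part of this file): the resume
 * counterpart of the legacy suspend pattern above; pci_restore_state() is
 * a no-op unless pci_save_state() ran earlier.  foo_resume is an assumed
 * name.
 */
#if 0
static int foo_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}
#endif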
1039
ffbdd3f7
AW
1040struct pci_saved_state {
1041 u32 config_space[16];
1042 struct pci_cap_saved_data cap[0];
1043};
1044
1045/**
1046 * pci_store_saved_state - Allocate and return an opaque struct containing
1047 * the device's saved state.
1048 * @dev: PCI device that we're dealing with
1049 *
1050 * Return NULL if no state or error.
1051 */
1052struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1053{
1054 struct pci_saved_state *state;
1055 struct pci_cap_saved_state *tmp;
1056 struct pci_cap_saved_data *cap;
1057 struct hlist_node *pos;
1058 size_t size;
1059
1060 if (!dev->state_saved)
1061 return NULL;
1062
1063 size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1064
1065 hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
1066 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1067
1068 state = kzalloc(size, GFP_KERNEL);
1069 if (!state)
1070 return NULL;
1071
1072 memcpy(state->config_space, dev->saved_config_space,
1073 sizeof(state->config_space));
1074
1075 cap = state->cap;
1076 hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
1077 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1078 memcpy(cap, &tmp->cap, len);
1079 cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1080 }
1081 /* Empty cap_save terminates list */
1082
1083 return state;
1084}
1085EXPORT_SYMBOL_GPL(pci_store_saved_state);
1086
1087/**
1088 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1089 * @dev: PCI device that we're dealing with
1090 * @state: Saved state returned from pci_store_saved_state()
1091 */
1092int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
1093{
1094 struct pci_cap_saved_data *cap;
1095
1096 dev->state_saved = false;
1097
1098 if (!state)
1099 return 0;
1100
1101 memcpy(dev->saved_config_space, state->config_space,
1102 sizeof(state->config_space));
1103
1104 cap = state->cap;
1105 while (cap->size) {
1106 struct pci_cap_saved_state *tmp;
1107
1108 tmp = pci_find_saved_cap(dev, cap->cap_nr);
1109 if (!tmp || tmp->cap.size != cap->size)
1110 return -EINVAL;
1111
1112 memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1113 cap = (struct pci_cap_saved_data *)((u8 *)cap +
1114 sizeof(struct pci_cap_saved_data) + cap->size);
1115 }
1116
1117 dev->state_saved = true;
1118 return 0;
1119}
1120EXPORT_SYMBOL_GPL(pci_load_saved_state);
1121
1122/**
1123 * pci_load_and_free_saved_state - Reload the saved state pointed to by state,
1124 * and free the memory allocated for it.
1125 * @dev: PCI device that we're dealing with
1126 * @state: Pointer to saved state returned from pci_store_saved_state()
1127 */
1128int pci_load_and_free_saved_state(struct pci_dev *dev,
1129 struct pci_saved_state **state)
1130{
1131 int ret = pci_load_saved_state(dev, *state);
1132 kfree(*state);
1133 *state = NULL;
1134 return ret;
1135}
1136EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
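/*
 * Usage sketch (illustrative, not part of this file): keeping an opaque
 * snapshot of the device state across a device reset and reloading it
 * afterwards.  foo_reset_with_saved_state is an assumed name; the actual
 * reset step is elided.
 */
#if 0
static int foo_reset_with_saved_state(struct pci_dev *pdev)
{
	struct pci_saved_state *state;

	pci_save_state(pdev);
	state = pci_store_saved_state(pdev);	/* kmalloc'ed snapshot */
	if (!state)
		return -ENOMEM;

	/* ... reset the device here ... */

	pci_load_and_free_saved_state(pdev, &state);	/* also frees 'state' */
	pci_restore_state(pdev);
	return 0;
}
#endif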
1137
38cc1302
HS
1138static int do_pci_enable_device(struct pci_dev *dev, int bars)
1139{
1140 int err;
1141
1142 err = pci_set_power_state(dev, PCI_D0);
1143 if (err < 0 && err != -EIO)
1144 return err;
1145 err = pcibios_enable_device(dev, bars);
1146 if (err < 0)
1147 return err;
1148 pci_fixup_device(pci_fixup_enable, dev);
1149
1150 return 0;
1151}
1152
1153/**
0b62e13b 1154 * pci_reenable_device - Resume abandoned device
38cc1302
HS
1155 * @dev: PCI device to be resumed
1156 *
1157 * Note this function is a backend of pci_default_resume and is not supposed
1158 * to be called by normal code; write a proper resume handler and use that instead.
1159 */
0b62e13b 1160int pci_reenable_device(struct pci_dev *dev)
38cc1302 1161{
296ccb08 1162 if (pci_is_enabled(dev))
38cc1302
HS
1163 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1164 return 0;
1165}
1166
b718989d
BH
1167static int __pci_enable_device_flags(struct pci_dev *dev,
1168 resource_size_t flags)
1da177e4
LT
1169{
1170 int err;
b718989d 1171 int i, bars = 0;
1da177e4 1172
97c145f7
JB
1173 /*
1174 * Power state could be unknown at this point, either due to a fresh
1175 * boot or a device removal call. So get the current power state
1176 * so that things like MSI message writing will behave as expected
1177 * (e.g. if the device really is in D0 at enable time).
1178 */
1179 if (dev->pm_cap) {
1180 u16 pmcsr;
1181 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1182 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1183 }
1184
9fb625c3
HS
1185 if (atomic_add_return(1, &dev->enable_cnt) > 1)
1186 return 0; /* already enabled */
1187
497f16f2
YL
1188 /* only skip sriov related */
1189 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1190 if (dev->resource[i].flags & flags)
1191 bars |= (1 << i);
1192 for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
b718989d
BH
1193 if (dev->resource[i].flags & flags)
1194 bars |= (1 << i);
1195
38cc1302 1196 err = do_pci_enable_device(dev, bars);
95a62965 1197 if (err < 0)
38cc1302 1198 atomic_dec(&dev->enable_cnt);
9fb625c3 1199 return err;
1da177e4
LT
1200}
1201
b718989d
BH
1202/**
1203 * pci_enable_device_io - Initialize a device for use with IO space
1204 * @dev: PCI device to be initialized
1205 *
1206 * Initialize device before it's used by a driver. Ask low-level code
1207 * to enable I/O resources. Wake up the device if it was suspended.
1208 * Beware, this function can fail.
1209 */
1210int pci_enable_device_io(struct pci_dev *dev)
1211{
1212 return __pci_enable_device_flags(dev, IORESOURCE_IO);
1213}
1214
1215/**
1216 * pci_enable_device_mem - Initialize a device for use with Memory space
1217 * @dev: PCI device to be initialized
1218 *
1219 * Initialize device before it's used by a driver. Ask low-level code
1220 * to enable Memory resources. Wake up the device if it was suspended.
1221 * Beware, this function can fail.
1222 */
1223int pci_enable_device_mem(struct pci_dev *dev)
1224{
1225 return __pci_enable_device_flags(dev, IORESOURCE_MEM);
1226}
1227
bae94d02
IPG
1228/**
1229 * pci_enable_device - Initialize device before it's used by a driver.
1230 * @dev: PCI device to be initialized
1231 *
1232 * Initialize device before it's used by a driver. Ask low-level code
1233 * to enable I/O and memory. Wake up the device if it was suspended.
1234 * Beware, this function can fail.
1235 *
1236 * Note we don't actually enable the device many times if we call
1237 * this function repeatedly (we just increment the count).
1238 */
1239int pci_enable_device(struct pci_dev *dev)
1240{
b718989d 1241 return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
bae94d02
IPG
1242}
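/*
 * Usage sketch (illustrative, not part of this file): the usual unmanaged
 * probe sequence of enable, request regions, enable bus mastering.  The
 * names foo_probe and "foo" are assumptions for illustration.
 */
#if 0
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "foo");
	if (err) {
		pci_disable_device(pdev);
		return err;
	}

	pci_set_master(pdev);
	return 0;
}
#endif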
1243
9ac7849e
TH
1244/*
1245 * Managed PCI resources. This manages device on/off, intx/msi/msix
1246 * on/off and BAR regions. pci_dev itself records msi/msix status, so
1247 * there's no need to track it separately. pci_devres is initialized
1248 * when a device is enabled using managed PCI device enable interface.
1249 */
1250struct pci_devres {
7f375f32
TH
1251 unsigned int enabled:1;
1252 unsigned int pinned:1;
9ac7849e
TH
1253 unsigned int orig_intx:1;
1254 unsigned int restore_intx:1;
1255 u32 region_mask;
1256};
1257
1258static void pcim_release(struct device *gendev, void *res)
1259{
1260 struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
1261 struct pci_devres *this = res;
1262 int i;
1263
1264 if (dev->msi_enabled)
1265 pci_disable_msi(dev);
1266 if (dev->msix_enabled)
1267 pci_disable_msix(dev);
1268
1269 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1270 if (this->region_mask & (1 << i))
1271 pci_release_region(dev, i);
1272
1273 if (this->restore_intx)
1274 pci_intx(dev, this->orig_intx);
1275
7f375f32 1276 if (this->enabled && !this->pinned)
9ac7849e
TH
1277 pci_disable_device(dev);
1278}
1279
1280static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
1281{
1282 struct pci_devres *dr, *new_dr;
1283
1284 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1285 if (dr)
1286 return dr;
1287
1288 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1289 if (!new_dr)
1290 return NULL;
1291 return devres_get(&pdev->dev, new_dr, NULL, NULL);
1292}
1293
1294static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
1295{
1296 if (pci_is_managed(pdev))
1297 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1298 return NULL;
1299}
1300
1301/**
1302 * pcim_enable_device - Managed pci_enable_device()
1303 * @pdev: PCI device to be initialized
1304 *
1305 * Managed pci_enable_device().
1306 */
1307int pcim_enable_device(struct pci_dev *pdev)
1308{
1309 struct pci_devres *dr;
1310 int rc;
1311
1312 dr = get_pci_dr(pdev);
1313 if (unlikely(!dr))
1314 return -ENOMEM;
b95d58ea
TH
1315 if (dr->enabled)
1316 return 0;
9ac7849e
TH
1317
1318 rc = pci_enable_device(pdev);
1319 if (!rc) {
1320 pdev->is_managed = 1;
7f375f32 1321 dr->enabled = 1;
9ac7849e
TH
1322 }
1323 return rc;
1324}
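/*
 * Usage sketch (illustrative, not part of this file): the managed variant
 * of a probe routine.  Because the enable is device-managed, no matching
 * disable is needed in an error path or in remove() unless the device has
 * been pinned with pcim_pin_device().  foo_probe_managed is an assumed name.
 */
#if 0
static int foo_probe_managed(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);
	return 0;
}
#endif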
1325
1326/**
1327 * pcim_pin_device - Pin managed PCI device
1328 * @pdev: PCI device to pin
1329 *
1330 * Pin managed PCI device @pdev. Pinned device won't be disabled on
1331 * driver detach. @pdev must have been enabled with
1332 * pcim_enable_device().
1333 */
1334void pcim_pin_device(struct pci_dev *pdev)
1335{
1336 struct pci_devres *dr;
1337
1338 dr = find_pci_dr(pdev);
7f375f32 1339 WARN_ON(!dr || !dr->enabled);
9ac7849e 1340 if (dr)
7f375f32 1341 dr->pinned = 1;
9ac7849e
TH
1342}
1343
1da177e4
LT
1344/**
1345 * pcibios_disable_device - disable arch specific PCI resources for device dev
1346 * @dev: the PCI device to disable
1347 *
1348 * Disables architecture specific PCI resources for the device. This
1349 * is the default implementation. Architecture implementations can
1350 * override this.
1351 */
1352void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
1353
fa58d305
RW
1354static void do_pci_disable_device(struct pci_dev *dev)
1355{
1356 u16 pci_command;
1357
1358 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1359 if (pci_command & PCI_COMMAND_MASTER) {
1360 pci_command &= ~PCI_COMMAND_MASTER;
1361 pci_write_config_word(dev, PCI_COMMAND, pci_command);
1362 }
1363
1364 pcibios_disable_device(dev);
1365}
1366
1367/**
1368 * pci_disable_enabled_device - Disable device without updating enable_cnt
1369 * @dev: PCI device to disable
1370 *
1371 * NOTE: This function is a backend of PCI power management routines and is
1372 * not supposed to be called by drivers.
1373 */
1374void pci_disable_enabled_device(struct pci_dev *dev)
1375{
296ccb08 1376 if (pci_is_enabled(dev))
fa58d305
RW
1377 do_pci_disable_device(dev);
1378}
1379
1da177e4
LT
1380/**
1381 * pci_disable_device - Disable PCI device after use
1382 * @dev: PCI device to be disabled
1383 *
1384 * Signal to the system that the PCI device is not in use by the system
1385 * anymore. This only involves disabling PCI bus-mastering, if active.
bae94d02
IPG
1386 *
1387 * Note we don't actually disable the device until all callers of
ee6583f6 1388 * pci_enable_device() have called pci_disable_device().
1da177e4
LT
1389 */
1390void
1391pci_disable_device(struct pci_dev *dev)
1392{
9ac7849e 1393 struct pci_devres *dr;
99dc804d 1394
9ac7849e
TH
1395 dr = find_pci_dr(dev);
1396 if (dr)
7f375f32 1397 dr->enabled = 0;
9ac7849e 1398
bae94d02
IPG
1399 if (atomic_sub_return(1, &dev->enable_cnt) != 0)
1400 return;
1401
fa58d305 1402 do_pci_disable_device(dev);
1da177e4 1403
fa58d305 1404 dev->is_busmaster = 0;
1da177e4
LT
1405}
1406
f7bdd12d
BK
1407/**
1408 * pcibios_set_pcie_reset_state - set reset state for device dev
45e829ea 1409 * @dev: the PCIe device reset
f7bdd12d
BK
1410 * @state: Reset state to enter into
1411 *
1412 *
45e829ea 1413 * Sets the PCIe reset state for the device. This is the default
f7bdd12d
BK
1414 * implementation. Architecture implementations can override this.
1415 */
1416int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
1417 enum pcie_reset_state state)
1418{
1419 return -EINVAL;
1420}
1421
1422/**
1423 * pci_set_pcie_reset_state - set reset state for device dev
45e829ea 1424 * @dev: the PCIe device reset
f7bdd12d
BK
1425 * @state: Reset state to enter into
1426 *
1427 *
1428 * Sets the PCI reset state for the device.
1429 */
1430int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1431{
1432 return pcibios_set_pcie_reset_state(dev, state);
1433}
1434
58ff4633
RW
1435/**
1436 * pci_check_pme_status - Check if given device has generated PME.
1437 * @dev: Device to check.
1438 *
1439 * Check the PME status of the device and if set, clear it and clear PME enable
1440 * (if set). Return 'true' if PME status and PME enable were both set or
1441 * 'false' otherwise.
1442 */
1443bool pci_check_pme_status(struct pci_dev *dev)
1444{
1445 int pmcsr_pos;
1446 u16 pmcsr;
1447 bool ret = false;
1448
1449 if (!dev->pm_cap)
1450 return false;
1451
1452 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1453 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1454 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1455 return false;
1456
1457 /* Clear PME status. */
1458 pmcsr |= PCI_PM_CTRL_PME_STATUS;
1459 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1460 /* Disable PME to avoid interrupt flood. */
1461 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1462 ret = true;
1463 }
1464
1465 pci_write_config_word(dev, pmcsr_pos, pmcsr);
1466
1467 return ret;
1468}
1469
b67ea761
RW
1470/**
1471 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1472 * @dev: Device to handle.
379021d5 1473 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
b67ea761
RW
1474 *
1475 * Check if @dev has generated PME and queue a resume request for it in that
1476 * case.
1477 */
379021d5 1478static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
b67ea761 1479{
379021d5
RW
1480 if (pme_poll_reset && dev->pme_poll)
1481 dev->pme_poll = false;
1482
c125e96f 1483 if (pci_check_pme_status(dev)) {
c125e96f 1484 pci_wakeup_event(dev);
0f953bf6 1485 pm_request_resume(&dev->dev);
c125e96f 1486 }
b67ea761
RW
1487 return 0;
1488}
1489
1490/**
1491 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1492 * @bus: Top bus of the subtree to walk.
1493 */
1494void pci_pme_wakeup_bus(struct pci_bus *bus)
1495{
1496 if (bus)
379021d5 1497 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
b67ea761
RW
1498}
1499
eb9d0fe4
RW
1500/**
1501 * pci_pme_capable - check the capability of PCI device to generate PME#
1502 * @dev: PCI device to handle.
eb9d0fe4
RW
1503 * @state: PCI state from which device will issue PME#.
1504 */
e5899e1b 1505bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
eb9d0fe4 1506{
337001b6 1507 if (!dev->pm_cap)
eb9d0fe4
RW
1508 return false;
1509
337001b6 1510 return !!(dev->pme_support & (1 << state));
eb9d0fe4
RW
1511}
1512
df17e62e
MG
1513static void pci_pme_list_scan(struct work_struct *work)
1514{
379021d5 1515 struct pci_pme_device *pme_dev, *n;
df17e62e
MG
1516
1517 mutex_lock(&pci_pme_list_mutex);
1518 if (!list_empty(&pci_pme_list)) {
379021d5
RW
1519 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1520 if (pme_dev->dev->pme_poll) {
71a83bd7
ZY
1521 struct pci_dev *bridge;
1522
1523 bridge = pme_dev->dev->bus->self;
1524 /*
1525 * If bridge is in low power state, the
1526 * configuration space of subordinate devices
1527 * may not be accessible
1528 */
1529 if (bridge && bridge->current_state != PCI_D0)
1530 continue;
379021d5
RW
1531 pci_pme_wakeup(pme_dev->dev, NULL);
1532 } else {
1533 list_del(&pme_dev->list);
1534 kfree(pme_dev);
1535 }
1536 }
1537 if (!list_empty(&pci_pme_list))
1538 schedule_delayed_work(&pci_pme_work,
1539 msecs_to_jiffies(PME_TIMEOUT));
df17e62e
MG
1540 }
1541 mutex_unlock(&pci_pme_list_mutex);
1542}
1543
eb9d0fe4
RW
1544/**
1545 * pci_pme_active - enable or disable PCI device's PME# function
1546 * @dev: PCI device to handle.
eb9d0fe4
RW
1547 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1548 *
1549 * The caller must verify that the device is capable of generating PME# before
1550 * calling this function with @enable equal to 'true'.
1551 */
5a6c9b60 1552void pci_pme_active(struct pci_dev *dev, bool enable)
eb9d0fe4
RW
1553{
1554 u16 pmcsr;
1555
337001b6 1556 if (!dev->pm_cap)
eb9d0fe4
RW
1557 return;
1558
337001b6 1559 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
eb9d0fe4
RW
1560 /* Clear PME_Status by writing 1 to it and enable PME# */
1561 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1562 if (!enable)
1563 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1564
337001b6 1565 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
eb9d0fe4 1566
df17e62e
MG
1567 /* PCI (as opposed to PCIe) PME requires that the device have
1568 its PME# line hooked up correctly. Not all hardware vendors
1569 do this, so the PME never gets delivered and the device
1570 remains asleep. The easiest way around this is to
1571 periodically walk the list of suspended devices and check
1572 whether any have their PME flag set. The assumption is that
1573 we'll wake up often enough anyway that this won't be a huge
1574 hit, and the power savings from the devices will still be a
1575 win. */
1576
379021d5 1577 if (dev->pme_poll) {
df17e62e
MG
1578 struct pci_pme_device *pme_dev;
1579 if (enable) {
1580 pme_dev = kmalloc(sizeof(struct pci_pme_device),
1581 GFP_KERNEL);
1582 if (!pme_dev)
1583 goto out;
1584 pme_dev->dev = dev;
1585 mutex_lock(&pci_pme_list_mutex);
1586 list_add(&pme_dev->list, &pci_pme_list);
1587 if (list_is_singular(&pci_pme_list))
1588 schedule_delayed_work(&pci_pme_work,
1589 msecs_to_jiffies(PME_TIMEOUT));
1590 mutex_unlock(&pci_pme_list_mutex);
1591 } else {
1592 mutex_lock(&pci_pme_list_mutex);
1593 list_for_each_entry(pme_dev, &pci_pme_list, list) {
1594 if (pme_dev->dev == dev) {
1595 list_del(&pme_dev->list);
1596 kfree(pme_dev);
1597 break;
1598 }
1599 }
1600 mutex_unlock(&pci_pme_list_mutex);
1601 }
1602 }
1603
1604out:
85b8582d 1605 dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
eb9d0fe4
RW
1606}
1607
1da177e4 1608/**
6cbf8214 1609 * __pci_enable_wake - enable PCI device as wakeup event source
075c1771
DB
1610 * @dev: PCI device affected
1611 * @state: PCI state from which device will issue wakeup events
6cbf8214 1612 * @runtime: True if the events are to be generated at run time
075c1771
DB
1613 * @enable: True to enable event generation; false to disable
1614 *
1615 * This enables the device as a wakeup event source, or disables it.
1616 * When such events involves platform-specific hooks, those hooks are
1617 * called automatically by this routine.
1618 *
1619 * Devices with legacy power management (no standard PCI PM capabilities)
eb9d0fe4 1620 * always require such platform hooks.
075c1771 1621 *
eb9d0fe4
RW
1622 * RETURN VALUE:
1623 * 0 is returned on success
1624 * -EINVAL is returned if device is not supposed to wake up the system
1625 * Error code depending on the platform is returned if both the platform and
1626 * the native mechanism fail to enable the generation of wake-up events
1da177e4 1627 */
6cbf8214
RW
1628int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1629 bool runtime, bool enable)
1da177e4 1630{
5bcc2fb4 1631 int ret = 0;
075c1771 1632
6cbf8214 1633 if (enable && !runtime && !device_may_wakeup(&dev->dev))
eb9d0fe4 1634 return -EINVAL;
1da177e4 1635
e80bb09d
RW
1636 /* Don't do the same thing twice in a row for one device. */
1637 if (!!enable == !!dev->wakeup_prepared)
1638 return 0;
1639
eb9d0fe4
RW
1640 /*
1641 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1642 * Anderson we should be doing PME# wake enable followed by ACPI wake
1643 * enable. To disable wake-up we call the platform first, for symmetry.
075c1771 1644 */
1da177e4 1645
5bcc2fb4
RW
1646 if (enable) {
1647 int error;
1da177e4 1648
5bcc2fb4
RW
1649 if (pci_pme_capable(dev, state))
1650 pci_pme_active(dev, true);
1651 else
1652 ret = 1;
6cbf8214
RW
1653 error = runtime ? platform_pci_run_wake(dev, true) :
1654 platform_pci_sleep_wake(dev, true);
5bcc2fb4
RW
1655 if (ret)
1656 ret = error;
e80bb09d
RW
1657 if (!ret)
1658 dev->wakeup_prepared = true;
5bcc2fb4 1659 } else {
6cbf8214
RW
1660 if (runtime)
1661 platform_pci_run_wake(dev, false);
1662 else
1663 platform_pci_sleep_wake(dev, false);
5bcc2fb4 1664 pci_pme_active(dev, false);
e80bb09d 1665 dev->wakeup_prepared = false;
5bcc2fb4 1666 }
1da177e4 1667
5bcc2fb4 1668 return ret;
eb9d0fe4 1669}
6cbf8214 1670EXPORT_SYMBOL(__pci_enable_wake);
1da177e4 1671
0235c4fc
RW
1672/**
1673 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1674 * @dev: PCI device to prepare
1675 * @enable: True to enable wake-up event generation; false to disable
1676 *
1677 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1678 * and this function allows them to set that up cleanly - pci_enable_wake()
1679 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1680 * ordering constraints.
1681 *
1682 * This function only returns error code if the device is not capable of
1683 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1684 * enable wake-up power for it.
1685 */
1686int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1687{
1688 return pci_pme_capable(dev, PCI_D3cold) ?
1689 pci_enable_wake(dev, PCI_D3cold, enable) :
1690 pci_enable_wake(dev, PCI_D3hot, enable);
1691}
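/*
 * Usage sketch (illustrative, not part of this file): arming PME-based
 * wake-up, e.g. for Wake-on-LAN.  foo_set_wol is an assumed name.
 */
#if 0
static int foo_set_wol(struct pci_dev *pdev, bool enable)
{
	if (enable && !pci_pme_capable(pdev, PCI_D3hot))
		return -EINVAL;	/* no PME# from D3hot on this device */

	return pci_wake_from_d3(pdev, enable);
}
#endif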
1692
404cc2d8 1693/**
37139074
JB
1694 * pci_target_state - find an appropriate low power state for a given PCI dev
1695 * @dev: PCI device
1696 *
1697 * Use underlying platform code to find a supported low power state for @dev.
1698 * If the platform can't manage @dev, return the deepest state from which it
1699 * can generate wake events, based on any available PME info.
404cc2d8 1700 */
e5899e1b 1701pci_power_t pci_target_state(struct pci_dev *dev)
404cc2d8
RW
1702{
1703 pci_power_t target_state = PCI_D3hot;
404cc2d8
RW
1704
1705 if (platform_pci_power_manageable(dev)) {
1706 /*
1707 * Call the platform to choose the target state of the device
1708 * and enable wake-up from this state if supported.
1709 */
1710 pci_power_t state = platform_pci_choose_state(dev);
1711
1712 switch (state) {
1713 case PCI_POWER_ERROR:
1714 case PCI_UNKNOWN:
1715 break;
1716 case PCI_D1:
1717 case PCI_D2:
1718 if (pci_no_d1d2(dev))
1719 break;
1720 default:
1721 target_state = state;
404cc2d8 1722 }
d2abdf62
RW
1723 } else if (!dev->pm_cap) {
1724 target_state = PCI_D0;
404cc2d8
RW
1725 } else if (device_may_wakeup(&dev->dev)) {
1726 /*
1727 * Find the deepest state from which the device can generate
1728 * wake-up events, make it the target state and enable device
1729 * to generate PME#.
1730 */
337001b6
RW
1731 if (dev->pme_support) {
1732 while (target_state
1733 && !(dev->pme_support & (1 << target_state)))
1734 target_state--;
404cc2d8
RW
1735 }
1736 }
1737
e5899e1b
RW
1738 return target_state;
1739}
1740
1741/**
1742 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1743 * @dev: Device to handle.
1744 *
1745 * Choose the power state appropriate for the device depending on whether
1746 * it can wake up the system and/or is power manageable by the platform
1747 * (PCI_D3hot is the default) and put the device into that state.
1748 */
1749int pci_prepare_to_sleep(struct pci_dev *dev)
1750{
1751 pci_power_t target_state = pci_target_state(dev);
1752 int error;
1753
1754 if (target_state == PCI_POWER_ERROR)
1755 return -EIO;
1756
8efb8c76 1757 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
c157dfa3 1758
404cc2d8
RW
1759 error = pci_set_power_state(dev, target_state);
1760
1761 if (error)
1762 pci_enable_wake(dev, target_state, false);
1763
1764 return error;
1765}
1766
1767/**
443bd1c4 1768 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
404cc2d8
RW
1769 * @dev: Device to handle.
1770 *
88393161 1771 * Disable device's system wake-up capability and put it into D0.
404cc2d8
RW
1772 */
1773int pci_back_from_sleep(struct pci_dev *dev)
1774{
1775 pci_enable_wake(dev, PCI_D0, false);
1776 return pci_set_power_state(dev, PCI_D0);
1777}
1778
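/*
 * Sketch of the intended pairing (illustrative only, not lifted from the
 * PCI core): a system suspend path with no device-specific knowledge can
 * let pci_prepare_to_sleep() pick the target state and arm wake-up, and
 * undo both with pci_back_from_sleep() plus a state restore on resume.
 *
 *	pci_save_state(pdev);
 *	if (pci_prepare_to_sleep(pdev))
 *		dev_dbg(&pdev->dev, "device left in D0\n");
 *	...
 *	pci_back_from_sleep(pdev);
 *	pci_restore_state(pdev);
 */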
6cbf8214
RW
1779/**
1780 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1781 * @dev: PCI device being suspended.
1782 *
1783 * Prepare @dev to generate wake-up events at run time and put it into a low
1784 * power state.
1785 */
1786int pci_finish_runtime_suspend(struct pci_dev *dev)
1787{
1788 pci_power_t target_state = pci_target_state(dev);
1789 int error;
1790
1791 if (target_state == PCI_POWER_ERROR)
1792 return -EIO;
1793
1794 __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1795
1796 error = pci_set_power_state(dev, target_state);
1797
1798 if (error)
1799 __pci_enable_wake(dev, target_state, true, false);
1800
1801 return error;
1802}
1803
b67ea761
RW
1804/**
1805 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1806 * @dev: Device to check.
1807 *
1808 * Return true if the device itself is capable of generating wake-up events
1809 * (through the platform or using the native PCIe PME) or if the device supports
1810 * PME and one of its upstream bridges can generate wake-up events.
1811 */
1812bool pci_dev_run_wake(struct pci_dev *dev)
1813{
1814 struct pci_bus *bus = dev->bus;
1815
1816 if (device_run_wake(&dev->dev))
1817 return true;
1818
1819 if (!dev->pme_support)
1820 return false;
1821
1822 while (bus->parent) {
1823 struct pci_dev *bridge = bus->self;
1824
1825 if (device_run_wake(&bridge->dev))
1826 return true;
1827
1828 bus = bus->parent;
1829 }
1830
1831 /* We have reached the root bus. */
1832 if (bus->bridge)
1833 return device_run_wake(bus->bridge);
1834
1835 return false;
1836}
1837EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1838
eb9d0fe4
RW
1839/**
1840 * pci_pm_init - Initialize PM functions of given PCI device
1841 * @dev: PCI device to handle.
1842 */
1843void pci_pm_init(struct pci_dev *dev)
1844{
1845 int pm;
1846 u16 pmc;
1da177e4 1847
bb910a70 1848 pm_runtime_forbid(&dev->dev);
a1e4d72c 1849 device_enable_async_suspend(&dev->dev);
e80bb09d 1850 dev->wakeup_prepared = false;
bb910a70 1851
337001b6
RW
1852 dev->pm_cap = 0;
1853
eb9d0fe4
RW
1854 /* find PCI PM capability in list */
1855 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1856 if (!pm)
50246dd4 1857 return;
eb9d0fe4
RW
1858 /* Check device's ability to generate PME# */
1859 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
075c1771 1860
eb9d0fe4
RW
1861 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1862 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1863 pmc & PCI_PM_CAP_VER_MASK);
50246dd4 1864 return;
eb9d0fe4
RW
1865 }
1866
337001b6 1867 dev->pm_cap = pm;
1ae861e6 1868 dev->d3_delay = PCI_PM_D3_WAIT;
337001b6
RW
1869
1870 dev->d1_support = false;
1871 dev->d2_support = false;
1872 if (!pci_no_d1d2(dev)) {
c9ed77ee 1873 if (pmc & PCI_PM_CAP_D1)
337001b6 1874 dev->d1_support = true;
c9ed77ee 1875 if (pmc & PCI_PM_CAP_D2)
337001b6 1876 dev->d2_support = true;
c9ed77ee
BH
1877
1878 if (dev->d1_support || dev->d2_support)
1879 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
ec84f126
JB
1880 dev->d1_support ? " D1" : "",
1881 dev->d2_support ? " D2" : "");
337001b6
RW
1882 }
1883
1884 pmc &= PCI_PM_CAP_PME_MASK;
1885 if (pmc) {
10c3d71d
BH
1886 dev_printk(KERN_DEBUG, &dev->dev,
1887 "PME# supported from%s%s%s%s%s\n",
c9ed77ee
BH
1888 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1889 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1890 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1891 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1892 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
337001b6 1893 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
379021d5 1894 dev->pme_poll = true;
eb9d0fe4
RW
1895 /*
1896 * Make device's PM flags reflect the wake-up capability, but
1897 * let the user space enable it to wake up the system as needed.
1898 */
1899 device_set_wakeup_capable(&dev->dev, true);
eb9d0fe4 1900 /* Disable the PME# generation functionality */
337001b6
RW
1901 pci_pme_active(dev, false);
1902 } else {
1903 dev->pme_support = 0;
eb9d0fe4 1904 }
1da177e4
LT
1905}
1906
eb9c39d0
JB
1907/**
1908 * platform_pci_wakeup_init - init platform wakeup if present
1909 * @dev: PCI device
1910 *
1911 * Some devices don't have PCI PM caps but can still generate wakeup
1912 * events through platform methods (like ACPI events). If @dev supports
1913 * platform wakeup events, set the device flag to indicate as much. This
1914 * may be redundant if the device also supports PCI PM caps, but double
1915 * initialization should be safe in that case.
1916 */
1917void platform_pci_wakeup_init(struct pci_dev *dev)
1918{
1919 if (!platform_pci_can_wakeup(dev))
1920 return;
1921
1922 device_set_wakeup_capable(&dev->dev, true);
eb9c39d0
JB
1923 platform_pci_sleep_wake(dev, false);
1924}
1925
34a4876e
YL
1926static void pci_add_saved_cap(struct pci_dev *pci_dev,
1927 struct pci_cap_saved_state *new_cap)
1928{
1929 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
1930}
1931
63f4898a
RW
1932/**
1933 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
1934 * @dev: the PCI device
1935 * @cap: the capability to allocate the buffer for
1936 * @size: requested size of the buffer
1937 */
1938static int pci_add_cap_save_buffer(
1939 struct pci_dev *dev, char cap, unsigned int size)
1940{
1941 int pos;
1942 struct pci_cap_saved_state *save_state;
1943
1944 pos = pci_find_capability(dev, cap);
1945 if (pos <= 0)
1946 return 0;
1947
1948 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1949 if (!save_state)
1950 return -ENOMEM;
1951
24a4742f
AW
1952 save_state->cap.cap_nr = cap;
1953 save_state->cap.size = size;
63f4898a
RW
1954 pci_add_saved_cap(dev, save_state);
1955
1956 return 0;
1957}
1958
1959/**
1960 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
1961 * @dev: the PCI device
1962 */
1963void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1964{
1965 int error;
1966
89858517
YZ
1967 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1968 PCI_EXP_SAVE_REGS * sizeof(u16));
63f4898a
RW
1969 if (error)
1970 dev_err(&dev->dev,
1971 "unable to preallocate PCI Express save buffer\n");
1972
1973 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1974 if (error)
1975 dev_err(&dev->dev,
1976 "unable to preallocate PCI-X save buffer\n");
1977}
1978
f796841e
YL
1979void pci_free_cap_save_buffers(struct pci_dev *dev)
1980{
1981 struct pci_cap_saved_state *tmp;
1982 struct hlist_node *pos, *n;
1983
1984 hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
1985 kfree(tmp);
1986}
1987
58c3a727
YZ
1988/**
1989 * pci_enable_ari - enable ARI forwarding if hardware supports it
1990 * @dev: the PCI device
1991 */
1992void pci_enable_ari(struct pci_dev *dev)
1993{
1994 int pos;
1995 u32 cap;
864d296c 1996 u16 flags, ctrl;
8113587c 1997 struct pci_dev *bridge;
58c3a727 1998
6748dcc2 1999 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
58c3a727
YZ
2000 return;
2001
8113587c
ZY
2002 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
2003 if (!pos)
58c3a727
YZ
2004 return;
2005
8113587c 2006 bridge = dev->bus->self;
5f4d91a1 2007 if (!bridge || !pci_is_pcie(bridge))
8113587c
ZY
2008 return;
2009
06a1cbaf 2010 pos = pci_pcie_cap(bridge);
58c3a727
YZ
2011 if (!pos)
2012 return;
2013
864d296c
CW
2014 /* ARI is a PCIe v2 feature */
2015 pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
2016 if ((flags & PCI_EXP_FLAGS_VERS) < 2)
2017 return;
2018
8113587c 2019 pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
58c3a727
YZ
2020 if (!(cap & PCI_EXP_DEVCAP2_ARI))
2021 return;
2022
8113587c 2023 pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
58c3a727 2024 ctrl |= PCI_EXP_DEVCTL2_ARI;
8113587c 2025 pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
58c3a727 2026
8113587c 2027 bridge->ari_enabled = 1;
58c3a727
YZ
2028}
2029
b48d4425
JB
2030/**
2031 * pci_enable_ido - enable ID-based ordering on a device
2032 * @dev: the PCI device
2033 * @type: which types of IDO to enable
2034 *
2035 * Enable ID-based ordering on @dev. @type can contain the bits
2036 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
2037 * which types of transactions are allowed to be re-ordered.
2038 */
2039void pci_enable_ido(struct pci_dev *dev, unsigned long type)
2040{
2041 int pos;
2042 u16 ctrl;
2043
2044 pos = pci_pcie_cap(dev);
2045 if (!pos)
2046 return;
2047
2048 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2049 if (type & PCI_EXP_IDO_REQUEST)
2050 ctrl |= PCI_EXP_IDO_REQ_EN;
2051 if (type & PCI_EXP_IDO_COMPLETION)
2052 ctrl |= PCI_EXP_IDO_CMP_EN;
2053 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2054}
2055EXPORT_SYMBOL(pci_enable_ido);
2056
2057/**
2058 * pci_disable_ido - disable ID-based ordering on a device
2059 * @dev: the PCI device
2060 * @type: which types of IDO to disable
2061 */
2062void pci_disable_ido(struct pci_dev *dev, unsigned long type)
2063{
2064 int pos;
2065 u16 ctrl;
2066
2067 if (!pci_is_pcie(dev))
2068 return;
2069
2070 pos = pci_pcie_cap(dev);
2071 if (!pos)
2072 return;
2073
2074 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2075 if (type & PCI_EXP_IDO_REQUEST)
2076 ctrl &= ~PCI_EXP_IDO_REQ_EN;
2077 if (type & PCI_EXP_IDO_COMPLETION)
2078 ctrl &= ~PCI_EXP_IDO_CMP_EN;
2079 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2080}
2081EXPORT_SYMBOL(pci_disable_ido);
2082
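/*
 * Usage sketch (hypothetical driver code): a device whose transactions
 * carry no ordering dependencies on other requesters could opt in to IDO
 * for both directions at probe time and opt out again on removal.
 *
 *	pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
 *	...
 *	pci_disable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
 */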
48a92a81
JB
2083/**
2084 * pci_enable_obff - enable optimized buffer flush/fill
2085 * @dev: PCI device
2086 * @type: type of signaling to use
2087 *
2088 * Try to enable @type OBFF signaling on @dev. It will try using WAKE#
2089 * signaling if possible, falling back to message signaling only if
2090 * WAKE# isn't supported. @type should indicate whether the PCIe link should
2091 * be brought out of L0s or L1 to send the message. It should be either
2092 * %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_EXP_OBFF_SIGNAL_L0.
2093 *
2094 * If your device can benefit from receiving all messages, even at the
2095 * power cost of bringing the link back up from a low power state, use
2096 * %PCI_EXP_OBFF_SIGNAL_ALWAYS. Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0 (the
2097 * preferred type).
2098 *
2099 * RETURNS:
2100 * Zero on success, appropriate error number on failure.
2101 */
2102int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2103{
2104 int pos;
2105 u32 cap;
2106 u16 ctrl;
2107 int ret;
2108
2109 if (!pci_is_pcie(dev))
2110 return -ENOTSUPP;
2111
2112 pos = pci_pcie_cap(dev);
2113 if (!pos)
2114 return -ENOTSUPP;
2115
2116 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2117 if (!(cap & PCI_EXP_OBFF_MASK))
2118 return -ENOTSUPP; /* no OBFF support at all */
2119
2120 /* Make sure the topology supports OBFF as well */
2121 if (dev->bus) {
2122 ret = pci_enable_obff(dev->bus->self, type);
2123 if (ret)
2124 return ret;
2125 }
2126
2127 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2128 if (cap & PCI_EXP_OBFF_WAKE)
2129 ctrl |= PCI_EXP_OBFF_WAKE_EN;
2130 else {
2131 switch (type) {
2132 case PCI_EXP_OBFF_SIGNAL_L0:
2133 if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2134 ctrl |= PCI_EXP_OBFF_MSGA_EN;
2135 break;
2136 case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2137 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2138 ctrl |= PCI_EXP_OBFF_MSGB_EN;
2139 break;
2140 default:
2141 WARN(1, "bad OBFF signal type\n");
2142 return -ENOTSUPP;
2143 }
2144 }
2145 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2146
2147 return 0;
2148}
2149EXPORT_SYMBOL(pci_enable_obff);
2150
2151/**
2152 * pci_disable_obff - disable optimized buffer flush/fill
2153 * @dev: PCI device
2154 *
2155 * Disable OBFF on @dev.
2156 */
2157void pci_disable_obff(struct pci_dev *dev)
2158{
2159 int pos;
2160 u16 ctrl;
2161
2162 if (!pci_is_pcie(dev))
2163 return;
2164
2165 pos = pci_pcie_cap(dev);
2166 if (!pos)
2167 return;
2168
2169 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2170 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2171 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2172}
2173EXPORT_SYMBOL(pci_disable_obff);
2174
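/*
 * Usage sketch (hypothetical driver code): enable OBFF with the preferred
 * L0 signalling type and carry on without it if the device or the path to
 * the root complex lacks support.
 *
 *	if (pci_enable_obff(pdev, PCI_EXP_OBFF_SIGNAL_L0))
 *		dev_info(&pdev->dev, "OBFF unavailable, continuing without it\n");
 */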
51c2e0a7
JB
2175/**
2176 * pci_ltr_supported - check whether a device supports LTR
2177 * @dev: PCI device
2178 *
2179 * RETURNS:
2180 * True if @dev supports latency tolerance reporting, false otherwise.
2181 */
2182bool pci_ltr_supported(struct pci_dev *dev)
2183{
2184 int pos;
2185 u32 cap;
2186
2187 if (!pci_is_pcie(dev))
2188 return false;
2189
2190 pos = pci_pcie_cap(dev);
2191 if (!pos)
2192 return false;
2193
2194 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2195
2196 return cap & PCI_EXP_DEVCAP2_LTR;
2197}
2198EXPORT_SYMBOL(pci_ltr_supported);
2199
2200/**
2201 * pci_enable_ltr - enable latency tolerance reporting
2202 * @dev: PCI device
2203 *
2204 * Enable LTR on @dev if possible, which means enabling it first on
2205 * upstream ports.
2206 *
2207 * RETURNS:
2208 * Zero on success, errno on failure.
2209 */
2210int pci_enable_ltr(struct pci_dev *dev)
2211{
2212 int pos;
2213 u16 ctrl;
2214 int ret;
2215
2216 if (!pci_ltr_supported(dev))
2217 return -ENOTSUPP;
2218
2219 pos = pci_pcie_cap(dev);
2220 if (!pos)
2221 return -ENOTSUPP;
2222
2223 /* Only primary function can enable/disable LTR */
2224 if (PCI_FUNC(dev->devfn) != 0)
2225 return -EINVAL;
2226
2227 /* Enable upstream ports first */
2228 if (dev->bus) {
2229 ret = pci_enable_ltr(dev->bus->self);
2230 if (ret)
2231 return ret;
2232 }
2233
2234 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2235 ctrl |= PCI_EXP_LTR_EN;
2236 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2237
2238 return 0;
2239}
2240EXPORT_SYMBOL(pci_enable_ltr);
2241
2242/**
2243 * pci_disable_ltr - disable latency tolerance reporting
2244 * @dev: PCI device
2245 */
2246void pci_disable_ltr(struct pci_dev *dev)
2247{
2248 int pos;
2249 u16 ctrl;
2250
2251 if (!pci_ltr_supported(dev))
2252 return;
2253
2254 pos = pci_pcie_cap(dev);
2255 if (!pos)
2256 return;
2257
2258 /* Only primary function can enable/disable LTR */
2259 if (PCI_FUNC(dev->devfn) != 0)
2260 return;
2261
2262 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2263 ctrl &= ~PCI_EXP_LTR_EN;
2264 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2265}
2266EXPORT_SYMBOL(pci_disable_ltr);
2267
2268static int __pci_ltr_scale(int *val)
2269{
2270 int scale = 0;
2271
2272 while (*val > 1023) {
2273 *val = (*val + 31) / 32;
2274 scale++;
2275 }
2276 return scale;
2277}
2278
2279/**
2280 * pci_set_ltr - set LTR latency values
2281 * @dev: PCI device
2282 * @snoop_lat_ns: snoop latency in nanoseconds
2283 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2284 *
2285 * Figure out the scale and set the LTR values accordingly.
2286 */
2287int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2288{
2289 int pos, ret, snoop_scale, nosnoop_scale;
2290 u16 val;
2291
2292 if (!pci_ltr_supported(dev))
2293 return -ENOTSUPP;
2294
2295 snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2296 nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2297
2298 if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2299 nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2300 return -EINVAL;
2301
2302 if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2303 (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2304 return -EINVAL;
2305
2306 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2307 if (!pos)
2308 return -ENOTSUPP;
2309
2310 val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2311 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2312 if (ret != 4)
2313 return -EIO;
2314
2315 val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2316 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2317 if (ret != 4)
2318 return -EIO;
2319
2320 return 0;
2321}
2322EXPORT_SYMBOL(pci_set_ltr);
2323
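/*
 * Usage sketch (hypothetical driver code, the latency values are purely
 * illustrative): enable LTR and then advertise the tolerances the device
 * can live with.
 *
 *	if (!pci_enable_ltr(pdev))
 *		pci_set_ltr(pdev, 3000, 3000);
 */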
5d990b62
CW
2324static int pci_acs_enable;
2325
2326/**
2327 * pci_request_acs - ask for ACS to be enabled if supported
2328 */
2329void pci_request_acs(void)
2330{
2331 pci_acs_enable = 1;
2332}
2333
ae21ee65
AK
2334/**
2335 * pci_enable_acs - enable ACS if hardware supports it
2336 * @dev: the PCI device
2337 */
2338void pci_enable_acs(struct pci_dev *dev)
2339{
2340 int pos;
2341 u16 cap;
2342 u16 ctrl;
2343
5d990b62
CW
2344 if (!pci_acs_enable)
2345 return;
2346
5f4d91a1 2347 if (!pci_is_pcie(dev))
ae21ee65
AK
2348 return;
2349
2350 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2351 if (!pos)
2352 return;
2353
2354 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2355 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2356
2357 /* Source Validation */
2358 ctrl |= (cap & PCI_ACS_SV);
2359
2360 /* P2P Request Redirect */
2361 ctrl |= (cap & PCI_ACS_RR);
2362
2363 /* P2P Completion Redirect */
2364 ctrl |= (cap & PCI_ACS_CR);
2365
2366 /* Upstream Forwarding */
2367 ctrl |= (cap & PCI_ACS_UF);
2368
2369 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2370}
2371
57c2cf71
BH
2372/**
2373 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2374 * @dev: the PCI device
2375 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2376 *
2377 * Perform INTx swizzling for a device behind one level of bridge. This is
2378 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
46b952a3
MW
2379 * behind bridges on add-in cards. For devices with ARI enabled, the slot
2380 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2381 * the PCI Express Base Specification, Revision 2.1)
57c2cf71 2382 */
3df425f3 2383u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
57c2cf71 2384{
46b952a3
MW
2385 int slot;
2386
2387 if (pci_ari_enabled(dev->bus))
2388 slot = 0;
2389 else
2390 slot = PCI_SLOT(dev->devfn);
2391
2392 return (((pin - 1) + slot) % 4) + 1;
57c2cf71
BH
2393}
2394
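/*
 * Worked example of the swizzle above: a device in slot 3 of an add-in
 * card asserting INTB (pin 2) maps to ((2 - 1) + 3) % 4 + 1 = 1, i.e. the
 * bridge sees the interrupt on INTA.
 */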
1da177e4
LT
2395int
2396pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2397{
2398 u8 pin;
2399
514d207d 2400 pin = dev->pin;
1da177e4
LT
2401 if (!pin)
2402 return -1;
878f2e50 2403
8784fd4d 2404 while (!pci_is_root_bus(dev->bus)) {
57c2cf71 2405 pin = pci_swizzle_interrupt_pin(dev, pin);
1da177e4
LT
2406 dev = dev->bus->self;
2407 }
2408 *bridge = dev;
2409 return pin;
2410}
2411
68feac87
BH
2412/**
2413 * pci_common_swizzle - swizzle INTx all the way to root bridge
2414 * @dev: the PCI device
2415 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2416 *
2417 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
2418 * bridges all the way up to a PCI root bus.
2419 */
2420u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2421{
2422 u8 pin = *pinp;
2423
1eb39487 2424 while (!pci_is_root_bus(dev->bus)) {
68feac87
BH
2425 pin = pci_swizzle_interrupt_pin(dev, pin);
2426 dev = dev->bus->self;
2427 }
2428 *pinp = pin;
2429 return PCI_SLOT(dev->devfn);
2430}
2431
1da177e4
LT
2432/**
2433 * pci_release_region - Release a PCI BAR
2434 * @pdev: PCI device whose resources were previously reserved by pci_request_region
2435 * @bar: BAR to release
2436 *
2437 * Releases the PCI I/O and memory resources previously reserved by a
2438 * successful call to pci_request_region. Call this function only
2439 * after all use of the PCI regions has ceased.
2440 */
2441void pci_release_region(struct pci_dev *pdev, int bar)
2442{
9ac7849e
TH
2443 struct pci_devres *dr;
2444
1da177e4
LT
2445 if (pci_resource_len(pdev, bar) == 0)
2446 return;
2447 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2448 release_region(pci_resource_start(pdev, bar),
2449 pci_resource_len(pdev, bar));
2450 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2451 release_mem_region(pci_resource_start(pdev, bar),
2452 pci_resource_len(pdev, bar));
9ac7849e
TH
2453
2454 dr = find_pci_dr(pdev);
2455 if (dr)
2456 dr->region_mask &= ~(1 << bar);
1da177e4
LT
2457}
2458
2459/**
f5ddcac4 2460 * __pci_request_region - Reserve PCI I/O and memory resource
1da177e4
LT
2461 * @pdev: PCI device whose resources are to be reserved
2462 * @bar: BAR to be reserved
2463 * @res_name: Name to be associated with resource.
f5ddcac4 2464 * @exclusive: whether the region access is exclusive or not
1da177e4
LT
2465 *
2466 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2467 * being reserved by owner @res_name. Do not access any
2468 * address inside the PCI regions unless this call returns
2469 * successfully.
2470 *
f5ddcac4
RD
2471 * If @exclusive is set, then the region is marked so that userspace
2472 * is explicitly not allowed to map the resource via /dev/mem or
2473 * sysfs MMIO access.
2474 *
1da177e4
LT
2475 * Returns 0 on success, or %EBUSY on error. A warning
2476 * message is also printed on failure.
2477 */
e8de1481
AV
2478static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2479 int exclusive)
1da177e4 2480{
9ac7849e
TH
2481 struct pci_devres *dr;
2482
1da177e4
LT
2483 if (pci_resource_len(pdev, bar) == 0)
2484 return 0;
2485
2486 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2487 if (!request_region(pci_resource_start(pdev, bar),
2488 pci_resource_len(pdev, bar), res_name))
2489 goto err_out;
2490 }
2491 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
e8de1481
AV
2492 if (!__request_mem_region(pci_resource_start(pdev, bar),
2493 pci_resource_len(pdev, bar), res_name,
2494 exclusive))
1da177e4
LT
2495 goto err_out;
2496 }
9ac7849e
TH
2497
2498 dr = find_pci_dr(pdev);
2499 if (dr)
2500 dr->region_mask |= 1 << bar;
2501
1da177e4
LT
2502 return 0;
2503
2504err_out:
c7dabef8 2505 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
096e6f67 2506 &pdev->resource[bar]);
1da177e4
LT
2507 return -EBUSY;
2508}
2509
e8de1481 2510/**
f5ddcac4 2511 * pci_request_region - Reserve PCI I/O and memory resource
e8de1481
AV
2512 * @pdev: PCI device whose resources are to be reserved
2513 * @bar: BAR to be reserved
f5ddcac4 2514 * @res_name: Name to be associated with resource
e8de1481 2515 *
f5ddcac4 2516 * Mark the PCI region associated with PCI device @pdev BAR @bar as
e8de1481
AV
2517 * being reserved by owner @res_name. Do not access any
2518 * address inside the PCI regions unless this call returns
2519 * successfully.
2520 *
2521 * Returns 0 on success, or %EBUSY on error. A warning
2522 * message is also printed on failure.
2523 */
2524int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2525{
2526 return __pci_request_region(pdev, bar, res_name, 0);
2527}
2528
2529/**
2530 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
2531 * @pdev: PCI device whose resources are to be reserved
2532 * @bar: BAR to be reserved
2533 * @res_name: Name to be associated with resource.
2534 *
2535 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2536 * being reserved by owner @res_name. Do not access any
2537 * address inside the PCI regions unless this call returns
2538 * successfully.
2539 *
2540 * Returns 0 on success, or %EBUSY on error. A warning
2541 * message is also printed on failure.
2542 *
2543 * The key difference of the _exclusive variant is that userspace is
2544 * explicitly not allowed to map the resource via /dev/mem or
2545 * sysfs.
2546 */
2547int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2548{
2549 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2550}
c87deff7
HS
2551/**
2552 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2553 * @pdev: PCI device whose resources were previously reserved
2554 * @bars: Bitmask of BARs to be released
2555 *
2556 * Release selected PCI I/O and memory resources previously reserved.
2557 * Call this function only after all use of the PCI regions has ceased.
2558 */
2559void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2560{
2561 int i;
2562
2563 for (i = 0; i < 6; i++)
2564 if (bars & (1 << i))
2565 pci_release_region(pdev, i);
2566}
2567
e8de1481
AV
2568int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2569 const char *res_name, int excl)
c87deff7
HS
2570{
2571 int i;
2572
2573 for (i = 0; i < 6; i++)
2574 if (bars & (1 << i))
e8de1481 2575 if (__pci_request_region(pdev, i, res_name, excl))
c87deff7
HS
2576 goto err_out;
2577 return 0;
2578
2579err_out:
2580 while(--i >= 0)
2581 if (bars & (1 << i))
2582 pci_release_region(pdev, i);
2583
2584 return -EBUSY;
2585}
1da177e4 2586
e8de1481
AV
2587
2588/**
2589 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2590 * @pdev: PCI device whose resources are to be reserved
2591 * @bars: Bitmask of BARs to be requested
2592 * @res_name: Name to be associated with resource
2593 */
2594int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2595 const char *res_name)
2596{
2597 return __pci_request_selected_regions(pdev, bars, res_name, 0);
2598}
2599
2600int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2601 int bars, const char *res_name)
2602{
2603 return __pci_request_selected_regions(pdev, bars, res_name,
2604 IORESOURCE_EXCLUSIVE);
2605}
2606
1da177e4
LT
2607/**
2608 * pci_release_regions - Release reserved PCI I/O and memory resources
2609 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
2610 *
2611 * Releases all PCI I/O and memory resources previously reserved by a
2612 * successful call to pci_request_regions. Call this function only
2613 * after all use of the PCI regions has ceased.
2614 */
2615
2616void pci_release_regions(struct pci_dev *pdev)
2617{
c87deff7 2618 pci_release_selected_regions(pdev, (1 << 6) - 1);
1da177e4
LT
2619}
2620
2621/**
2622 * pci_request_regions - Reserve PCI I/O and memory resources
2623 * @pdev: PCI device whose resources are to be reserved
2624 * @res_name: Name to be associated with resource.
2625 *
2626 * Mark all PCI regions associated with PCI device @pdev as
2627 * being reserved by owner @res_name. Do not access any
2628 * address inside the PCI regions unless this call returns
2629 * successfully.
2630 *
2631 * Returns 0 on success, or %EBUSY on error. A warning
2632 * message is also printed on failure.
2633 */
3c990e92 2634int pci_request_regions(struct pci_dev *pdev, const char *res_name)
1da177e4 2635{
c87deff7 2636 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
1da177e4
LT
2637}
2638
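/*
 * Usage sketch (hypothetical driver code, "foo" is a placeholder): the
 * typical probe sequence pairs pci_request_regions() with
 * pci_release_regions() on the failure and removal paths.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int err = pci_enable_device(pdev);
 *
 *		if (err)
 *			return err;
 *		err = pci_request_regions(pdev, "foo");
 *		if (err) {
 *			pci_disable_device(pdev);
 *			return err;
 *		}
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */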
e8de1481
AV
2639/**
2640 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2641 * @pdev: PCI device whose resources are to be reserved
2642 * @res_name: Name to be associated with resource.
2643 *
2644 * Mark all PCI regions associated with PCI device @pdev as
2645 * being reserved by owner @res_name. Do not access any
2646 * address inside the PCI regions unless this call returns
2647 * successfully.
2648 *
2649 * pci_request_regions_exclusive() will mark the region so that
2650 * /dev/mem and the sysfs MMIO access will not be allowed.
2651 *
2652 * Returns 0 on success, or %EBUSY on error. A warning
2653 * message is also printed on failure.
2654 */
2655int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2656{
2657 return pci_request_selected_regions_exclusive(pdev,
2658 ((1 << 6) - 1), res_name);
2659}
2660
6a479079
BH
2661static void __pci_set_master(struct pci_dev *dev, bool enable)
2662{
2663 u16 old_cmd, cmd;
2664
2665 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2666 if (enable)
2667 cmd = old_cmd | PCI_COMMAND_MASTER;
2668 else
2669 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2670 if (cmd != old_cmd) {
2671 dev_dbg(&dev->dev, "%s bus mastering\n",
2672 enable ? "enabling" : "disabling");
2673 pci_write_config_word(dev, PCI_COMMAND, cmd);
2674 }
2675 dev->is_busmaster = enable;
2676}
e8de1481 2677
96c55900
MS
2678/**
2679 * pcibios_set_master - enable PCI bus-mastering for device dev
2680 * @dev: the PCI device to enable
2681 *
2682 * Enables PCI bus-mastering for the device. This is the default
2683 * implementation. Architecture specific implementations can override
2684 * this if necessary.
2685 */
2686void __weak pcibios_set_master(struct pci_dev *dev)
2687{
2688 u8 lat;
2689
f676678f
MS
2690 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2691 if (pci_is_pcie(dev))
2692 return;
2693
96c55900
MS
2694 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2695 if (lat < 16)
2696 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2697 else if (lat > pcibios_max_latency)
2698 lat = pcibios_max_latency;
2699 else
2700 return;
2701 dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
2702 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2703}
2704
1da177e4
LT
2705/**
2706 * pci_set_master - enables bus-mastering for device dev
2707 * @dev: the PCI device to enable
2708 *
2709 * Enables bus-mastering on the device and calls pcibios_set_master()
2710 * to do the needed arch specific settings.
2711 */
6a479079 2712void pci_set_master(struct pci_dev *dev)
1da177e4 2713{
6a479079 2714 __pci_set_master(dev, true);
1da177e4
LT
2715 pcibios_set_master(dev);
2716}
2717
6a479079
BH
2718/**
2719 * pci_clear_master - disables bus-mastering for device dev
2720 * @dev: the PCI device to disable
2721 */
2722void pci_clear_master(struct pci_dev *dev)
2723{
2724 __pci_set_master(dev, false);
2725}
2726
1da177e4 2727/**
edb2d97e
MW
2728 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2729 * @dev: the PCI device for which MWI is to be enabled
1da177e4 2730 *
edb2d97e
MW
2731 * Helper function for pci_set_mwi.
2732 * Originally copied from drivers/net/acenic.c.
1da177e4
LT
2733 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2734 *
2735 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2736 */
15ea76d4 2737int pci_set_cacheline_size(struct pci_dev *dev)
1da177e4
LT
2738{
2739 u8 cacheline_size;
2740
2741 if (!pci_cache_line_size)
15ea76d4 2742 return -EINVAL;
1da177e4
LT
2743
2744 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2745 equal to or multiple of the right value. */
2746 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2747 if (cacheline_size >= pci_cache_line_size &&
2748 (cacheline_size % pci_cache_line_size) == 0)
2749 return 0;
2750
2751 /* Write the correct value. */
2752 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2753 /* Read it back. */
2754 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2755 if (cacheline_size == pci_cache_line_size)
2756 return 0;
2757
80ccba11
BH
2758 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2759 "supported\n", pci_cache_line_size << 2);
1da177e4
LT
2760
2761 return -EINVAL;
2762}
15ea76d4
TH
2763EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2764
2765#ifdef PCI_DISABLE_MWI
2766int pci_set_mwi(struct pci_dev *dev)
2767{
2768 return 0;
2769}
2770
2771int pci_try_set_mwi(struct pci_dev *dev)
2772{
2773 return 0;
2774}
2775
2776void pci_clear_mwi(struct pci_dev *dev)
2777{
2778}
2779
2780#else
1da177e4
LT
2781
2782/**
2783 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2784 * @dev: the PCI device for which MWI is enabled
2785 *
694625c0 2786 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
1da177e4
LT
2787 *
2788 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2789 */
2790int
2791pci_set_mwi(struct pci_dev *dev)
2792{
2793 int rc;
2794 u16 cmd;
2795
edb2d97e 2796 rc = pci_set_cacheline_size(dev);
1da177e4
LT
2797 if (rc)
2798 return rc;
2799
2800 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2801 if (! (cmd & PCI_COMMAND_INVALIDATE)) {
80ccba11 2802 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
1da177e4
LT
2803 cmd |= PCI_COMMAND_INVALIDATE;
2804 pci_write_config_word(dev, PCI_COMMAND, cmd);
2805 }
2806
2807 return 0;
2808}
2809
694625c0
RD
2810/**
2811 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2812 * @dev: the PCI device for which MWI is enabled
2813 *
2814 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2815 * Callers are not required to check the return value.
2816 *
2817 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2818 */
2819int pci_try_set_mwi(struct pci_dev *dev)
2820{
2821 int rc = pci_set_mwi(dev);
2822 return rc;
2823}
2824
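/*
 * Usage sketch (hypothetical driver code): MWI is an optimization rather
 * than a requirement for most devices, so callers that merely prefer it
 * can use the _try_ variant and ignore the result.
 *
 *	pci_try_set_mwi(pdev);
 */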
1da177e4
LT
2825/**
2826 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2827 * @dev: the PCI device to disable
2828 *
2829 * Disables PCI Memory-Write-Invalidate transaction on the device
2830 */
2831void
2832pci_clear_mwi(struct pci_dev *dev)
2833{
2834 u16 cmd;
2835
2836 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2837 if (cmd & PCI_COMMAND_INVALIDATE) {
2838 cmd &= ~PCI_COMMAND_INVALIDATE;
2839 pci_write_config_word(dev, PCI_COMMAND, cmd);
2840 }
2841}
edb2d97e 2842#endif /* ! PCI_DISABLE_MWI */
1da177e4 2843
a04ce0ff
BR
2844/**
2845 * pci_intx - enables/disables PCI INTx for device dev
8f7020d3
RD
2846 * @pdev: the PCI device to operate on
2847 * @enable: boolean: whether to enable or disable PCI INTx
a04ce0ff
BR
2848 *
2849 * Enables/disables PCI INTx for device dev
2850 */
2851void
2852pci_intx(struct pci_dev *pdev, int enable)
2853{
2854 u16 pci_command, new;
2855
2856 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2857
2858 if (enable) {
2859 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2860 } else {
2861 new = pci_command | PCI_COMMAND_INTX_DISABLE;
2862 }
2863
2864 if (new != pci_command) {
9ac7849e
TH
2865 struct pci_devres *dr;
2866
2fd9d74b 2867 pci_write_config_word(pdev, PCI_COMMAND, new);
9ac7849e
TH
2868
2869 dr = find_pci_dr(pdev);
2870 if (dr && !dr->restore_intx) {
2871 dr->restore_intx = 1;
2872 dr->orig_intx = !enable;
2873 }
a04ce0ff
BR
2874 }
2875}
2876
a2e27787
JK
2877/**
2878 * pci_intx_mask_supported - probe for INTx masking support
6e9292c5 2879 * @dev: the PCI device to operate on
a2e27787
JK
2880 *
2881 * Check if the device dev supports INTx masking via the config space
2882 * command word.
2883 */
2884bool pci_intx_mask_supported(struct pci_dev *dev)
2885{
2886 bool mask_supported = false;
2887 u16 orig, new;
2888
2889 pci_cfg_access_lock(dev);
2890
2891 pci_read_config_word(dev, PCI_COMMAND, &orig);
2892 pci_write_config_word(dev, PCI_COMMAND,
2893 orig ^ PCI_COMMAND_INTX_DISABLE);
2894 pci_read_config_word(dev, PCI_COMMAND, &new);
2895
2896 /*
2897 * There's no way to protect against hardware bugs or detect them
2898 * reliably, but as long as we know what the value should be, let's
2899 * go ahead and check it.
2900 */
2901 if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
2902 dev_err(&dev->dev, "Command register changed from "
2903 "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
2904 } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
2905 mask_supported = true;
2906 pci_write_config_word(dev, PCI_COMMAND, orig);
2907 }
2908
2909 pci_cfg_access_unlock(dev);
2910 return mask_supported;
2911}
2912EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
2913
2914static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
2915{
2916 struct pci_bus *bus = dev->bus;
2917 bool mask_updated = true;
2918 u32 cmd_status_dword;
2919 u16 origcmd, newcmd;
2920 unsigned long flags;
2921 bool irq_pending;
2922
2923 /*
2924 * We do a single dword read to retrieve both command and status.
2925 * Document assumptions that make this possible.
2926 */
2927 BUILD_BUG_ON(PCI_COMMAND % 4);
2928 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
2929
2930 raw_spin_lock_irqsave(&pci_lock, flags);
2931
2932 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
2933
2934 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
2935
2936 /*
2937 * Check interrupt status register to see whether our device
2938 * triggered the interrupt (when masking) or the next IRQ is
2939 * already pending (when unmasking).
2940 */
2941 if (mask != irq_pending) {
2942 mask_updated = false;
2943 goto done;
2944 }
2945
2946 origcmd = cmd_status_dword;
2947 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
2948 if (mask)
2949 newcmd |= PCI_COMMAND_INTX_DISABLE;
2950 if (newcmd != origcmd)
2951 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
2952
2953done:
2954 raw_spin_unlock_irqrestore(&pci_lock, flags);
2955
2956 return mask_updated;
2957}
2958
2959/**
2960 * pci_check_and_mask_intx - mask INTx on pending interrupt
6e9292c5 2961 * @dev: the PCI device to operate on
a2e27787
JK
2962 *
2963 * Check if the device dev has its INTx line asserted, mask it and
2964 * return true in that case. False is returned if no interrupt was
2965 * pending.
2966 */
2967bool pci_check_and_mask_intx(struct pci_dev *dev)
2968{
2969 return pci_check_and_set_intx_mask(dev, true);
2970}
2971EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
2972
2973/**
2974 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
6e9292c5 2975 * @dev: the PCI device to operate on
a2e27787
JK
2976 *
2977 * Check if the device dev has its INTx line asserted, unmask it if not
2978 * and return true. False is returned and the mask remains active if
2979 * there was still an interrupt pending.
2980 */
2981bool pci_check_and_unmask_intx(struct pci_dev *dev)
2982{
2983 return pci_check_and_set_intx_mask(dev, false);
2984}
2985EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
2986
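/*
 * Usage sketch (hypothetical driver code, "foo" names are placeholders):
 * a handler for a shared INTx line can mask the interrupt at the device
 * and defer the real work to a threaded handler, which unmasks it again
 * with pci_check_and_unmask_intx() once it is done.
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct pci_dev *pdev = data;
 *
 *		if (!pci_check_and_mask_intx(pdev))
 *			return IRQ_NONE;
 *		return IRQ_WAKE_THREAD;
 *	}
 */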
f5f2b131
EB
2987/**
2988 * pci_msi_off - disables any MSI or MSI-X capabilities
8d7d86e9 2989 * @dev: the PCI device to operate on
f5f2b131
EB
2990 *
2991 * If you want to use MSI, see pci_enable_msi() and friends.
2992 * This is a lower-level primitive that allows us to disable
2993 * MSI operation at the device level.
2994 */
2995void pci_msi_off(struct pci_dev *dev)
2996{
2997 int pos;
2998 u16 control;
2999
3000 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
3001 if (pos) {
3002 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
3003 control &= ~PCI_MSI_FLAGS_ENABLE;
3004 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
3005 }
3006 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
3007 if (pos) {
3008 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
3009 control &= ~PCI_MSIX_FLAGS_ENABLE;
3010 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3011 }
3012}
b03214d5 3013EXPORT_SYMBOL_GPL(pci_msi_off);
f5f2b131 3014
4d57cdfa
FT
3015int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3016{
3017 return dma_set_max_seg_size(&dev->dev, size);
3018}
3019EXPORT_SYMBOL(pci_set_dma_max_seg_size);
4d57cdfa 3020
59fc67de
FT
3021int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3022{
3023 return dma_set_seg_boundary(&dev->dev, mask);
3024}
3025EXPORT_SYMBOL(pci_set_dma_seg_boundary);
59fc67de 3026
8c1c699f 3027static int pcie_flr(struct pci_dev *dev, int probe)
8dd7f803 3028{
8c1c699f
YZ
3029 int i;
3030 int pos;
8dd7f803 3031 u32 cap;
04b55c47 3032 u16 status, control;
8dd7f803 3033
06a1cbaf 3034 pos = pci_pcie_cap(dev);
8c1c699f 3035 if (!pos)
8dd7f803 3036 return -ENOTTY;
8c1c699f
YZ
3037
3038 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
8dd7f803
SY
3039 if (!(cap & PCI_EXP_DEVCAP_FLR))
3040 return -ENOTTY;
3041
d91cdc74
SY
3042 if (probe)
3043 return 0;
3044
8dd7f803 3045 /* Wait for the Transaction Pending bit to clear */
8c1c699f
YZ
3046 for (i = 0; i < 4; i++) {
3047 if (i)
3048 msleep((1 << (i - 1)) * 100);
5fe5db05 3049
8c1c699f
YZ
3050 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
3051 if (!(status & PCI_EXP_DEVSTA_TRPND))
3052 goto clear;
3053 }
3054
3055 dev_err(&dev->dev, "transaction is not cleared; "
3056 "proceeding with reset anyway\n");
3057
3058clear:
04b55c47
SR
3059 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
3060 control |= PCI_EXP_DEVCTL_BCR_FLR;
3061 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
3062
8c1c699f 3063 msleep(100);
8dd7f803 3064
8dd7f803
SY
3065 return 0;
3066}
d91cdc74 3067
8c1c699f 3068static int pci_af_flr(struct pci_dev *dev, int probe)
1ca88797 3069{
8c1c699f
YZ
3070 int i;
3071 int pos;
1ca88797 3072 u8 cap;
8c1c699f 3073 u8 status;
1ca88797 3074
8c1c699f
YZ
3075 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3076 if (!pos)
1ca88797 3077 return -ENOTTY;
8c1c699f
YZ
3078
3079 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
1ca88797
SY
3080 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3081 return -ENOTTY;
3082
3083 if (probe)
3084 return 0;
3085
1ca88797 3086 /* Wait for the Transaction Pending bit to clear */
8c1c699f
YZ
3087 for (i = 0; i < 4; i++) {
3088 if (i)
3089 msleep((1 << (i - 1)) * 100);
3090
3091 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
3092 if (!(status & PCI_AF_STATUS_TP))
3093 goto clear;
3094 }
5fe5db05 3095
8c1c699f
YZ
3096 dev_err(&dev->dev, "transaction is not cleared; "
3097 "proceeding with reset anyway\n");
5fe5db05 3098
8c1c699f
YZ
3099clear:
3100 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
1ca88797 3101 msleep(100);
8c1c699f 3102
1ca88797
SY
3103 return 0;
3104}
3105
83d74e03
RW
3106/**
3107 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3108 * @dev: Device to reset.
3109 * @probe: If set, only check if the device can be reset this way.
3110 *
3111 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3112 * unset, it will be reinitialized internally when going from PCI_D3hot to
3113 * PCI_D0. If that's the case and the device is not in a low-power state
3114 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3115 *
3116 * NOTE: This causes the caller to sleep for twice the device power transition
3117 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3118 * by default (i.e. unless the @dev's d3_delay field has a different value).
3119 * Moreover, only devices in D0 can be reset by this function.
3120 */
f85876ba 3121static int pci_pm_reset(struct pci_dev *dev, int probe)
d91cdc74 3122{
f85876ba
YZ
3123 u16 csr;
3124
3125 if (!dev->pm_cap)
3126 return -ENOTTY;
d91cdc74 3127
f85876ba
YZ
3128 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3129 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3130 return -ENOTTY;
d91cdc74 3131
f85876ba
YZ
3132 if (probe)
3133 return 0;
1ca88797 3134
f85876ba
YZ
3135 if (dev->current_state != PCI_D0)
3136 return -EINVAL;
3137
3138 csr &= ~PCI_PM_CTRL_STATE_MASK;
3139 csr |= PCI_D3hot;
3140 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 3141 pci_dev_d3_sleep(dev);
f85876ba
YZ
3142
3143 csr &= ~PCI_PM_CTRL_STATE_MASK;
3144 csr |= PCI_D0;
3145 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 3146 pci_dev_d3_sleep(dev);
f85876ba
YZ
3147
3148 return 0;
3149}
3150
c12ff1df
YZ
3151static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3152{
3153 u16 ctrl;
3154 struct pci_dev *pdev;
3155
654b75e0 3156 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
c12ff1df
YZ
3157 return -ENOTTY;
3158
3159 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3160 if (pdev != dev)
3161 return -ENOTTY;
3162
3163 if (probe)
3164 return 0;
3165
3166 pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
3167 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3168 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3169 msleep(100);
3170
3171 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3172 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3173 msleep(100);
3174
3175 return 0;
3176}
3177
977f857c 3178static int __pci_dev_reset(struct pci_dev *dev, int probe)
d91cdc74 3179{
8c1c699f
YZ
3180 int rc;
3181
3182 might_sleep();
3183
b9c3b266
DC
3184 rc = pci_dev_specific_reset(dev, probe);
3185 if (rc != -ENOTTY)
3186 goto done;
3187
8c1c699f
YZ
3188 rc = pcie_flr(dev, probe);
3189 if (rc != -ENOTTY)
3190 goto done;
d91cdc74 3191
8c1c699f 3192 rc = pci_af_flr(dev, probe);
f85876ba
YZ
3193 if (rc != -ENOTTY)
3194 goto done;
3195
3196 rc = pci_pm_reset(dev, probe);
c12ff1df
YZ
3197 if (rc != -ENOTTY)
3198 goto done;
3199
3200 rc = pci_parent_bus_reset(dev, probe);
8c1c699f 3201done:
977f857c
KRW
3202 return rc;
3203}
3204
3205static int pci_dev_reset(struct pci_dev *dev, int probe)
3206{
3207 int rc;
3208
3209 if (!probe) {
3210 pci_cfg_access_lock(dev);
3211 /* block PM suspend, driver probe, etc. */
3212 device_lock(&dev->dev);
3213 }
3214
3215 rc = __pci_dev_reset(dev, probe);
3216
8c1c699f 3217 if (!probe) {
8e9394ce 3218 device_unlock(&dev->dev);
fb51ccbf 3219 pci_cfg_access_unlock(dev);
8c1c699f 3220 }
8c1c699f 3221 return rc;
d91cdc74 3222}
d91cdc74 3223/**
8c1c699f
YZ
3224 * __pci_reset_function - reset a PCI device function
3225 * @dev: PCI device to reset
d91cdc74
SY
3226 *
3227 * Some devices allow an individual function to be reset without affecting
3228 * other functions in the same device. The PCI device must be responsive
3229 * to PCI config space in order to use this function.
3230 *
3231 * The device function is presumed to be unused when this function is called.
3232 * Resetting the device will make the contents of PCI configuration space
3233 * random, so any caller of this must be prepared to reinitialise the
3234 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3235 * etc.
3236 *
8c1c699f 3237 * Returns 0 if the device function was successfully reset or negative if the
d91cdc74
SY
3238 * device doesn't support resetting a single function.
3239 */
8c1c699f 3240int __pci_reset_function(struct pci_dev *dev)
d91cdc74 3241{
8c1c699f 3242 return pci_dev_reset(dev, 0);
d91cdc74 3243}
8c1c699f 3244EXPORT_SYMBOL_GPL(__pci_reset_function);
8dd7f803 3245
6fbf9e7a
KRW
3246/**
3247 * __pci_reset_function_locked - reset a PCI device function while holding
3248 * the @dev mutex lock.
3249 * @dev: PCI device to reset
3250 *
3251 * Some devices allow an individual function to be reset without affecting
3252 * other functions in the same device. The PCI device must be responsive
3253 * to PCI config space in order to use this function.
3254 *
3255 * The device function is presumed to be unused and the caller is holding
3256 * the device mutex lock when this function is called.
3257 * Resetting the device will make the contents of PCI configuration space
3258 * random, so any caller of this must be prepared to reinitialise the
3259 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3260 * etc.
3261 *
3262 * Returns 0 if the device function was successfully reset or negative if the
3263 * device doesn't support resetting a single function.
3264 */
3265int __pci_reset_function_locked(struct pci_dev *dev)
3266{
977f857c 3267 return __pci_dev_reset(dev, 0);
6fbf9e7a
KRW
3268}
3269EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3270
711d5779
MT
3271/**
3272 * pci_probe_reset_function - check whether the device can be safely reset
3273 * @dev: PCI device to reset
3274 *
3275 * Some devices allow an individual function to be reset without affecting
3276 * other functions in the same device. The PCI device must be responsive
3277 * to PCI config space in order to use this function.
3278 *
3279 * Returns 0 if the device function can be reset or negative if the
3280 * device doesn't support resetting a single function.
3281 */
3282int pci_probe_reset_function(struct pci_dev *dev)
3283{
3284 return pci_dev_reset(dev, 1);
3285}
3286
8dd7f803 3287/**
8c1c699f
YZ
3288 * pci_reset_function - quiesce and reset a PCI device function
3289 * @dev: PCI device to reset
8dd7f803
SY
3290 *
3291 * Some devices allow an individual function to be reset without affecting
3292 * other functions in the same device. The PCI device must be responsive
3293 * to PCI config space in order to use this function.
3294 *
3295 * This function does not just reset the PCI portion of a device, but
3296 * clears all the state associated with the device. This function differs
8c1c699f 3297 * from __pci_reset_function in that it saves and restores device state
8dd7f803
SY
3298 * over the reset.
3299 *
8c1c699f 3300 * Returns 0 if the device function was successfully reset or negative if the
8dd7f803
SY
3301 * device doesn't support resetting a single function.
3302 */
3303int pci_reset_function(struct pci_dev *dev)
3304{
8c1c699f 3305 int rc;
8dd7f803 3306
8c1c699f
YZ
3307 rc = pci_dev_reset(dev, 1);
3308 if (rc)
3309 return rc;
8dd7f803 3310
8dd7f803
SY
3311 pci_save_state(dev);
3312
8c1c699f
YZ
3313 /*
3314 * both INTx and MSI are disabled after the Interrupt Disable bit
3315 * is set and the Bus Master bit is cleared.
3316 */
8dd7f803
SY
3317 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3318
8c1c699f 3319 rc = pci_dev_reset(dev, 0);
8dd7f803
SY
3320
3321 pci_restore_state(dev);
8dd7f803 3322
8c1c699f 3323 return rc;
8dd7f803
SY
3324}
3325EXPORT_SYMBOL_GPL(pci_reset_function);
3326
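/*
 * Usage sketch (hypothetical code): a path that needs a quiesced, freshly
 * initialised function, e.g. before handing it to a guest, can rely on
 * pci_reset_function() saving and restoring config space by itself.
 *
 *	if (pci_reset_function(pdev))
 *		dev_warn(&pdev->dev, "no function reset method available\n");
 */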
d556ad4b
PO
3327/**
3328 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3329 * @dev: PCI device to query
3330 *
3331 * Returns mmrbc: maximum designed memory read count in bytes
3332 * or appropriate error value.
3333 */
3334int pcix_get_max_mmrbc(struct pci_dev *dev)
3335{
7c9e2b1c 3336 int cap;
d556ad4b
PO
3337 u32 stat;
3338
3339 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3340 if (!cap)
3341 return -EINVAL;
3342
7c9e2b1c 3343 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
d556ad4b
PO
3344 return -EINVAL;
3345
25daeb55 3346 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
d556ad4b
PO
3347}
3348EXPORT_SYMBOL(pcix_get_max_mmrbc);
3349
3350/**
3351 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3352 * @dev: PCI device to query
3353 *
3354 * Returns mmrbc: maximum memory read count in bytes
3355 * or appropriate error value.
3356 */
3357int pcix_get_mmrbc(struct pci_dev *dev)
3358{
7c9e2b1c 3359 int cap;
bdc2bda7 3360 u16 cmd;
d556ad4b
PO
3361
3362 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3363 if (!cap)
3364 return -EINVAL;
3365
7c9e2b1c
DN
3366 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3367 return -EINVAL;
d556ad4b 3368
7c9e2b1c 3369 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
d556ad4b
PO
3370}
3371EXPORT_SYMBOL(pcix_get_mmrbc);
3372
3373/**
3374 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3375 * @dev: PCI device to query
3376 * @mmrbc: maximum memory read count in bytes
3377 * valid values are 512, 1024, 2048, 4096
3378 *
3379 * If possible, sets the maximum memory read byte count; some bridges have errata
3380 * that prevent this.
3381 */
3382int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3383{
7c9e2b1c 3384 int cap;
bdc2bda7
DN
3385 u32 stat, v, o;
3386 u16 cmd;
d556ad4b 3387
229f5afd 3388 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
7c9e2b1c 3389 return -EINVAL;
d556ad4b
PO
3390
3391 v = ffs(mmrbc) - 10;
3392
3393 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3394 if (!cap)
7c9e2b1c 3395 return -EINVAL;
d556ad4b 3396
7c9e2b1c
DN
3397 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3398 return -EINVAL;
d556ad4b
PO
3399
3400 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3401 return -E2BIG;
3402
7c9e2b1c
DN
3403 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3404 return -EINVAL;
d556ad4b
PO
3405
3406 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3407 if (o != v) {
3408 if (v > o && dev->bus &&
3409 (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3410 return -EIO;
3411
3412 cmd &= ~PCI_X_CMD_MAX_READ;
3413 cmd |= v << 2;
7c9e2b1c
DN
3414 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3415 return -EIO;
d556ad4b 3416 }
7c9e2b1c 3417 return 0;
d556ad4b
PO
3418}
3419EXPORT_SYMBOL(pcix_set_mmrbc);
3420
3421/**
3422 * pcie_get_readrq - get PCI Express read request size
3423 * @dev: PCI device to query
3424 *
3425 * Returns maximum memory read request in bytes
3426 * or appropriate error value.
3427 */
3428int pcie_get_readrq(struct pci_dev *dev)
3429{
3430 int ret, cap;
3431 u16 ctl;
3432
06a1cbaf 3433 cap = pci_pcie_cap(dev);
d556ad4b
PO
3434 if (!cap)
3435 return -EINVAL;
3436
3437 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3438 if (!ret)
93e75fab 3439 ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
d556ad4b
PO
3440
3441 return ret;
3442}
3443EXPORT_SYMBOL(pcie_get_readrq);
3444
3445/**
3446 * pcie_set_readrq - set PCI Express maximum memory read request
3447 * @dev: PCI device to query
42e61f4a 3448 * @rq: maximum memory read count in bytes
d556ad4b
PO
3449 * valid values are 128, 256, 512, 1024, 2048, 4096
3450 *
c9b378c7 3451 * If possible sets maximum memory read request in bytes
d556ad4b
PO
3452 */
3453int pcie_set_readrq(struct pci_dev *dev, int rq)
3454{
3455 int cap, err = -EINVAL;
3456 u16 ctl, v;
3457
229f5afd 3458 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
d556ad4b
PO
3459 goto out;
3460
06a1cbaf 3461 cap = pci_pcie_cap(dev);
d556ad4b
PO
3462 if (!cap)
3463 goto out;
3464
3465 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3466 if (err)
3467 goto out;
a1c473aa
BH
3468 /*
3469 * If using the "performance" PCIe config, we clamp the
3470 * read rq size to the max packet size to prevent the
3471 * host bridge generating requests larger than we can
3472 * cope with
3473 */
3474 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
3475 int mps = pcie_get_mps(dev);
3476
3477 if (mps < 0)
3478 return mps;
3479 if (mps < rq)
3480 rq = mps;
3481 }
3482
3483 v = (ffs(rq) - 8) << 12;
d556ad4b
PO
3484
3485 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
3486 ctl &= ~PCI_EXP_DEVCTL_READRQ;
3487 ctl |= v;
c9b378c7 3488 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
d556ad4b
PO
3489 }
3490
3491out:
3492 return err;
3493}
3494EXPORT_SYMBOL(pcie_set_readrq);
3495
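/*
 * Usage sketch (hypothetical driver code, the value is illustrative):
 * request a larger read request size and report what is actually in
 * effect if the device or fabric does not allow it.
 *
 *	if (pcie_set_readrq(pdev, 4096))
 *		dev_dbg(&pdev->dev, "MRRS left at %d bytes\n",
 *			pcie_get_readrq(pdev));
 */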
b03e7495
JM
3496/**
3497 * pcie_get_mps - get PCI Express maximum payload size
3498 * @dev: PCI device to query
3499 *
3500 * Returns maximum payload size in bytes
3501 * or appropriate error value.
3502 */
3503int pcie_get_mps(struct pci_dev *dev)
3504{
3505 int ret, cap;
3506 u16 ctl;
3507
3508 cap = pci_pcie_cap(dev);
3509 if (!cap)
3510 return -EINVAL;
3511
3512 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3513 if (!ret)
3514 ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3515
3516 return ret;
3517}
3518
3519/**
3520 * pcie_set_mps - set PCI Express maximum payload size
3521 * @dev: PCI device to query
47c08f31 3522 * @mps: maximum payload size in bytes
b03e7495
JM
3523 * valid values are 128, 256, 512, 1024, 2048, 4096
3524 *
3525 * If possible sets maximum payload size
3526 */
3527int pcie_set_mps(struct pci_dev *dev, int mps)
3528{
3529 int cap, err = -EINVAL;
3530 u16 ctl, v;
3531
3532 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3533 goto out;
3534
3535 v = ffs(mps) - 8;
3536 if (v > dev->pcie_mpss)
3537 goto out;
3538 v <<= 5;
3539
3540 cap = pci_pcie_cap(dev);
3541 if (!cap)
3542 goto out;
3543
3544 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3545 if (err)
3546 goto out;
3547
3548 if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3549 ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3550 ctl |= v;
3551 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3552 }
3553out:
3554 return err;
3555}
3556
c87deff7
HS
3557/**
3558 * pci_select_bars - Make BAR mask from the type of resource
f95d882d 3559 * @dev: the PCI device for which BAR mask is made
c87deff7
HS
3560 * @flags: resource type mask to be selected
3561 *
3563 * This helper routine makes a BAR mask from the type of resource.
3563 */
3564int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3565{
3566 int i, bars = 0;
3567 for (i = 0; i < PCI_NUM_RESOURCES; i++)
3568 if (pci_resource_flags(dev, i) & flags)
3569 bars |= (1 << i);
3570 return bars;
3571}
3572
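/*
 * Usage sketch (hypothetical driver code, "foo" is a placeholder): reserve
 * only the memory BARs of a device and leave any I/O port BARs alone.
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *	int err = pci_request_selected_regions(pdev, bars, "foo");
 */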
613e7ed6
YZ
3573/**
3574 * pci_resource_bar - get position of the BAR associated with a resource
3575 * @dev: the PCI device
3576 * @resno: the resource number
3577 * @type: the BAR type to be filled in
3578 *
3579 * Returns BAR position in config space, or 0 if the BAR is invalid.
3580 */
3581int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3582{
d1b054da
YZ
3583 int reg;
3584
613e7ed6
YZ
3585 if (resno < PCI_ROM_RESOURCE) {
3586 *type = pci_bar_unknown;
3587 return PCI_BASE_ADDRESS_0 + 4 * resno;
3588 } else if (resno == PCI_ROM_RESOURCE) {
3589 *type = pci_bar_mem32;
3590 return dev->rom_base_reg;
3591 } else if (resno < PCI_BRIDGE_RESOURCES) {
3592 /* device specific resource */
3593 reg = pci_iov_resource_bar(dev, resno, type);
3594 if (reg)
3595 return reg;
3596 }
3597
865df576 3598 dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
3599 return 0;
3600}
3601
3602/* Some architectures require additional programming to enable VGA */
3603static arch_set_vga_state_t arch_set_vga_state;
3604
3605void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3606{
3607 arch_set_vga_state = func; /* NULL disables */
3608}
3609
3610static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
7ad35cf2 3611 unsigned int command_bits, u32 flags)
3612{
3613 if (arch_set_vga_state)
3614 return arch_set_vga_state(dev, decode, command_bits,
7ad35cf2 3615 flags);
3616 return 0;
3617}
3618
3619/**
3620 * pci_set_vga_state - set VGA decode state on device and parents if requested
3621 * @dev: the PCI device
3622 * @decode: true = enable decoding, false = disable decoding
3623 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3f37d622 3624 * @flags: traverse ancestors and change bridges
3448a19d 3625 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
3626 */
3627int pci_set_vga_state(struct pci_dev *dev, bool decode,
3448a19d 3628 unsigned int command_bits, u32 flags)
3629{
3630 struct pci_bus *bus;
3631 struct pci_dev *bridge;
3632 u16 cmd;
95a8b6ef 3633 int rc;
deb2d2ec 3634
3448a19d 3635 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
deb2d2ec 3636
95a8b6ef 3637 /* ARCH specific VGA enables */
3448a19d 3638 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
3639 if (rc)
3640 return rc;
3641
3642 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3643 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3644 if (decode == true)
3645 cmd |= command_bits;
3646 else
3647 cmd &= ~command_bits;
3648 pci_write_config_word(dev, PCI_COMMAND, cmd);
3649 }
deb2d2ec 3650
3448a19d 3651 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
3652 return 0;
3653
3654 bus = dev->bus;
3655 while (bus) {
3656 bridge = bus->self;
3657 if (bridge) {
3658 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3659 &cmd);
3660 if (decode == true)
3661 cmd |= PCI_BRIDGE_CTL_VGA;
3662 else
3663 cmd &= ~PCI_BRIDGE_CTL_VGA;
3664 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3665 cmd);
3666 }
3667 bus = bus->parent;
3668 }
3669 return 0;
3670}
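/*
 * Illustrative sketch, not part of the original file: how a graphics
 * driver might turn off legacy VGA decoding for its device and the
 * bridges above it.  "foo_disable_vga" is a hypothetical name; the flag
 * constants are the ones tested in pci_set_vga_state() above.
 */
static int __maybe_unused foo_disable_vga(struct pci_dev *pdev)
{
	return pci_set_vga_state(pdev, false,
				 PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
				 PCI_VGA_STATE_CHANGE_DECODES |
				 PCI_VGA_STATE_CHANGE_BRIDGE);
}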
3671
3672#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3673static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
e9d1e492 3674static DEFINE_SPINLOCK(resource_alignment_lock);
3675
3676/**
3677 * pci_specified_resource_alignment - get resource alignment specified by user.
3678 * @dev: the PCI device to get
3679 *
3680 * RETURNS: Resource alignment if it is specified.
3681 * Zero if it is not specified.
3682 */
3683resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
3684{
3685 int seg, bus, slot, func, align_order, count;
3686 resource_size_t align = 0;
3687 char *p;
3688
3689 spin_lock(&resource_alignment_lock);
3690 p = resource_alignment_param;
3691 while (*p) {
3692 count = 0;
3693 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
3694 p[count] == '@') {
3695 p += count + 1;
3696 } else {
3697 align_order = -1;
3698 }
3699 if (sscanf(p, "%x:%x:%x.%x%n",
3700 &seg, &bus, &slot, &func, &count) != 4) {
3701 seg = 0;
3702 if (sscanf(p, "%x:%x.%x%n",
3703 &bus, &slot, &func, &count) != 3) {
3704 /* Invalid format */
3705 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
3706 p);
3707 break;
3708 }
3709 }
3710 p += count;
3711 if (seg == pci_domain_nr(dev->bus) &&
3712 bus == dev->bus->number &&
3713 slot == PCI_SLOT(dev->devfn) &&
3714 func == PCI_FUNC(dev->devfn)) {
3715 if (align_order == -1) {
3716 align = PAGE_SIZE;
3717 } else {
3718 align = 1 << align_order;
3719 }
3720 /* Found */
3721 break;
3722 }
3723 if (*p != ';' && *p != ',') {
3724 /* End of param or invalid format */
3725 break;
3726 }
3727 p++;
3728 }
3729 spin_unlock(&resource_alignment_lock);
3730 return align;
3731}
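/*
 * Illustrative examples, not part of the original file, of the syntax
 * accepted by the parser above for the 'pci=resource_alignment=' boot
 * parameter (device addresses and alignment orders are made up):
 *
 *	pci=resource_alignment=04:00.0		page-align BARs of 04:00.0
 *	pci=resource_alignment=20@0000:04:00.0	2^20-byte alignment, with domain
 *	pci=resource_alignment=04:00.0;05:00.0	several devices, separated by ';'
 */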
3732
3733/**
3734 * pci_is_reassigndev - check if specified PCI is target device to reassign
3735 * @dev: the PCI device to check
3736 *
 3737 * RETURNS: non-zero if the PCI device is a target device to reassign,
 3738 * zero otherwise.
3739 */
3740int pci_is_reassigndev(struct pci_dev *dev)
3741{
3742 return (pci_specified_resource_alignment(dev) != 0);
3743}
3744
3745/*
3746 * This function disables memory decoding and releases memory resources
3747 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
3748 * It also rounds up size to specified alignment.
3749 * Later on, the kernel will assign page-aligned memory resource back
3750 * to the device.
3751 */
3752void pci_reassigndev_resource_alignment(struct pci_dev *dev)
3753{
3754 int i;
3755 struct resource *r;
3756 resource_size_t align, size;
3757 u16 command;
3758
3759 if (!pci_is_reassigndev(dev))
3760 return;
3761
3762 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
3763 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
3764 dev_warn(&dev->dev,
3765 "Can't reassign resources to host bridge.\n");
3766 return;
3767 }
3768
3769 dev_info(&dev->dev,
3770 "Disabling memory decoding and releasing memory resources.\n");
3771 pci_read_config_word(dev, PCI_COMMAND, &command);
3772 command &= ~PCI_COMMAND_MEMORY;
3773 pci_write_config_word(dev, PCI_COMMAND, command);
3774
3775 align = pci_specified_resource_alignment(dev);
3776 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
3777 r = &dev->resource[i];
3778 if (!(r->flags & IORESOURCE_MEM))
3779 continue;
3780 size = resource_size(r);
3781 if (size < align) {
3782 size = align;
3783 dev_info(&dev->dev,
3784 "Rounding up size of resource #%d to %#llx.\n",
3785 i, (unsigned long long)size);
3786 }
3787 r->end = size - 1;
3788 r->start = 0;
3789 }
 3790 /* Need to disable the bridge's resource windows,
 3791 * so that the kernel can reassign new resource
 3792 * windows later on.
 3793 */
3794 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
3795 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
3796 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
3797 r = &dev->resource[i];
3798 if (!(r->flags & IORESOURCE_MEM))
3799 continue;
3800 r->end = resource_size(r) - 1;
3801 r->start = 0;
3802 }
3803 pci_disable_bridge_window(dev);
3804 }
3805}
3806
3807ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3808{
3809 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3810 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3811 spin_lock(&resource_alignment_lock);
3812 strncpy(resource_alignment_param, buf, count);
3813 resource_alignment_param[count] = '\0';
3814 spin_unlock(&resource_alignment_lock);
3815 return count;
3816}
3817
3818ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3819{
3820 size_t count;
3821 spin_lock(&resource_alignment_lock);
3822 count = snprintf(buf, size, "%s", resource_alignment_param);
3823 spin_unlock(&resource_alignment_lock);
3824 return count;
3825}
3826
3827static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
3828{
3829 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
3830}
3831
3832static ssize_t pci_resource_alignment_store(struct bus_type *bus,
3833 const char *buf, size_t count)
3834{
3835 return pci_set_resource_alignment_param(buf, count);
3836}
3837
3838BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
3839 pci_resource_alignment_store);
3840
3841static int __init pci_resource_alignment_sysfs_init(void)
3842{
3843 return bus_create_file(&pci_bus_type,
3844 &bus_attr_resource_alignment);
3845}
3846
3847late_initcall(pci_resource_alignment_sysfs_init);
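/*
 * Illustrative note, not part of the original file: the attribute
 * registered above shows up as /sys/bus/pci/resource_alignment, so the
 * same string accepted by the boot parameter can be read or written at
 * run time, e.g. (shell, values hypothetical):
 *
 *	# echo "20@0000:04:00.0" > /sys/bus/pci/resource_alignment
 *	# cat /sys/bus/pci/resource_alignment
 */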
3848
3849static void __devinit pci_no_domains(void)
3850{
3851#ifdef CONFIG_PCI_DOMAINS
3852 pci_domains_supported = 0;
3853#endif
3854}
3855
3856/**
 3857 * pci_ext_cfg_avail - can we access extended PCI config space?
3858 * @dev: The PCI device of the root bridge.
3859 *
3860 * Returns 1 if we can access PCI extended config space (offsets
3861 * greater than 0xff). This is the default implementation. Architecture
3862 * implementations can override this.
3863 */
3864int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
3865{
3866 return 1;
3867}
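/*
 * Illustrative sketch, not part of the original file: since
 * pci_ext_cfg_avail() is declared weak, an architecture that cannot
 * reach config offsets above 0xff may supply its own strong definition,
 * e.g. (hypothetical arch code):
 *
 *	int pci_ext_cfg_avail(struct pci_dev *dev)
 *	{
 *		return 0;
 *	}
 */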
3868
3869void __weak pci_fixup_cardbus(struct pci_bus *bus)
3870{
3871}
3872EXPORT_SYMBOL(pci_fixup_cardbus);
3873
ad04d31e 3874static int __init pci_setup(char *str)
3875{
3876 while (str) {
3877 char *k = strchr(str, ',');
3878 if (k)
3879 *k++ = 0;
3880 if (*str && (str = pcibios_setup(str)) && *str) {
3881 if (!strcmp(str, "nomsi")) {
3882 pci_no_msi();
3883 } else if (!strcmp(str, "noaer")) {
3884 pci_no_aer();
3885 } else if (!strncmp(str, "realloc=", 8)) {
3886 pci_realloc_get_opt(str + 8);
f483d392 3887 } else if (!strncmp(str, "realloc", 7)) {
b55438fd 3888 pci_realloc_get_opt("on");
3889 } else if (!strcmp(str, "nodomains")) {
3890 pci_no_domains();
3891 } else if (!strncmp(str, "noari", 5)) {
3892 pcie_ari_disabled = true;
3893 } else if (!strncmp(str, "cbiosize=", 9)) {
3894 pci_cardbus_io_size = memparse(str + 9, &str);
3895 } else if (!strncmp(str, "cbmemsize=", 10)) {
3896 pci_cardbus_mem_size = memparse(str + 10, &str);
3897 } else if (!strncmp(str, "resource_alignment=", 19)) {
3898 pci_set_resource_alignment_param(str + 19,
3899 strlen(str + 19));
3900 } else if (!strncmp(str, "ecrc=", 5)) {
3901 pcie_ecrc_get_policy(str + 5);
3902 } else if (!strncmp(str, "hpiosize=", 9)) {
3903 pci_hotplug_io_size = memparse(str + 9, &str);
3904 } else if (!strncmp(str, "hpmemsize=", 10)) {
3905 pci_hotplug_mem_size = memparse(str + 10, &str);
3906 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
3907 pcie_bus_config = PCIE_BUS_TUNE_OFF;
3908 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
3909 pcie_bus_config = PCIE_BUS_SAFE;
3910 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
3911 pcie_bus_config = PCIE_BUS_PERFORMANCE;
3912 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
3913 pcie_bus_config = PCIE_BUS_PEER2PEER;
3914 } else if (!strncmp(str, "pcie_scan_all", 13)) {
3915 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
3916 } else {
3917 printk(KERN_ERR "PCI: Unknown option `%s'\n",
3918 str);
3919 }
3920 }
3921 str = k;
3922 }
0637a70a 3923 return 0;
1da177e4 3924}
0637a70a 3925early_param("pci", pci_setup);
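/*
 * Illustrative examples, not part of the original file, of command lines
 * handled by pci_setup() above; options are comma separated and the
 * values shown are made up:
 *
 *	pci=nomsi,noaer
 *	pci=hpmemsize=8M,hpiosize=512
 *	pci=pcie_bus_safe,pcie_scan_all
 *	pci=resource_alignment=20@04:00.0
 */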
1da177e4 3926
0b62e13b 3927EXPORT_SYMBOL(pci_reenable_device);
3928EXPORT_SYMBOL(pci_enable_device_io);
3929EXPORT_SYMBOL(pci_enable_device_mem);
1da177e4 3930EXPORT_SYMBOL(pci_enable_device);
3931EXPORT_SYMBOL(pcim_enable_device);
3932EXPORT_SYMBOL(pcim_pin_device);
1da177e4 3933EXPORT_SYMBOL(pci_disable_device);
3934EXPORT_SYMBOL(pci_find_capability);
3935EXPORT_SYMBOL(pci_bus_find_capability);
3936EXPORT_SYMBOL(pci_release_regions);
3937EXPORT_SYMBOL(pci_request_regions);
e8de1481 3938EXPORT_SYMBOL(pci_request_regions_exclusive);
3939EXPORT_SYMBOL(pci_release_region);
3940EXPORT_SYMBOL(pci_request_region);
e8de1481 3941EXPORT_SYMBOL(pci_request_region_exclusive);
3942EXPORT_SYMBOL(pci_release_selected_regions);
3943EXPORT_SYMBOL(pci_request_selected_regions);
e8de1481 3944EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
1da177e4 3945EXPORT_SYMBOL(pci_set_master);
6a479079 3946EXPORT_SYMBOL(pci_clear_master);
1da177e4 3947EXPORT_SYMBOL(pci_set_mwi);
694625c0 3948EXPORT_SYMBOL(pci_try_set_mwi);
1da177e4 3949EXPORT_SYMBOL(pci_clear_mwi);
a04ce0ff 3950EXPORT_SYMBOL_GPL(pci_intx);
3951EXPORT_SYMBOL(pci_assign_resource);
3952EXPORT_SYMBOL(pci_find_parent_resource);
c87deff7 3953EXPORT_SYMBOL(pci_select_bars);
3954
3955EXPORT_SYMBOL(pci_set_power_state);
3956EXPORT_SYMBOL(pci_save_state);
3957EXPORT_SYMBOL(pci_restore_state);
e5899e1b 3958EXPORT_SYMBOL(pci_pme_capable);
5a6c9b60 3959EXPORT_SYMBOL(pci_pme_active);
0235c4fc 3960EXPORT_SYMBOL(pci_wake_from_d3);
e5899e1b 3961EXPORT_SYMBOL(pci_target_state);
3962EXPORT_SYMBOL(pci_prepare_to_sleep);
3963EXPORT_SYMBOL(pci_back_from_sleep);
f7bdd12d 3964EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);