PCI: Show driver, BAR#, and resource on pci_ioremap_bar() failure
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000	/* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(res->flags & IORESOURCE_MEM)) {
		dev_warn(&pdev->dev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}
	return ioremap_nocache(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif

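/*
 * Illustrative sketch, not part of this file: how a driver's probe path
 * might use pci_ioremap_bar().  The foo_probe() name and the choice of
 * BAR 0 are hypothetical.
 */
#if 0
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* Returns NULL on failure; a non-memory BAR triggers the warning above. */
	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		pci_disable_device(pdev);
		return -ENOMEM;
	}

	/* A real driver would stash 'regs' in its private state here. */
	return 0;
}
#endif
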
#define PCI_FIND_CAP_TTL	48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

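/*
 * Illustrative sketch, not part of this file: pci_find_capability() plus
 * pci_find_next_capability() can enumerate every instance of a capability
 * that may appear more than once, e.g. the vendor-specific one.  The
 * foo_* name is hypothetical.
 */
#if 0
static void foo_walk_vendor_caps(struct pci_dev *pdev)
{
	int pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR);

	while (pos) {
		dev_info(&pdev->dev, "vendor capability at %#x\n", pos);
		pos = pci_find_next_capability(pdev, pos, PCI_CAP_ID_VNDR);
	}
}
#endif
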
static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM      Power Management
 *  %PCI_CAP_ID_AGP     Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD     Vital Product Data
 *  %PCI_CAP_ID_SLOTID  Slot Identification
 *  %PCI_CAP_ID_MSI     Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP   CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX    PCI-X
 *  %PCI_CAP_ID_EXP     PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);

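/*
 * Illustrative sketch, not part of this file: locating a capability with
 * pci_find_capability() and reading a register relative to it.  The
 * foo_* name is hypothetical.
 */
#if 0
static u16 foo_read_pm_caps(struct pci_dev *pdev)
{
	u16 pmc = 0;
	int pos = pci_find_capability(pdev, PCI_CAP_ID_PM);

	if (pos)
		pci_read_config_word(pdev, pos + PCI_PM_PMC, &pmc);

	return pmc;
}
#endif
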
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR  Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC   Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN  Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR  Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

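/*
 * Illustrative sketch, not part of this file: extended capabilities live
 * above the first 256 bytes of config space and are looked up the same
 * way.  The foo_* name is hypothetical.
 */
#if 0
static bool foo_has_aer(struct pci_dev *pdev)
{
	return pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR) != 0;
}
#endif
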
687d5fe3
ME
334static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
335{
336 int rc, ttl = PCI_FIND_CAP_TTL;
337 u8 cap, mask;
338
339 if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
340 mask = HT_3BIT_CAP_MASK;
341 else
342 mask = HT_5BIT_CAP_MASK;
343
344 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
345 PCI_CAP_ID_HT, &ttl);
346 while (pos) {
347 rc = pci_read_config_byte(dev, pos + 3, &cap);
348 if (rc != PCIBIOS_SUCCESSFUL)
349 return 0;
350
351 if ((cap & mask) == ht_cap)
352 return pos;
353
47a4d5be
BG
354 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
355 pos + PCI_CAP_LIST_NEXT,
687d5fe3
ME
356 PCI_CAP_ID_HT, &ttl);
357 }
358
359 return 0;
360}
361/**
362 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
363 * @dev: PCI device to query
364 * @pos: Position from which to continue searching
365 * @ht_cap: Hypertransport capability code
366 *
367 * To be used in conjunction with pci_find_ht_capability() to search for
368 * all capabilities matching @ht_cap. @pos should always be a value returned
369 * from pci_find_ht_capability().
370 *
371 * NB. To be 100% safe against broken PCI devices, the caller should take
372 * steps to avoid an infinite loop.
373 */
374int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
375{
376 return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
377}
378EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
379
380/**
381 * pci_find_ht_capability - query a device's Hypertransport capabilities
382 * @dev: PCI device to query
383 * @ht_cap: Hypertransport capability code
384 *
385 * Tell if a device supports a given Hypertransport capability.
386 * Returns an address within the device's PCI configuration space
387 * or 0 in case the device does not support the request capability.
388 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
389 * which has a Hypertransport capability matching @ht_cap.
390 */
391int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
392{
393 int pos;
394
395 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
396 if (pos)
397 pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
398
399 return pos;
400}
401EXPORT_SYMBOL_GPL(pci_find_ht_capability);
402
1da177e4
LT
403/**
404 * pci_find_parent_resource - return resource region of parent bus of given region
405 * @dev: PCI device structure contains resources to be searched
406 * @res: child resource record for which parent is sought
407 *
408 * For given resource region of given device, return the resource
f44116ae 409 * region of parent bus the given region is contained in.
1da177e4 410 */
3c78bc61
RD
411struct resource *pci_find_parent_resource(const struct pci_dev *dev,
412 struct resource *res)
1da177e4
LT
413{
414 const struct pci_bus *bus = dev->bus;
f44116ae 415 struct resource *r;
1da177e4 416 int i;
1da177e4 417
89a74ecc 418 pci_bus_for_each_resource(bus, r, i) {
1da177e4
LT
419 if (!r)
420 continue;
f44116ae
BH
421 if (res->start && resource_contains(r, res)) {
422
423 /*
424 * If the window is prefetchable but the BAR is
425 * not, the allocator made a mistake.
426 */
427 if (r->flags & IORESOURCE_PREFETCH &&
428 !(res->flags & IORESOURCE_PREFETCH))
429 return NULL;
430
431 /*
432 * If we're below a transparent bridge, there may
433 * be both a positively-decoded aperture and a
434 * subtractively-decoded region that contain the BAR.
435 * We want the positively-decoded one, so this depends
436 * on pci_bus_for_each_resource() giving us those
437 * first.
438 */
439 return r;
440 }
1da177e4 441 }
f44116ae 442 return NULL;
1da177e4 443}
b7fe9434 444EXPORT_SYMBOL(pci_find_parent_resource);
1da177e4 445
/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when the mask bit(s) in the status word are clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for the Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}

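/*
 * Illustrative sketch, not part of this file: pci_wait_for_pending() polls a
 * status word with an exponential back-off (several hundred milliseconds in
 * total).  A caller might use it to wait for the PCIe "Transactions Pending"
 * bit before a reset.  The foo_* name is hypothetical.
 */
#if 0
static bool foo_wait_for_pcie_idle(struct pci_dev *pdev)
{
	if (!pci_is_pcie(pdev))
		return true;

	return pci_wait_for_pending(pdev, pci_pcie_cap(pdev) + PCI_EXP_DEVSTA,
				    PCI_EXP_DEVSTA_TRPND);
}
#endif
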
/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

961d9120
RW
487static struct pci_platform_pm_ops *pci_platform_pm;
488
489int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
490{
eb9d0fe4 491 if (!ops->is_manageable || !ops->set_state || !ops->choose_state
d2e5f0c1 492 || !ops->sleep_wake)
961d9120
RW
493 return -EINVAL;
494 pci_platform_pm = ops;
495 return 0;
496}
497
498static inline bool platform_pci_power_manageable(struct pci_dev *dev)
499{
500 return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
501}
502
503static inline int platform_pci_set_power_state(struct pci_dev *dev,
3c78bc61 504 pci_power_t t)
961d9120
RW
505{
506 return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
507}
508
509static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
510{
511 return pci_platform_pm ?
512 pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
513}
8f7020d3 514
eb9d0fe4
RW
515static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
516{
517 return pci_platform_pm ?
518 pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
519}
520
b67ea761
RW
521static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
522{
523 return pci_platform_pm ?
524 pci_platform_pm->run_wake(dev, enable) : -ENODEV;
525}
526
bac2a909
RW
527static inline bool platform_pci_need_resume(struct pci_dev *dev)
528{
529 return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
530}
531
1da177e4 532/**
44e4e66e
RW
533 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
534 * given PCI device
535 * @dev: PCI device to handle.
44e4e66e 536 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
1da177e4 537 *
44e4e66e
RW
538 * RETURN VALUE:
539 * -EINVAL if the requested state is invalid.
540 * -EIO if device does not support PCI PM or its PM capabilities register has a
541 * wrong version, or device doesn't support the requested state.
542 * 0 if device already is in the requested state.
543 * 0 if device's power state has been successfully changed.
1da177e4 544 */
f00a20ef 545static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
1da177e4 546{
337001b6 547 u16 pmcsr;
44e4e66e 548 bool need_restore = false;
1da177e4 549
4a865905
RW
550 /* Check if we're already there */
551 if (dev->current_state == state)
552 return 0;
553
337001b6 554 if (!dev->pm_cap)
cca03dec
AL
555 return -EIO;
556
44e4e66e
RW
557 if (state < PCI_D0 || state > PCI_D3hot)
558 return -EINVAL;
559
	/* Validate current state:
	 * Can enter D0 from any state, but we can only go deeper
	 * into sleep if we're already in a low power state
	 */
4a865905 564 if (state != PCI_D0 && dev->current_state <= PCI_D3cold
44e4e66e 565 && dev->current_state > state) {
227f0647
RD
566 dev_err(&dev->dev, "invalid power transition (from state %d to %d)\n",
567 dev->current_state, state);
1da177e4 568 return -EINVAL;
44e4e66e 569 }
1da177e4 570
1da177e4 571 /* check if this device supports the desired state */
337001b6
RW
572 if ((state == PCI_D1 && !dev->d1_support)
573 || (state == PCI_D2 && !dev->d2_support))
3fe9d19f 574 return -EIO;
1da177e4 575
337001b6 576 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
064b53db 577
32a36585 578 /* If we're (effectively) in D3, force entire word to 0.
1da177e4
LT
579 * This doesn't affect PME_Status, disables PME_En, and
580 * sets PowerState to 0.
581 */
32a36585 582 switch (dev->current_state) {
d3535fbb
JL
583 case PCI_D0:
584 case PCI_D1:
585 case PCI_D2:
586 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
587 pmcsr |= state;
588 break;
f62795f1
RW
589 case PCI_D3hot:
590 case PCI_D3cold:
32a36585
JL
591 case PCI_UNKNOWN: /* Boot-up */
592 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
f00a20ef 593 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
44e4e66e 594 need_restore = true;
32a36585 595 /* Fall-through: force to D0 */
32a36585 596 default:
d3535fbb 597 pmcsr = 0;
32a36585 598 break;
1da177e4
LT
599 }
600
601 /* enter specified state */
337001b6 602 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1da177e4
LT
603
604 /* Mandatory power management transition delays */
605 /* see PCI PM 1.1 5.6.1 table 18 */
606 if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
1ae861e6 607 pci_dev_d3_sleep(dev);
1da177e4 608 else if (state == PCI_D2 || dev->current_state == PCI_D2)
aa8c6c93 609 udelay(PCI_PM_D2_DELAY);
1da177e4 610
e13cdbd7
RW
611 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
612 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
613 if (dev->current_state != state && printk_ratelimit())
227f0647
RD
614 dev_info(&dev->dev, "Refused to change power state, currently in D%d\n",
615 dev->current_state);
064b53db 616
448bd857
HY
617 /*
618 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
064b53db
JL
619 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
620 * from D3hot to D0 _may_ perform an internal reset, thereby
621 * going to "D0 Uninitialized" rather than "D0 Initialized".
622 * For example, at least some versions of the 3c905B and the
623 * 3c556B exhibit this behaviour.
624 *
625 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
626 * devices in a D3hot state at boot. Consequently, we need to
627 * restore at least the BARs so that the device will be
628 * accessible to its driver.
629 */
630 if (need_restore)
631 pci_restore_bars(dev);
632
f00a20ef 633 if (dev->bus->self)
7d715a6c
SL
634 pcie_aspm_pm_state_change(dev->bus->self);
635
1da177e4
LT
636 return 0;
637}
638
44e4e66e
RW
639/**
640 * pci_update_current_state - Read PCI power state of given device from its
641 * PCI PM registers and cache it
642 * @dev: PCI device to handle.
f06fc0b6 643 * @state: State to cache in case the device doesn't have the PM capability
44e4e66e 644 */
73410429 645void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
44e4e66e 646{
337001b6 647 if (dev->pm_cap) {
44e4e66e
RW
648 u16 pmcsr;
649
448bd857
HY
650 /*
651 * Configuration space is not accessible for device in
652 * D3cold, so just keep or set D3cold for safety
653 */
654 if (dev->current_state == PCI_D3cold)
655 return;
656 if (state == PCI_D3cold) {
657 dev->current_state = PCI_D3cold;
658 return;
659 }
337001b6 660 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
44e4e66e 661 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
f06fc0b6
RW
662 } else {
663 dev->current_state = state;
44e4e66e
RW
664 }
665}
666
db288c9c
RW
667/**
668 * pci_power_up - Put the given device into D0 forcibly
669 * @dev: PCI device to power up
670 */
671void pci_power_up(struct pci_dev *dev)
672{
673 if (platform_pci_power_manageable(dev))
674 platform_pci_set_power_state(dev, PCI_D0);
675
676 pci_raw_set_power_state(dev, PCI_D0);
677 pci_update_current_state(dev, PCI_D0);
678}
679
0e5dd46b
RW
680/**
681 * pci_platform_power_transition - Use platform to change device power state
682 * @dev: PCI device to handle.
683 * @state: State to put the device into.
684 */
685static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
686{
687 int error;
688
689 if (platform_pci_power_manageable(dev)) {
690 error = platform_pci_set_power_state(dev, state);
691 if (!error)
692 pci_update_current_state(dev, state);
769ba721 693 } else
0e5dd46b 694 error = -ENODEV;
769ba721
RW
695
696 if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
697 dev->current_state = PCI_D0;
0e5dd46b
RW
698
699 return error;
700}
701
0b950f0f
SH
702/**
703 * pci_wakeup - Wake up a PCI device
704 * @pci_dev: Device to handle.
705 * @ign: ignored parameter
706 */
707static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
708{
709 pci_wakeup_event(pci_dev);
710 pm_request_resume(&pci_dev->dev);
711 return 0;
712}
713
714/**
715 * pci_wakeup_bus - Walk given bus and wake up devices on it
716 * @bus: Top bus of the subtree to walk.
717 */
718static void pci_wakeup_bus(struct pci_bus *bus)
719{
720 if (bus)
721 pci_walk_bus(bus, pci_wakeup, NULL);
722}
723
0e5dd46b
RW
724/**
725 * __pci_start_power_transition - Start power transition of a PCI device
726 * @dev: PCI device to handle.
727 * @state: State to put the device into.
728 */
729static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
730{
448bd857 731 if (state == PCI_D0) {
0e5dd46b 732 pci_platform_power_transition(dev, PCI_D0);
448bd857
HY
733 /*
734 * Mandatory power management transition delays, see
735 * PCI Express Base Specification Revision 2.0 Section
736 * 6.6.1: Conventional Reset. Do not delay for
737 * devices powered on/off by corresponding bridge,
738 * because have already delayed for the bridge.
739 */
740 if (dev->runtime_d3cold) {
741 msleep(dev->d3cold_delay);
742 /*
743 * When powering on a bridge from D3cold, the
744 * whole hierarchy may be powered on into
745 * D0uninitialized state, resume them to give
746 * them a chance to suspend again
747 */
748 pci_wakeup_bus(dev->subordinate);
749 }
750 }
751}
752
753/**
754 * __pci_dev_set_current_state - Set current state of a PCI device
755 * @dev: Device to handle
756 * @data: pointer to state to be set
757 */
758static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
759{
760 pci_power_t state = *(pci_power_t *)data;
761
762 dev->current_state = state;
763 return 0;
764}
765
766/**
767 * __pci_bus_set_current_state - Walk given bus and set current state of devices
768 * @bus: Top bus of the subtree to walk.
769 * @state: state to be set
770 */
771static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
772{
773 if (bus)
774 pci_walk_bus(bus, __pci_dev_set_current_state, &state);
0e5dd46b
RW
775}
776
777/**
778 * __pci_complete_power_transition - Complete power transition of a PCI device
779 * @dev: PCI device to handle.
780 * @state: State to put the device into.
781 *
782 * This function should not be called directly by device drivers.
783 */
784int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
785{
448bd857
HY
786 int ret;
787
db288c9c 788 if (state <= PCI_D0)
448bd857
HY
789 return -EINVAL;
790 ret = pci_platform_power_transition(dev, state);
791 /* Power off the bridge may power off the whole hierarchy */
792 if (!ret && state == PCI_D3cold)
793 __pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
794 return ret;
0e5dd46b
RW
795}
796EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
797
44e4e66e
RW
798/**
799 * pci_set_power_state - Set the power state of a PCI device
800 * @dev: PCI device to handle.
801 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
802 *
877d0310 803 * Transition a device to a new power state, using the platform firmware and/or
44e4e66e
RW
804 * the device's PCI PM registers.
805 *
806 * RETURN VALUE:
807 * -EINVAL if the requested state is invalid.
808 * -EIO if device does not support PCI PM or its PM capabilities register has a
809 * wrong version, or device doesn't support the requested state.
810 * 0 if device already is in the requested state.
811 * 0 if device's power state has been successfully changed.
812 */
813int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
814{
337001b6 815 int error;
44e4e66e
RW
816
817 /* bound the state we're entering */
448bd857
HY
818 if (state > PCI_D3cold)
819 state = PCI_D3cold;
44e4e66e
RW
820 else if (state < PCI_D0)
821 state = PCI_D0;
822 else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
823 /*
824 * If the device or the parent bridge do not support PCI PM,
825 * ignore the request if we're doing anything other than putting
826 * it into D0 (which would only happen on boot).
827 */
828 return 0;
829
db288c9c
RW
830 /* Check if we're already there */
831 if (dev->current_state == state)
832 return 0;
833
0e5dd46b
RW
834 __pci_start_power_transition(dev, state);
835
979b1791
AC
836 /* This device is quirked not to be put into D3, so
837 don't put it in D3 */
448bd857 838 if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
979b1791 839 return 0;
44e4e66e 840
448bd857
HY
841 /*
842 * To put device in D3cold, we put device into D3hot in native
843 * way, then put device into D3cold with platform ops
844 */
845 error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
846 PCI_D3hot : state);
44e4e66e 847
0e5dd46b
RW
848 if (!__pci_complete_power_transition(dev, state))
849 error = 0;
44e4e66e
RW
850
851 return error;
852}
b7fe9434 853EXPORT_SYMBOL(pci_set_power_state);
44e4e66e 854
1da177e4
LT
/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to the suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!dev->pm_cap)
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);

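/*
 * Illustrative sketch, not part of this file: a legacy (non-dev_pm_ops)
 * suspend hook that combines pci_choose_state() with pci_set_power_state().
 * The foo_* name is hypothetical.
 */
#if 0
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}
#endif
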
89858517
YZ
894#define PCI_EXP_SAVE_REGS 7
895
fd0f7f73
AW
896static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
897 u16 cap, bool extended)
34a4876e
YL
898{
899 struct pci_cap_saved_state *tmp;
34a4876e 900
b67bfe0d 901 hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
fd0f7f73 902 if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
34a4876e
YL
903 return tmp;
904 }
905 return NULL;
906}
907
fd0f7f73
AW
908struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
909{
910 return _pci_find_saved_cap(dev, cap, false);
911}
912
913struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
914{
915 return _pci_find_saved_cap(dev, cap, true);
916}
917
b56a5a23
MT
918static int pci_save_pcie_state(struct pci_dev *dev)
919{
59875ae4 920 int i = 0;
b56a5a23
MT
921 struct pci_cap_saved_state *save_state;
922 u16 *cap;
923
59875ae4 924 if (!pci_is_pcie(dev))
b56a5a23
MT
925 return 0;
926
9f35575d 927 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
b56a5a23 928 if (!save_state) {
e496b617 929 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
b56a5a23
MT
930 return -ENOMEM;
931 }
63f4898a 932
59875ae4
JL
933 cap = (u16 *)&save_state->cap.data[0];
934 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
935 pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
936 pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
937 pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
938 pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
939 pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
940 pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
9cb604ed 941
b56a5a23
MT
942 return 0;
943}
944
945static void pci_restore_pcie_state(struct pci_dev *dev)
946{
59875ae4 947 int i = 0;
b56a5a23
MT
948 struct pci_cap_saved_state *save_state;
949 u16 *cap;
950
951 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
59875ae4 952 if (!save_state)
9cb604ed
MS
953 return;
954
59875ae4
JL
955 cap = (u16 *)&save_state->cap.data[0];
956 pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
957 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
958 pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
959 pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
960 pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
961 pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
962 pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
b56a5a23
MT
963}
964
cc692a5f
SH
965
966static int pci_save_pcix_state(struct pci_dev *dev)
967{
63f4898a 968 int pos;
cc692a5f 969 struct pci_cap_saved_state *save_state;
cc692a5f
SH
970
971 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
972 if (pos <= 0)
973 return 0;
974
f34303de 975 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
cc692a5f 976 if (!save_state) {
e496b617 977 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
cc692a5f
SH
978 return -ENOMEM;
979 }
cc692a5f 980
24a4742f
AW
981 pci_read_config_word(dev, pos + PCI_X_CMD,
982 (u16 *)save_state->cap.data);
63f4898a 983
cc692a5f
SH
984 return 0;
985}
986
987static void pci_restore_pcix_state(struct pci_dev *dev)
988{
989 int i = 0, pos;
990 struct pci_cap_saved_state *save_state;
991 u16 *cap;
992
993 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
994 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
995 if (!save_state || pos <= 0)
996 return;
24a4742f 997 cap = (u16 *)&save_state->cap.data[0];
cc692a5f
SH
998
999 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
cc692a5f
SH
1000}
1001
1002
1da177e4
LT
1003/**
1004 * pci_save_state - save the PCI configuration space of a device before suspending
1005 * @dev: - PCI device that we're dealing with
1da177e4 1006 */
3c78bc61 1007int pci_save_state(struct pci_dev *dev)
1da177e4
LT
1008{
1009 int i;
1010 /* XXX: 100% dword access ok here? */
1011 for (i = 0; i < 16; i++)
9e0b5b2c 1012 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
aa8c6c93 1013 dev->state_saved = true;
79e50e72
QL
1014
1015 i = pci_save_pcie_state(dev);
1016 if (i != 0)
b56a5a23 1017 return i;
79e50e72
QL
1018
1019 i = pci_save_pcix_state(dev);
1020 if (i != 0)
cc692a5f 1021 return i;
79e50e72 1022
754834b9 1023 return pci_save_vc_state(dev);
1da177e4 1024}
b7fe9434 1025EXPORT_SYMBOL(pci_save_state);
1da177e4 1026
ebfc5b80
RW
1027static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1028 u32 saved_val, int retry)
1029{
1030 u32 val;
1031
1032 pci_read_config_dword(pdev, offset, &val);
1033 if (val == saved_val)
1034 return;
1035
1036 for (;;) {
227f0647
RD
1037 dev_dbg(&pdev->dev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
1038 offset, val, saved_val);
ebfc5b80
RW
1039 pci_write_config_dword(pdev, offset, saved_val);
1040 if (retry-- <= 0)
1041 return;
1042
1043 pci_read_config_dword(pdev, offset, &val);
1044 if (val == saved_val)
1045 return;
1046
1047 mdelay(1);
1048 }
1049}
1050
a6cb9ee7
RW
1051static void pci_restore_config_space_range(struct pci_dev *pdev,
1052 int start, int end, int retry)
ebfc5b80
RW
1053{
1054 int index;
1055
1056 for (index = end; index >= start; index--)
1057 pci_restore_config_dword(pdev, 4 * index,
1058 pdev->saved_config_space[index],
1059 retry);
1060}
1061
a6cb9ee7
RW
1062static void pci_restore_config_space(struct pci_dev *pdev)
1063{
1064 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1065 pci_restore_config_space_range(pdev, 10, 15, 0);
1066 /* Restore BARs before the command register. */
1067 pci_restore_config_space_range(pdev, 4, 9, 10);
1068 pci_restore_config_space_range(pdev, 0, 3, 0);
1069 } else {
1070 pci_restore_config_space_range(pdev, 0, 15, 0);
1071 }
1072}
1073
f7625980 1074/**
1da177e4
LT
1075 * pci_restore_state - Restore the saved state of a PCI device
1076 * @dev: - PCI device that we're dealing with
1da177e4 1077 */
1d3c16a8 1078void pci_restore_state(struct pci_dev *dev)
1da177e4 1079{
c82f63e4 1080 if (!dev->state_saved)
1d3c16a8 1081 return;
4b77b0a2 1082
b56a5a23
MT
1083 /* PCI Express register must be restored first */
1084 pci_restore_pcie_state(dev);
1900ca13 1085 pci_restore_ats_state(dev);
425c1b22 1086 pci_restore_vc_state(dev);
b56a5a23 1087
a6cb9ee7 1088 pci_restore_config_space(dev);
ebfc5b80 1089
cc692a5f 1090 pci_restore_pcix_state(dev);
41017f0c 1091 pci_restore_msi_state(dev);
8c5cdb6a 1092 pci_restore_iov_state(dev);
8fed4b65 1093
4b77b0a2 1094 dev->state_saved = false;
1da177e4 1095}
b7fe9434 1096EXPORT_SYMBOL(pci_restore_state);
1da177e4 1097
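/*
 * Illustrative sketch, not part of this file: a resume counterpart that
 * brings the device back to D0 and then restores the config space captured
 * by pci_save_state() at suspend time.  The foo_* name is hypothetical.
 */
#if 0
static int foo_resume(struct pci_dev *pdev)
{
	int err;

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}
#endif
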
ffbdd3f7
AW
1098struct pci_saved_state {
1099 u32 config_space[16];
1100 struct pci_cap_saved_data cap[0];
1101};
1102
1103/**
1104 * pci_store_saved_state - Allocate and return an opaque struct containing
1105 * the device saved state.
1106 * @dev: PCI device that we're dealing with
1107 *
f7625980 1108 * Return NULL if no state or error.
ffbdd3f7
AW
1109 */
1110struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1111{
1112 struct pci_saved_state *state;
1113 struct pci_cap_saved_state *tmp;
1114 struct pci_cap_saved_data *cap;
ffbdd3f7
AW
1115 size_t size;
1116
1117 if (!dev->state_saved)
1118 return NULL;
1119
1120 size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1121
b67bfe0d 1122 hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
ffbdd3f7
AW
1123 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1124
1125 state = kzalloc(size, GFP_KERNEL);
1126 if (!state)
1127 return NULL;
1128
1129 memcpy(state->config_space, dev->saved_config_space,
1130 sizeof(state->config_space));
1131
1132 cap = state->cap;
b67bfe0d 1133 hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
ffbdd3f7
AW
1134 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1135 memcpy(cap, &tmp->cap, len);
1136 cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1137 }
1138 /* Empty cap_save terminates list */
1139
1140 return state;
1141}
1142EXPORT_SYMBOL_GPL(pci_store_saved_state);
1143
1144/**
1145 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1146 * @dev: PCI device that we're dealing with
1147 * @state: Saved state returned from pci_store_saved_state()
1148 */
98d9b271
KRW
1149int pci_load_saved_state(struct pci_dev *dev,
1150 struct pci_saved_state *state)
ffbdd3f7
AW
1151{
1152 struct pci_cap_saved_data *cap;
1153
1154 dev->state_saved = false;
1155
1156 if (!state)
1157 return 0;
1158
1159 memcpy(dev->saved_config_space, state->config_space,
1160 sizeof(state->config_space));
1161
1162 cap = state->cap;
1163 while (cap->size) {
1164 struct pci_cap_saved_state *tmp;
1165
fd0f7f73 1166 tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
ffbdd3f7
AW
1167 if (!tmp || tmp->cap.size != cap->size)
1168 return -EINVAL;
1169
1170 memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1171 cap = (struct pci_cap_saved_data *)((u8 *)cap +
1172 sizeof(struct pci_cap_saved_data) + cap->size);
1173 }
1174
1175 dev->state_saved = true;
1176 return 0;
1177}
98d9b271 1178EXPORT_SYMBOL_GPL(pci_load_saved_state);
ffbdd3f7
AW
1179
1180/**
1181 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1182 * and free the memory allocated for it.
1183 * @dev: PCI device that we're dealing with
1184 * @state: Pointer to saved state returned from pci_store_saved_state()
1185 */
1186int pci_load_and_free_saved_state(struct pci_dev *dev,
1187 struct pci_saved_state **state)
1188{
1189 int ret = pci_load_saved_state(dev, *state);
1190 kfree(*state);
1191 *state = NULL;
1192 return ret;
1193}
1194EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
1195
8a9d5609
BH
1196int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1197{
1198 return pci_enable_resources(dev, bars);
1199}
1200
38cc1302
HS
1201static int do_pci_enable_device(struct pci_dev *dev, int bars)
1202{
1203 int err;
1f6ae47e 1204 struct pci_dev *bridge;
1e2571a7
BH
1205 u16 cmd;
1206 u8 pin;
38cc1302
HS
1207
1208 err = pci_set_power_state(dev, PCI_D0);
1209 if (err < 0 && err != -EIO)
1210 return err;
1f6ae47e
VS
1211
1212 bridge = pci_upstream_bridge(dev);
1213 if (bridge)
1214 pcie_aspm_powersave_config_link(bridge);
1215
38cc1302
HS
1216 err = pcibios_enable_device(dev, bars);
1217 if (err < 0)
1218 return err;
1219 pci_fixup_device(pci_fixup_enable, dev);
1220
866d5417
BH
1221 if (dev->msi_enabled || dev->msix_enabled)
1222 return 0;
1223
1e2571a7
BH
1224 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1225 if (pin) {
1226 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1227 if (cmd & PCI_COMMAND_INTX_DISABLE)
1228 pci_write_config_word(dev, PCI_COMMAND,
1229 cmd & ~PCI_COMMAND_INTX_DISABLE);
1230 }
1231
38cc1302
HS
1232 return 0;
1233}
1234
1235/**
0b62e13b 1236 * pci_reenable_device - Resume abandoned device
38cc1302
HS
1237 * @dev: PCI device to be resumed
1238 *
1239 * Note this function is a backend of pci_default_resume and is not supposed
1240 * to be called by normal code, write proper resume handler and use it instead.
1241 */
0b62e13b 1242int pci_reenable_device(struct pci_dev *dev)
38cc1302 1243{
296ccb08 1244 if (pci_is_enabled(dev))
38cc1302
HS
1245 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1246 return 0;
1247}
b7fe9434 1248EXPORT_SYMBOL(pci_reenable_device);
38cc1302 1249
928bea96
YL
1250static void pci_enable_bridge(struct pci_dev *dev)
1251{
79272138 1252 struct pci_dev *bridge;
928bea96
YL
1253 int retval;
1254
79272138
BH
1255 bridge = pci_upstream_bridge(dev);
1256 if (bridge)
1257 pci_enable_bridge(bridge);
928bea96 1258
cf3e1feb 1259 if (pci_is_enabled(dev)) {
fbeeb822 1260 if (!dev->is_busmaster)
cf3e1feb 1261 pci_set_master(dev);
928bea96 1262 return;
cf3e1feb
YL
1263 }
1264
928bea96
YL
1265 retval = pci_enable_device(dev);
1266 if (retval)
1267 dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
1268 retval);
1269 pci_set_master(dev);
1270}
1271
b4b4fbba 1272static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1da177e4 1273{
79272138 1274 struct pci_dev *bridge;
1da177e4 1275 int err;
b718989d 1276 int i, bars = 0;
1da177e4 1277
97c145f7
JB
1278 /*
1279 * Power state could be unknown at this point, either due to a fresh
1280 * boot or a device removal call. So get the current power state
1281 * so that things like MSI message writing will behave as expected
1282 * (e.g. if the device really is in D0 at enable time).
1283 */
1284 if (dev->pm_cap) {
1285 u16 pmcsr;
1286 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1287 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1288 }
1289
cc7ba39b 1290 if (atomic_inc_return(&dev->enable_cnt) > 1)
9fb625c3
HS
1291 return 0; /* already enabled */
1292
79272138
BH
1293 bridge = pci_upstream_bridge(dev);
1294 if (bridge)
1295 pci_enable_bridge(bridge);
928bea96 1296
497f16f2
YL
1297 /* only skip sriov related */
1298 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1299 if (dev->resource[i].flags & flags)
1300 bars |= (1 << i);
1301 for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
b718989d
BH
1302 if (dev->resource[i].flags & flags)
1303 bars |= (1 << i);
1304
38cc1302 1305 err = do_pci_enable_device(dev, bars);
95a62965 1306 if (err < 0)
38cc1302 1307 atomic_dec(&dev->enable_cnt);
9fb625c3 1308 return err;
1da177e4
LT
1309}
1310
b718989d
BH
1311/**
1312 * pci_enable_device_io - Initialize a device for use with IO space
1313 * @dev: PCI device to be initialized
1314 *
1315 * Initialize device before it's used by a driver. Ask low-level code
1316 * to enable I/O resources. Wake up the device if it was suspended.
1317 * Beware, this function can fail.
1318 */
1319int pci_enable_device_io(struct pci_dev *dev)
1320{
b4b4fbba 1321 return pci_enable_device_flags(dev, IORESOURCE_IO);
b718989d 1322}
b7fe9434 1323EXPORT_SYMBOL(pci_enable_device_io);
b718989d
BH
1324
1325/**
1326 * pci_enable_device_mem - Initialize a device for use with Memory space
1327 * @dev: PCI device to be initialized
1328 *
1329 * Initialize device before it's used by a driver. Ask low-level code
1330 * to enable Memory resources. Wake up the device if it was suspended.
1331 * Beware, this function can fail.
1332 */
1333int pci_enable_device_mem(struct pci_dev *dev)
1334{
b4b4fbba 1335 return pci_enable_device_flags(dev, IORESOURCE_MEM);
b718989d 1336}
b7fe9434 1337EXPORT_SYMBOL(pci_enable_device_mem);
b718989d 1338
bae94d02
IPG
1339/**
1340 * pci_enable_device - Initialize device before it's used by a driver.
1341 * @dev: PCI device to be initialized
1342 *
1343 * Initialize device before it's used by a driver. Ask low-level code
1344 * to enable I/O and memory. Wake up the device if it was suspended.
1345 * Beware, this function can fail.
1346 *
1347 * Note we don't actually enable the device many times if we call
1348 * this function repeatedly (we just increment the count).
1349 */
1350int pci_enable_device(struct pci_dev *dev)
1351{
b4b4fbba 1352 return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
bae94d02 1353}
b7fe9434 1354EXPORT_SYMBOL(pci_enable_device);
bae94d02 1355
9ac7849e
TH
1356/*
1357 * Managed PCI resources. This manages device on/off, intx/msi/msix
1358 * on/off and BAR regions. pci_dev itself records msi/msix status, so
1359 * there's no need to track it separately. pci_devres is initialized
1360 * when a device is enabled using managed PCI device enable interface.
1361 */
1362struct pci_devres {
7f375f32
TH
1363 unsigned int enabled:1;
1364 unsigned int pinned:1;
9ac7849e
TH
1365 unsigned int orig_intx:1;
1366 unsigned int restore_intx:1;
1367 u32 region_mask;
1368};
1369
1370static void pcim_release(struct device *gendev, void *res)
1371{
1372 struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
1373 struct pci_devres *this = res;
1374 int i;
1375
1376 if (dev->msi_enabled)
1377 pci_disable_msi(dev);
1378 if (dev->msix_enabled)
1379 pci_disable_msix(dev);
1380
1381 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1382 if (this->region_mask & (1 << i))
1383 pci_release_region(dev, i);
1384
1385 if (this->restore_intx)
1386 pci_intx(dev, this->orig_intx);
1387
7f375f32 1388 if (this->enabled && !this->pinned)
9ac7849e
TH
1389 pci_disable_device(dev);
1390}
1391
07656d83 1392static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
9ac7849e
TH
1393{
1394 struct pci_devres *dr, *new_dr;
1395
1396 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1397 if (dr)
1398 return dr;
1399
1400 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1401 if (!new_dr)
1402 return NULL;
1403 return devres_get(&pdev->dev, new_dr, NULL, NULL);
1404}
1405
07656d83 1406static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
9ac7849e
TH
1407{
1408 if (pci_is_managed(pdev))
1409 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1410 return NULL;
1411}
1412
1413/**
1414 * pcim_enable_device - Managed pci_enable_device()
1415 * @pdev: PCI device to be initialized
1416 *
1417 * Managed pci_enable_device().
1418 */
1419int pcim_enable_device(struct pci_dev *pdev)
1420{
1421 struct pci_devres *dr;
1422 int rc;
1423
1424 dr = get_pci_dr(pdev);
1425 if (unlikely(!dr))
1426 return -ENOMEM;
b95d58ea
TH
1427 if (dr->enabled)
1428 return 0;
9ac7849e
TH
1429
1430 rc = pci_enable_device(pdev);
1431 if (!rc) {
1432 pdev->is_managed = 1;
7f375f32 1433 dr->enabled = 1;
9ac7849e
TH
1434 }
1435 return rc;
1436}
b7fe9434 1437EXPORT_SYMBOL(pcim_enable_device);
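/*
 * Illustrative sketch, not part of this file: with the managed interface the
 * device (and anything else claimed through pcim_/devm_ helpers) is released
 * automatically on driver detach, so probe error paths stay short.  The
 * foo_* names and the use of BAR 0 are hypothetical.
 */
#if 0
static int foo_managed_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, 1 << 0, "foo");
	if (err)
		return err;

	/* BAR 0 is now available via pcim_iomap_table(pdev)[0]. */
	return 0;
}
#endif
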
9ac7849e
TH
1438
1439/**
1440 * pcim_pin_device - Pin managed PCI device
1441 * @pdev: PCI device to pin
1442 *
1443 * Pin managed PCI device @pdev. Pinned device won't be disabled on
1444 * driver detach. @pdev must have been enabled with
1445 * pcim_enable_device().
1446 */
1447void pcim_pin_device(struct pci_dev *pdev)
1448{
1449 struct pci_devres *dr;
1450
1451 dr = find_pci_dr(pdev);
7f375f32 1452 WARN_ON(!dr || !dr->enabled);
9ac7849e 1453 if (dr)
7f375f32 1454 dr->pinned = 1;
9ac7849e 1455}
b7fe9434 1456EXPORT_SYMBOL(pcim_pin_device);
9ac7849e 1457
eca0d467
MG
1458/*
1459 * pcibios_add_device - provide arch specific hooks when adding device dev
1460 * @dev: the PCI device being added
1461 *
1462 * Permits the platform to provide architecture specific functionality when
1463 * devices are added. This is the default implementation. Architecture
1464 * implementations can override this.
1465 */
3c78bc61 1466int __weak pcibios_add_device(struct pci_dev *dev)
eca0d467
MG
1467{
1468 return 0;
1469}
1470
6ae32c53
SO
1471/**
1472 * pcibios_release_device - provide arch specific hooks when releasing device dev
1473 * @dev: the PCI device being released
1474 *
1475 * Permits the platform to provide architecture specific functionality when
1476 * devices are released. This is the default implementation. Architecture
1477 * implementations can override this.
1478 */
1479void __weak pcibios_release_device(struct pci_dev *dev) {}
1480
1da177e4
LT
1481/**
1482 * pcibios_disable_device - disable arch specific PCI resources for device dev
1483 * @dev: the PCI device to disable
1484 *
1485 * Disables architecture specific PCI resources for the device. This
1486 * is the default implementation. Architecture implementations can
1487 * override this.
1488 */
d6d88c83 1489void __weak pcibios_disable_device (struct pci_dev *dev) {}
1da177e4 1490
a43ae58c
HG
1491/**
1492 * pcibios_penalize_isa_irq - penalize an ISA IRQ
1493 * @irq: ISA IRQ to penalize
1494 * @active: IRQ active or not
1495 *
1496 * Permits the platform to provide architecture-specific functionality when
1497 * penalizing ISA IRQs. This is the default implementation. Architecture
1498 * implementations can override this.
1499 */
1500void __weak pcibios_penalize_isa_irq(int irq, int active) {}
1501
fa58d305
RW
1502static void do_pci_disable_device(struct pci_dev *dev)
1503{
1504 u16 pci_command;
1505
1506 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1507 if (pci_command & PCI_COMMAND_MASTER) {
1508 pci_command &= ~PCI_COMMAND_MASTER;
1509 pci_write_config_word(dev, PCI_COMMAND, pci_command);
1510 }
1511
1512 pcibios_disable_device(dev);
1513}
1514
1515/**
1516 * pci_disable_enabled_device - Disable device without updating enable_cnt
1517 * @dev: PCI device to disable
1518 *
1519 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
1521 */
1522void pci_disable_enabled_device(struct pci_dev *dev)
1523{
296ccb08 1524 if (pci_is_enabled(dev))
fa58d305
RW
1525 do_pci_disable_device(dev);
1526}
1527
1da177e4
LT
1528/**
1529 * pci_disable_device - Disable PCI device after use
1530 * @dev: PCI device to be disabled
1531 *
1532 * Signal to the system that the PCI device is not in use by the system
1533 * anymore. This only involves disabling PCI bus-mastering, if active.
bae94d02
IPG
1534 *
1535 * Note we don't actually disable the device until all callers of
ee6583f6 1536 * pci_enable_device() have called pci_disable_device().
1da177e4 1537 */
3c78bc61 1538void pci_disable_device(struct pci_dev *dev)
1da177e4 1539{
9ac7849e 1540 struct pci_devres *dr;
99dc804d 1541
9ac7849e
TH
1542 dr = find_pci_dr(dev);
1543 if (dr)
7f375f32 1544 dr->enabled = 0;
9ac7849e 1545
fd6dceab
KK
1546 dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
1547 "disabling already-disabled device");
1548
cc7ba39b 1549 if (atomic_dec_return(&dev->enable_cnt) != 0)
bae94d02
IPG
1550 return;
1551
fa58d305 1552 do_pci_disable_device(dev);
1da177e4 1553
fa58d305 1554 dev->is_busmaster = 0;
1da177e4 1555}
b7fe9434 1556EXPORT_SYMBOL(pci_disable_device);
1da177e4 1557
f7bdd12d
BK
1558/**
1559 * pcibios_set_pcie_reset_state - set reset state for device dev
45e829ea 1560 * @dev: the PCIe device reset
f7bdd12d
BK
1561 * @state: Reset state to enter into
1562 *
1563 *
45e829ea 1564 * Sets the PCIe reset state for the device. This is the default
f7bdd12d
BK
1565 * implementation. Architecture implementations can override this.
1566 */
d6d88c83
BH
1567int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
1568 enum pcie_reset_state state)
f7bdd12d
BK
1569{
1570 return -EINVAL;
1571}
1572
1573/**
1574 * pci_set_pcie_reset_state - set reset state for device dev
45e829ea 1575 * @dev: the PCIe device reset
f7bdd12d
BK
1576 * @state: Reset state to enter into
1577 *
1578 *
1579 * Sets the PCI reset state for the device.
1580 */
1581int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1582{
1583 return pcibios_set_pcie_reset_state(dev, state);
1584}
b7fe9434 1585EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
f7bdd12d 1586
58ff4633
RW
1587/**
1588 * pci_check_pme_status - Check if given device has generated PME.
1589 * @dev: Device to check.
1590 *
1591 * Check the PME status of the device and if set, clear it and clear PME enable
1592 * (if set). Return 'true' if PME status and PME enable were both set or
1593 * 'false' otherwise.
1594 */
1595bool pci_check_pme_status(struct pci_dev *dev)
1596{
1597 int pmcsr_pos;
1598 u16 pmcsr;
1599 bool ret = false;
1600
1601 if (!dev->pm_cap)
1602 return false;
1603
1604 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1605 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1606 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1607 return false;
1608
1609 /* Clear PME status. */
1610 pmcsr |= PCI_PM_CTRL_PME_STATUS;
1611 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1612 /* Disable PME to avoid interrupt flood. */
1613 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1614 ret = true;
1615 }
1616
1617 pci_write_config_word(dev, pmcsr_pos, pmcsr);
1618
1619 return ret;
1620}
1621
b67ea761
RW
1622/**
1623 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1624 * @dev: Device to handle.
379021d5 1625 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
b67ea761
RW
1626 *
1627 * Check if @dev has generated PME and queue a resume request for it in that
1628 * case.
1629 */
379021d5 1630static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
b67ea761 1631{
379021d5
RW
1632 if (pme_poll_reset && dev->pme_poll)
1633 dev->pme_poll = false;
1634
c125e96f 1635 if (pci_check_pme_status(dev)) {
c125e96f 1636 pci_wakeup_event(dev);
0f953bf6 1637 pm_request_resume(&dev->dev);
c125e96f 1638 }
b67ea761
RW
1639 return 0;
1640}
1641
1642/**
1643 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1644 * @bus: Top bus of the subtree to walk.
1645 */
1646void pci_pme_wakeup_bus(struct pci_bus *bus)
1647{
1648 if (bus)
379021d5 1649 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
b67ea761
RW
1650}
1651
448bd857 1652
eb9d0fe4
RW
1653/**
1654 * pci_pme_capable - check the capability of PCI device to generate PME#
1655 * @dev: PCI device to handle.
eb9d0fe4
RW
1656 * @state: PCI state from which device will issue PME#.
1657 */
e5899e1b 1658bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
eb9d0fe4 1659{
337001b6 1660 if (!dev->pm_cap)
eb9d0fe4
RW
1661 return false;
1662
337001b6 1663 return !!(dev->pme_support & (1 << state));
eb9d0fe4 1664}
b7fe9434 1665EXPORT_SYMBOL(pci_pme_capable);
eb9d0fe4 1666
df17e62e
MG
1667static void pci_pme_list_scan(struct work_struct *work)
1668{
379021d5 1669 struct pci_pme_device *pme_dev, *n;
df17e62e
MG
1670
1671 mutex_lock(&pci_pme_list_mutex);
ce300008
BH
1672 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1673 if (pme_dev->dev->pme_poll) {
1674 struct pci_dev *bridge;
1675
1676 bridge = pme_dev->dev->bus->self;
1677 /*
1678 * If bridge is in low power state, the
1679 * configuration space of subordinate devices
1680 * may be not accessible
1681 */
1682 if (bridge && bridge->current_state != PCI_D0)
1683 continue;
1684 pci_pme_wakeup(pme_dev->dev, NULL);
1685 } else {
1686 list_del(&pme_dev->list);
1687 kfree(pme_dev);
379021d5 1688 }
df17e62e 1689 }
ce300008
BH
1690 if (!list_empty(&pci_pme_list))
1691 schedule_delayed_work(&pci_pme_work,
1692 msecs_to_jiffies(PME_TIMEOUT));
df17e62e
MG
1693 mutex_unlock(&pci_pme_list_mutex);
1694}
1695
eb9d0fe4
RW
1696/**
1697 * pci_pme_active - enable or disable PCI device's PME# function
1698 * @dev: PCI device to handle.
eb9d0fe4
RW
1699 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1700 *
1701 * The caller must verify that the device is capable of generating PME# before
1702 * calling this function with @enable equal to 'true'.
1703 */
5a6c9b60 1704void pci_pme_active(struct pci_dev *dev, bool enable)
eb9d0fe4
RW
1705{
1706 u16 pmcsr;
1707
ffaddbe8 1708 if (!dev->pme_support)
eb9d0fe4
RW
1709 return;
1710
337001b6 1711 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
eb9d0fe4
RW
1712 /* Clear PME_Status by writing 1 to it and enable PME# */
1713 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1714 if (!enable)
1715 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1716
337001b6 1717 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
eb9d0fe4 1718
6e965e0d
HY
1719 /*
1720 * PCI (as opposed to PCIe) PME requires that the device have
1721 * its PME# line hooked up correctly. Not all hardware vendors
1722 * do this, so the PME never gets delivered and the device
1723 * remains asleep. The easiest way around this is to
1724 * periodically walk the list of suspended devices and check
1725 * whether any have their PME flag set. The assumption is that
1726 * we'll wake up often enough anyway that this won't be a huge
1727 * hit, and the power savings from the devices will still be a
1728 * win.
1729 *
1730 * Although PCIe uses in-band PME message instead of PME# line
1731 * to report PME, PME does not work for some PCIe devices in
1732 * reality. For example, there are devices that set their PME
1733 * status bits, but don't really bother to send a PME message;
1734 * there are PCI Express Root Ports that don't bother to
1735 * trigger interrupts when they receive PME messages from the
1736 * devices below. So PME poll is used for PCIe devices too.
1737 */
df17e62e 1738
379021d5 1739 if (dev->pme_poll) {
df17e62e
MG
1740 struct pci_pme_device *pme_dev;
1741 if (enable) {
1742 pme_dev = kmalloc(sizeof(struct pci_pme_device),
1743 GFP_KERNEL);
0394cb19
BH
1744 if (!pme_dev) {
1745 dev_warn(&dev->dev, "can't enable PME#\n");
1746 return;
1747 }
df17e62e
MG
1748 pme_dev->dev = dev;
1749 mutex_lock(&pci_pme_list_mutex);
1750 list_add(&pme_dev->list, &pci_pme_list);
1751 if (list_is_singular(&pci_pme_list))
1752 schedule_delayed_work(&pci_pme_work,
1753 msecs_to_jiffies(PME_TIMEOUT));
1754 mutex_unlock(&pci_pme_list_mutex);
1755 } else {
1756 mutex_lock(&pci_pme_list_mutex);
1757 list_for_each_entry(pme_dev, &pci_pme_list, list) {
1758 if (pme_dev->dev == dev) {
1759 list_del(&pme_dev->list);
1760 kfree(pme_dev);
1761 break;
1762 }
1763 }
1764 mutex_unlock(&pci_pme_list_mutex);
1765 }
1766 }
1767
85b8582d 1768 dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
eb9d0fe4 1769}
b7fe9434 1770EXPORT_SYMBOL(pci_pme_active);
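
/*
 * Illustrative sketch (added for this document, not part of pci.c): how a
 * caller is expected to pair the two helpers above.  pci_pme_active() may
 * only be asked to enable PME# after pci_pme_capable() has confirmed that
 * the device can generate it from the intended state, which mirrors what
 * __pci_enable_wake() does further below.  The function name and the
 * chosen state are assumptions made for this example only.
 */
static void example_arm_pme_for_d3hot(struct pci_dev *pdev)
{
	if (pci_pme_capable(pdev, PCI_D3hot))
		pci_pme_active(pdev, true);	/* arm PME# generation */
	else
		dev_info(&pdev->dev, "PME# not supported from D3hot\n");
}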
eb9d0fe4 1771
1da177e4 1772/**
6cbf8214 1773 * __pci_enable_wake - enable PCI device as wakeup event source
075c1771
DB
1774 * @dev: PCI device affected
1775 * @state: PCI state from which device will issue wakeup events
6cbf8214 1776 * @runtime: True if the events are to be generated at run time
075c1771
DB
1777 * @enable: True to enable event generation; false to disable
1778 *
1779 * This enables the device as a wakeup event source, or disables it.
 1780 * When such events involve platform-specific hooks, those hooks are
1781 * called automatically by this routine.
1782 *
1783 * Devices with legacy power management (no standard PCI PM capabilities)
eb9d0fe4 1784 * always require such platform hooks.
075c1771 1785 *
eb9d0fe4
RW
1786 * RETURN VALUE:
1787 * 0 is returned on success
1788 * -EINVAL is returned if device is not supposed to wake up the system
1789 * Error code depending on the platform is returned if both the platform and
1790 * the native mechanism fail to enable the generation of wake-up events
1da177e4 1791 */
6cbf8214
RW
1792int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1793 bool runtime, bool enable)
1da177e4 1794{
5bcc2fb4 1795 int ret = 0;
075c1771 1796
6cbf8214 1797 if (enable && !runtime && !device_may_wakeup(&dev->dev))
eb9d0fe4 1798 return -EINVAL;
1da177e4 1799
e80bb09d
RW
1800 /* Don't do the same thing twice in a row for one device. */
1801 if (!!enable == !!dev->wakeup_prepared)
1802 return 0;
1803
eb9d0fe4
RW
1804 /*
1805 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1806 * Anderson we should be doing PME# wake enable followed by ACPI wake
1807 * enable. To disable wake-up we call the platform first, for symmetry.
075c1771 1808 */
1da177e4 1809
5bcc2fb4
RW
1810 if (enable) {
1811 int error;
1da177e4 1812
5bcc2fb4
RW
1813 if (pci_pme_capable(dev, state))
1814 pci_pme_active(dev, true);
1815 else
1816 ret = 1;
6cbf8214
RW
1817 error = runtime ? platform_pci_run_wake(dev, true) :
1818 platform_pci_sleep_wake(dev, true);
5bcc2fb4
RW
1819 if (ret)
1820 ret = error;
e80bb09d
RW
1821 if (!ret)
1822 dev->wakeup_prepared = true;
5bcc2fb4 1823 } else {
6cbf8214
RW
1824 if (runtime)
1825 platform_pci_run_wake(dev, false);
1826 else
1827 platform_pci_sleep_wake(dev, false);
5bcc2fb4 1828 pci_pme_active(dev, false);
e80bb09d 1829 dev->wakeup_prepared = false;
5bcc2fb4 1830 }
1da177e4 1831
5bcc2fb4 1832 return ret;
eb9d0fe4 1833}
6cbf8214 1834EXPORT_SYMBOL(__pci_enable_wake);
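
/*
 * Illustrative sketch (added for this document, not part of pci.c):
 * __pci_enable_wake() is normally reached through the pci_enable_wake()
 * wrapper (runtime == false) for system sleep, while the runtime path is
 * used by pci_finish_runtime_suspend() further below.  A driver preparing
 * for system sleep would typically honour the user-space wakeup policy
 * like this; the function name is hypothetical.
 */
static void example_setup_system_wake(struct pci_dev *pdev)
{
	pci_enable_wake(pdev, PCI_D3hot, device_may_wakeup(&pdev->dev));
}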
1da177e4 1835
0235c4fc
RW
1836/**
1837 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1838 * @dev: PCI device to prepare
1839 * @enable: True to enable wake-up event generation; false to disable
1840 *
1841 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1842 * and this function allows them to set that up cleanly - pci_enable_wake()
1843 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1844 * ordering constraints.
1845 *
1846 * This function only returns error code if the device is not capable of
1847 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1848 * enable wake-up power for it.
1849 */
1850int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1851{
1852 return pci_pme_capable(dev, PCI_D3cold) ?
1853 pci_enable_wake(dev, PCI_D3cold, enable) :
1854 pci_enable_wake(dev, PCI_D3hot, enable);
1855}
b7fe9434 1856EXPORT_SYMBOL(pci_wake_from_d3);
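
/*
 * Illustrative sketch (added for this document, not part of pci.c): network
 * drivers commonly call pci_wake_from_d3() when Wake-on-LAN is configured,
 * letting the helper pick D3cold when PME# works from there and D3hot
 * otherwise.  The function and parameter names are hypothetical.
 */
static void example_set_wol(struct pci_dev *pdev, bool wol_enabled)
{
	device_set_wakeup_enable(&pdev->dev, wol_enabled);
	pci_wake_from_d3(pdev, wol_enabled);
}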
0235c4fc 1857
404cc2d8 1858/**
37139074
JB
1859 * pci_target_state - find an appropriate low power state for a given PCI dev
1860 * @dev: PCI device
1861 *
1862 * Use underlying platform code to find a supported low power state for @dev.
1863 * If the platform can't manage @dev, return the deepest state from which it
1864 * can generate wake events, based on any available PME info.
404cc2d8 1865 */
0b950f0f 1866static pci_power_t pci_target_state(struct pci_dev *dev)
404cc2d8
RW
1867{
1868 pci_power_t target_state = PCI_D3hot;
404cc2d8
RW
1869
1870 if (platform_pci_power_manageable(dev)) {
1871 /*
1872 * Call the platform to choose the target state of the device
1873 * and enable wake-up from this state if supported.
1874 */
1875 pci_power_t state = platform_pci_choose_state(dev);
1876
1877 switch (state) {
1878 case PCI_POWER_ERROR:
1879 case PCI_UNKNOWN:
1880 break;
1881 case PCI_D1:
1882 case PCI_D2:
1883 if (pci_no_d1d2(dev))
1884 break;
1885 default:
1886 target_state = state;
404cc2d8 1887 }
d2abdf62
RW
1888 } else if (!dev->pm_cap) {
1889 target_state = PCI_D0;
404cc2d8
RW
1890 } else if (device_may_wakeup(&dev->dev)) {
1891 /*
1892 * Find the deepest state from which the device can generate
1893 * wake-up events, make it the target state and enable device
1894 * to generate PME#.
1895 */
337001b6
RW
1896 if (dev->pme_support) {
1897 while (target_state
1898 && !(dev->pme_support & (1 << target_state)))
1899 target_state--;
404cc2d8
RW
1900 }
1901 }
1902
e5899e1b
RW
1903 return target_state;
1904}
1905
1906/**
1907 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1908 * @dev: Device to handle.
1909 *
1910 * Choose the power state appropriate for the device depending on whether
1911 * it can wake up the system and/or is power manageable by the platform
1912 * (PCI_D3hot is the default) and put the device into that state.
1913 */
1914int pci_prepare_to_sleep(struct pci_dev *dev)
1915{
1916 pci_power_t target_state = pci_target_state(dev);
1917 int error;
1918
1919 if (target_state == PCI_POWER_ERROR)
1920 return -EIO;
1921
8efb8c76 1922 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
c157dfa3 1923
404cc2d8
RW
1924 error = pci_set_power_state(dev, target_state);
1925
1926 if (error)
1927 pci_enable_wake(dev, target_state, false);
1928
1929 return error;
1930}
b7fe9434 1931EXPORT_SYMBOL(pci_prepare_to_sleep);
404cc2d8
RW
1932
1933/**
443bd1c4 1934 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
404cc2d8
RW
1935 * @dev: Device to handle.
1936 *
88393161 1937 * Disable device's system wake-up capability and put it into D0.
404cc2d8
RW
1938 */
1939int pci_back_from_sleep(struct pci_dev *dev)
1940{
1941 pci_enable_wake(dev, PCI_D0, false);
1942 return pci_set_power_state(dev, PCI_D0);
1943}
b7fe9434 1944EXPORT_SYMBOL(pci_back_from_sleep);
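
/*
 * Illustrative sketch (added for this document, not part of pci.c): the
 * intended pairing of pci_prepare_to_sleep() and pci_back_from_sleep() in a
 * legacy .suspend()/.resume() implementation.  Everything apart from the
 * PCI API calls is hypothetical.
 */
static int example_legacy_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	return pci_prepare_to_sleep(pdev);	/* wake-up + low-power state */
}

static int example_legacy_resume(struct pci_dev *pdev)
{
	int ret = pci_back_from_sleep(pdev);	/* back to D0, wake disabled */

	pci_restore_state(pdev);
	return ret;
}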
404cc2d8 1945
6cbf8214
RW
1946/**
1947 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1948 * @dev: PCI device being suspended.
1949 *
1950 * Prepare @dev to generate wake-up events at run time and put it into a low
1951 * power state.
1952 */
1953int pci_finish_runtime_suspend(struct pci_dev *dev)
1954{
1955 pci_power_t target_state = pci_target_state(dev);
1956 int error;
1957
1958 if (target_state == PCI_POWER_ERROR)
1959 return -EIO;
1960
448bd857
HY
1961 dev->runtime_d3cold = target_state == PCI_D3cold;
1962
6cbf8214
RW
1963 __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1964
1965 error = pci_set_power_state(dev, target_state);
1966
448bd857 1967 if (error) {
6cbf8214 1968 __pci_enable_wake(dev, target_state, true, false);
448bd857
HY
1969 dev->runtime_d3cold = false;
1970 }
6cbf8214
RW
1971
1972 return error;
1973}
1974
b67ea761
RW
1975/**
1976 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1977 * @dev: Device to check.
1978 *
f7625980 1979 * Return true if the device itself is capable of generating wake-up events
b67ea761
RW
1980 * (through the platform or using the native PCIe PME) or if the device supports
1981 * PME and one of its upstream bridges can generate wake-up events.
1982 */
1983bool pci_dev_run_wake(struct pci_dev *dev)
1984{
1985 struct pci_bus *bus = dev->bus;
1986
1987 if (device_run_wake(&dev->dev))
1988 return true;
1989
1990 if (!dev->pme_support)
1991 return false;
1992
1993 while (bus->parent) {
1994 struct pci_dev *bridge = bus->self;
1995
1996 if (device_run_wake(&bridge->dev))
1997 return true;
1998
1999 bus = bus->parent;
2000 }
2001
2002 /* We have reached the root bus. */
2003 if (bus->bridge)
2004 return device_run_wake(bus->bridge);
2005
2006 return false;
2007}
2008EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2009
bac2a909
RW
2010/**
2011 * pci_dev_keep_suspended - Check if the device can stay in the suspended state.
2012 * @pci_dev: Device to check.
2013 *
 2014 * Return 'true' if the device is runtime-suspended, does not have to be
 2015 * reconfigured due to a difference in wakeup settings between system and
 2016 * runtime suspend, and its current power state is suitable for the upcoming
 2017 * (system) transition.
2018 */
2019bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
2020{
2021 struct device *dev = &pci_dev->dev;
2022
2023 if (!pm_runtime_suspended(dev)
2024 || (device_can_wakeup(dev) && !device_may_wakeup(dev))
2025 || platform_pci_need_resume(pci_dev))
2026 return false;
2027
2028 return pci_target_state(pci_dev) == pci_dev->current_state;
2029}
2030
b3c32c4f
HY
2031void pci_config_pm_runtime_get(struct pci_dev *pdev)
2032{
2033 struct device *dev = &pdev->dev;
2034 struct device *parent = dev->parent;
2035
2036 if (parent)
2037 pm_runtime_get_sync(parent);
2038 pm_runtime_get_noresume(dev);
2039 /*
2040 * pdev->current_state is set to PCI_D3cold during suspending,
2041 * so wait until suspending completes
2042 */
2043 pm_runtime_barrier(dev);
2044 /*
2045 * Only need to resume devices in D3cold, because config
2046 * registers are still accessible for devices suspended but
2047 * not in D3cold.
2048 */
2049 if (pdev->current_state == PCI_D3cold)
2050 pm_runtime_resume(dev);
2051}
2052
2053void pci_config_pm_runtime_put(struct pci_dev *pdev)
2054{
2055 struct device *dev = &pdev->dev;
2056 struct device *parent = dev->parent;
2057
2058 pm_runtime_put(dev);
2059 if (parent)
2060 pm_runtime_put_sync(parent);
2061}
2062
eb9d0fe4
RW
2063/**
2064 * pci_pm_init - Initialize PM functions of given PCI device
2065 * @dev: PCI device to handle.
2066 */
2067void pci_pm_init(struct pci_dev *dev)
2068{
2069 int pm;
2070 u16 pmc;
1da177e4 2071
bb910a70 2072 pm_runtime_forbid(&dev->dev);
967577b0
HY
2073 pm_runtime_set_active(&dev->dev);
2074 pm_runtime_enable(&dev->dev);
a1e4d72c 2075 device_enable_async_suspend(&dev->dev);
e80bb09d 2076 dev->wakeup_prepared = false;
bb910a70 2077
337001b6 2078 dev->pm_cap = 0;
ffaddbe8 2079 dev->pme_support = 0;
337001b6 2080
eb9d0fe4
RW
2081 /* find PCI PM capability in list */
2082 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
2083 if (!pm)
50246dd4 2084 return;
eb9d0fe4
RW
2085 /* Check device's ability to generate PME# */
2086 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
075c1771 2087
eb9d0fe4
RW
2088 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
2089 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
2090 pmc & PCI_PM_CAP_VER_MASK);
50246dd4 2091 return;
eb9d0fe4
RW
2092 }
2093
337001b6 2094 dev->pm_cap = pm;
1ae861e6 2095 dev->d3_delay = PCI_PM_D3_WAIT;
448bd857 2096 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
4f9c1397 2097 dev->d3cold_allowed = true;
337001b6
RW
2098
2099 dev->d1_support = false;
2100 dev->d2_support = false;
2101 if (!pci_no_d1d2(dev)) {
c9ed77ee 2102 if (pmc & PCI_PM_CAP_D1)
337001b6 2103 dev->d1_support = true;
c9ed77ee 2104 if (pmc & PCI_PM_CAP_D2)
337001b6 2105 dev->d2_support = true;
c9ed77ee
BH
2106
2107 if (dev->d1_support || dev->d2_support)
2108 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
ec84f126
JB
2109 dev->d1_support ? " D1" : "",
2110 dev->d2_support ? " D2" : "");
337001b6
RW
2111 }
2112
2113 pmc &= PCI_PM_CAP_PME_MASK;
2114 if (pmc) {
10c3d71d
BH
2115 dev_printk(KERN_DEBUG, &dev->dev,
2116 "PME# supported from%s%s%s%s%s\n",
c9ed77ee
BH
2117 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
2118 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
2119 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
2120 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
2121 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
337001b6 2122 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
379021d5 2123 dev->pme_poll = true;
eb9d0fe4
RW
2124 /*
2125 * Make device's PM flags reflect the wake-up capability, but
2126 * let the user space enable it to wake up the system as needed.
2127 */
2128 device_set_wakeup_capable(&dev->dev, true);
eb9d0fe4 2129 /* Disable the PME# generation functionality */
337001b6 2130 pci_pme_active(dev, false);
eb9d0fe4 2131 }
1da177e4
LT
2132}
2133
34a4876e
YL
2134static void pci_add_saved_cap(struct pci_dev *pci_dev,
2135 struct pci_cap_saved_state *new_cap)
2136{
2137 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
2138}
2139
63f4898a 2140/**
fd0f7f73
AW
2141 * _pci_add_cap_save_buffer - allocate buffer for saving given
2142 * capability registers
63f4898a
RW
2143 * @dev: the PCI device
2144 * @cap: the capability to allocate the buffer for
fd0f7f73 2145 * @extended: Standard or Extended capability ID
63f4898a
RW
2146 * @size: requested size of the buffer
2147 */
fd0f7f73
AW
2148static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
2149 bool extended, unsigned int size)
63f4898a
RW
2150{
2151 int pos;
2152 struct pci_cap_saved_state *save_state;
2153
fd0f7f73
AW
2154 if (extended)
2155 pos = pci_find_ext_capability(dev, cap);
2156 else
2157 pos = pci_find_capability(dev, cap);
2158
63f4898a
RW
2159 if (pos <= 0)
2160 return 0;
2161
2162 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
2163 if (!save_state)
2164 return -ENOMEM;
2165
24a4742f 2166 save_state->cap.cap_nr = cap;
fd0f7f73 2167 save_state->cap.cap_extended = extended;
24a4742f 2168 save_state->cap.size = size;
63f4898a
RW
2169 pci_add_saved_cap(dev, save_state);
2170
2171 return 0;
2172}
2173
fd0f7f73
AW
2174int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
2175{
2176 return _pci_add_cap_save_buffer(dev, cap, false, size);
2177}
2178
2179int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
2180{
2181 return _pci_add_cap_save_buffer(dev, cap, true, size);
2182}
2183
63f4898a
RW
2184/**
2185 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
2186 * @dev: the PCI device
2187 */
2188void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2189{
2190 int error;
2191
89858517
YZ
2192 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
2193 PCI_EXP_SAVE_REGS * sizeof(u16));
63f4898a
RW
2194 if (error)
2195 dev_err(&dev->dev,
2196 "unable to preallocate PCI Express save buffer\n");
2197
2198 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
2199 if (error)
2200 dev_err(&dev->dev,
2201 "unable to preallocate PCI-X save buffer\n");
425c1b22
AW
2202
2203 pci_allocate_vc_save_buffers(dev);
63f4898a
RW
2204}
2205
f796841e
YL
2206void pci_free_cap_save_buffers(struct pci_dev *dev)
2207{
2208 struct pci_cap_saved_state *tmp;
b67bfe0d 2209 struct hlist_node *n;
f796841e 2210
b67bfe0d 2211 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
f796841e
YL
2212 kfree(tmp);
2213}
2214
58c3a727 2215/**
31ab2476 2216 * pci_configure_ari - enable or disable ARI forwarding
58c3a727 2217 * @dev: the PCI device
b0cc6020
YW
2218 *
2219 * If @dev and its upstream bridge both support ARI, enable ARI in the
2220 * bridge. Otherwise, disable ARI in the bridge.
58c3a727 2221 */
31ab2476 2222void pci_configure_ari(struct pci_dev *dev)
58c3a727 2223{
58c3a727 2224 u32 cap;
8113587c 2225 struct pci_dev *bridge;
58c3a727 2226
6748dcc2 2227 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
58c3a727
YZ
2228 return;
2229
8113587c 2230 bridge = dev->bus->self;
cb97ae34 2231 if (!bridge)
8113587c
ZY
2232 return;
2233
59875ae4 2234 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
58c3a727
YZ
2235 if (!(cap & PCI_EXP_DEVCAP2_ARI))
2236 return;
2237
b0cc6020
YW
2238 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
2239 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
2240 PCI_EXP_DEVCTL2_ARI);
2241 bridge->ari_enabled = 1;
2242 } else {
2243 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
2244 PCI_EXP_DEVCTL2_ARI);
2245 bridge->ari_enabled = 0;
2246 }
58c3a727
YZ
2247}
2248
5d990b62
CW
2249static int pci_acs_enable;
2250
2251/**
2252 * pci_request_acs - ask for ACS to be enabled if supported
2253 */
2254void pci_request_acs(void)
2255{
2256 pci_acs_enable = 1;
2257}
2258
ae21ee65 2259/**
2c744244 2260 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
ae21ee65
AK
2261 * @dev: the PCI device
2262 */
2c744244 2263static int pci_std_enable_acs(struct pci_dev *dev)
ae21ee65
AK
2264{
2265 int pos;
2266 u16 cap;
2267 u16 ctrl;
2268
ae21ee65
AK
2269 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2270 if (!pos)
2c744244 2271 return -ENODEV;
ae21ee65
AK
2272
2273 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2274 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2275
2276 /* Source Validation */
2277 ctrl |= (cap & PCI_ACS_SV);
2278
2279 /* P2P Request Redirect */
2280 ctrl |= (cap & PCI_ACS_RR);
2281
2282 /* P2P Completion Redirect */
2283 ctrl |= (cap & PCI_ACS_CR);
2284
2285 /* Upstream Forwarding */
2286 ctrl |= (cap & PCI_ACS_UF);
2287
2288 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2c744244
AW
2289
2290 return 0;
2291}
2292
2293/**
 2294 * pci_enable_acs - enable ACS if hardware supports it
2295 * @dev: the PCI device
2296 */
2297void pci_enable_acs(struct pci_dev *dev)
2298{
2299 if (!pci_acs_enable)
2300 return;
2301
2302 if (!pci_std_enable_acs(dev))
2303 return;
2304
2305 pci_dev_specific_enable_acs(dev);
ae21ee65
AK
2306}
2307
0a67119f
AW
2308static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
2309{
2310 int pos;
83db7e0b 2311 u16 cap, ctrl;
0a67119f
AW
2312
2313 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2314 if (!pos)
2315 return false;
2316
83db7e0b
AW
2317 /*
2318 * Except for egress control, capabilities are either required
2319 * or only required if controllable. Features missing from the
2320 * capability field can therefore be assumed as hard-wired enabled.
2321 */
2322 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
2323 acs_flags &= (cap | PCI_ACS_EC);
2324
0a67119f
AW
2325 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2326 return (ctrl & acs_flags) == acs_flags;
2327}
2328
ad805758
AW
2329/**
2330 * pci_acs_enabled - test ACS against required flags for a given device
2331 * @pdev: device to test
2332 * @acs_flags: required PCI ACS flags
2333 *
2334 * Return true if the device supports the provided flags. Automatically
2335 * filters out flags that are not implemented on multifunction devices.
0a67119f
AW
2336 *
2337 * Note that this interface checks the effective ACS capabilities of the
2338 * device rather than the actual capabilities. For instance, most single
2339 * function endpoints are not required to support ACS because they have no
2340 * opportunity for peer-to-peer access. We therefore return 'true'
2341 * regardless of whether the device exposes an ACS capability. This makes
2342 * it much easier for callers of this function to ignore the actual type
2343 * or topology of the device when testing ACS support.
ad805758
AW
2344 */
2345bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2346{
0a67119f 2347 int ret;
ad805758
AW
2348
2349 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2350 if (ret >= 0)
2351 return ret > 0;
2352
0a67119f
AW
2353 /*
2354 * Conventional PCI and PCI-X devices never support ACS, either
2355 * effectively or actually. The shared bus topology implies that
2356 * any device on the bus can receive or snoop DMA.
2357 */
ad805758
AW
2358 if (!pci_is_pcie(pdev))
2359 return false;
2360
0a67119f
AW
2361 switch (pci_pcie_type(pdev)) {
2362 /*
2363 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
f7625980 2364 * but since their primary interface is PCI/X, we conservatively
0a67119f
AW
2365 * handle them as we would a non-PCIe device.
2366 */
2367 case PCI_EXP_TYPE_PCIE_BRIDGE:
2368 /*
2369 * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
2370 * applicable... must never implement an ACS Extended Capability...".
2371 * This seems arbitrary, but we take a conservative interpretation
2372 * of this statement.
2373 */
2374 case PCI_EXP_TYPE_PCI_BRIDGE:
2375 case PCI_EXP_TYPE_RC_EC:
2376 return false;
2377 /*
2378 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
2379 * implement ACS in order to indicate their peer-to-peer capabilities,
2380 * regardless of whether they are single- or multi-function devices.
2381 */
2382 case PCI_EXP_TYPE_DOWNSTREAM:
2383 case PCI_EXP_TYPE_ROOT_PORT:
2384 return pci_acs_flags_enabled(pdev, acs_flags);
2385 /*
2386 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
2387 * implemented by the remaining PCIe types to indicate peer-to-peer
f7625980 2388 * capabilities, but only when they are part of a multifunction
0a67119f
AW
2389 * device. The footnote for section 6.12 indicates the specific
2390 * PCIe types included here.
2391 */
2392 case PCI_EXP_TYPE_ENDPOINT:
2393 case PCI_EXP_TYPE_UPSTREAM:
2394 case PCI_EXP_TYPE_LEG_END:
2395 case PCI_EXP_TYPE_RC_END:
2396 if (!pdev->multifunction)
2397 break;
2398
0a67119f 2399 return pci_acs_flags_enabled(pdev, acs_flags);
ad805758
AW
2400 }
2401
0a67119f 2402 /*
f7625980 2403 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
0a67119f
AW
2404 * to single function devices with the exception of downstream ports.
2405 */
ad805758
AW
2406 return true;
2407}
2408
2409/**
 2410 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
2411 * @start: starting downstream device
2412 * @end: ending upstream device or NULL to search to the root bus
2413 * @acs_flags: required flags
2414 *
2415 * Walk up a device tree from start to end testing PCI ACS support. If
2416 * any step along the way does not support the required flags, return false.
2417 */
2418bool pci_acs_path_enabled(struct pci_dev *start,
2419 struct pci_dev *end, u16 acs_flags)
2420{
2421 struct pci_dev *pdev, *parent = start;
2422
2423 do {
2424 pdev = parent;
2425
2426 if (!pci_acs_enabled(pdev, acs_flags))
2427 return false;
2428
2429 if (pci_is_root_bus(pdev->bus))
2430 return (end == NULL);
2431
2432 parent = pdev->bus->self;
2433 } while (pdev != end);
2434
2435 return true;
2436}
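
/*
 * Illustrative sketch (added for this document, not part of pci.c): how an
 * IOMMU- or VFIO-style caller might use pci_acs_path_enabled() to decide
 * whether a device is isolated from its peers all the way up to the root
 * bus.  The flag set matches what pci_std_enable_acs() programs above; the
 * function name is hypothetical.
 */
static bool example_dev_is_isolated(struct pci_dev *pdev)
{
	u16 acs_flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;

	return pci_acs_path_enabled(pdev, NULL, acs_flags);
}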
2437
57c2cf71
BH
2438/**
2439 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2440 * @dev: the PCI device
bb5c2de2 2441 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
57c2cf71
BH
2442 *
2443 * Perform INTx swizzling for a device behind one level of bridge. This is
2444 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
46b952a3
MW
2445 * behind bridges on add-in cards. For devices with ARI enabled, the slot
2446 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2447 * the PCI Express Base Specification, Revision 2.1)
57c2cf71 2448 */
3df425f3 2449u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
57c2cf71 2450{
46b952a3
MW
2451 int slot;
2452
2453 if (pci_ari_enabled(dev->bus))
2454 slot = 0;
2455 else
2456 slot = PCI_SLOT(dev->devfn);
2457
2458 return (((pin - 1) + slot) % 4) + 1;
57c2cf71
BH
2459}
2460
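/*
 * Worked example (added for this document, not part of pci.c): a device in
 * slot 2 behind a bridge asserting INTB (pin 2) is swizzled to
 * (((2 - 1) + 2) % 4) + 1 = 4, i.e. INTD on the bridge's primary side.
 * With ARI enabled the slot number is taken as 0, so the pin is unchanged.
 */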
3c78bc61 2461int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
1da177e4
LT
2462{
2463 u8 pin;
2464
514d207d 2465 pin = dev->pin;
1da177e4
LT
2466 if (!pin)
2467 return -1;
878f2e50 2468
8784fd4d 2469 while (!pci_is_root_bus(dev->bus)) {
57c2cf71 2470 pin = pci_swizzle_interrupt_pin(dev, pin);
1da177e4
LT
2471 dev = dev->bus->self;
2472 }
2473 *bridge = dev;
2474 return pin;
2475}
2476
68feac87
BH
2477/**
2478 * pci_common_swizzle - swizzle INTx all the way to root bridge
2479 * @dev: the PCI device
 2480 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2481 *
2482 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
2483 * bridges all the way up to a PCI root bus.
2484 */
2485u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2486{
2487 u8 pin = *pinp;
2488
1eb39487 2489 while (!pci_is_root_bus(dev->bus)) {
68feac87
BH
2490 pin = pci_swizzle_interrupt_pin(dev, pin);
2491 dev = dev->bus->self;
2492 }
2493 *pinp = pin;
2494 return PCI_SLOT(dev->devfn);
2495}
2496
1da177e4
LT
2497/**
 2498 * pci_release_region - Release a PCI BAR
2499 * @pdev: PCI device whose resources were previously reserved by pci_request_region
2500 * @bar: BAR to release
2501 *
2502 * Releases the PCI I/O and memory resources previously reserved by a
2503 * successful call to pci_request_region. Call this function only
2504 * after all use of the PCI regions has ceased.
2505 */
2506void pci_release_region(struct pci_dev *pdev, int bar)
2507{
9ac7849e
TH
2508 struct pci_devres *dr;
2509
1da177e4
LT
2510 if (pci_resource_len(pdev, bar) == 0)
2511 return;
2512 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2513 release_region(pci_resource_start(pdev, bar),
2514 pci_resource_len(pdev, bar));
2515 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2516 release_mem_region(pci_resource_start(pdev, bar),
2517 pci_resource_len(pdev, bar));
9ac7849e
TH
2518
2519 dr = find_pci_dr(pdev);
2520 if (dr)
2521 dr->region_mask &= ~(1 << bar);
1da177e4 2522}
b7fe9434 2523EXPORT_SYMBOL(pci_release_region);
1da177e4
LT
2524
2525/**
f5ddcac4 2526 * __pci_request_region - Reserve PCI I/O and memory resource
1da177e4
LT
2527 * @pdev: PCI device whose resources are to be reserved
2528 * @bar: BAR to be reserved
2529 * @res_name: Name to be associated with resource.
f5ddcac4 2530 * @exclusive: whether the region access is exclusive or not
1da177e4
LT
2531 *
 2532 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2533 * being reserved by owner @res_name. Do not access any
2534 * address inside the PCI regions unless this call returns
2535 * successfully.
2536 *
f5ddcac4
RD
2537 * If @exclusive is set, then the region is marked so that userspace
2538 * is explicitly not allowed to map the resource via /dev/mem or
f7625980 2539 * sysfs MMIO access.
f5ddcac4 2540 *
1da177e4
LT
2541 * Returns 0 on success, or %EBUSY on error. A warning
2542 * message is also printed on failure.
2543 */
3c78bc61
RD
2544static int __pci_request_region(struct pci_dev *pdev, int bar,
2545 const char *res_name, int exclusive)
1da177e4 2546{
9ac7849e
TH
2547 struct pci_devres *dr;
2548
1da177e4
LT
2549 if (pci_resource_len(pdev, bar) == 0)
2550 return 0;
f7625980 2551
1da177e4
LT
2552 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2553 if (!request_region(pci_resource_start(pdev, bar),
2554 pci_resource_len(pdev, bar), res_name))
2555 goto err_out;
3c78bc61 2556 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
e8de1481
AV
2557 if (!__request_mem_region(pci_resource_start(pdev, bar),
2558 pci_resource_len(pdev, bar), res_name,
2559 exclusive))
1da177e4
LT
2560 goto err_out;
2561 }
9ac7849e
TH
2562
2563 dr = find_pci_dr(pdev);
2564 if (dr)
2565 dr->region_mask |= 1 << bar;
2566
1da177e4
LT
2567 return 0;
2568
2569err_out:
c7dabef8 2570 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
096e6f67 2571 &pdev->resource[bar]);
1da177e4
LT
2572 return -EBUSY;
2573}
2574
e8de1481 2575/**
f5ddcac4 2576 * pci_request_region - Reserve PCI I/O and memory resource
e8de1481
AV
2577 * @pdev: PCI device whose resources are to be reserved
2578 * @bar: BAR to be reserved
f5ddcac4 2579 * @res_name: Name to be associated with resource
e8de1481 2580 *
f5ddcac4 2581 * Mark the PCI region associated with PCI device @pdev BAR @bar as
e8de1481
AV
2582 * being reserved by owner @res_name. Do not access any
2583 * address inside the PCI regions unless this call returns
2584 * successfully.
2585 *
2586 * Returns 0 on success, or %EBUSY on error. A warning
2587 * message is also printed on failure.
2588 */
2589int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2590{
2591 return __pci_request_region(pdev, bar, res_name, 0);
2592}
b7fe9434 2593EXPORT_SYMBOL(pci_request_region);
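
/*
 * Illustrative sketch (added for this document, not part of pci.c): the
 * usual pattern of reserving a single BAR with pci_request_region() before
 * mapping it with pci_ioremap_bar() (defined earlier in this file), plus
 * the matching cleanup.  The resource name and BAR number are assumptions
 * made for this example only.
 */
static void __iomem *example_map_bar0(struct pci_dev *pdev)
{
	void __iomem *regs;

	if (pci_request_region(pdev, 0, "example"))
		return NULL;

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs)
		pci_release_region(pdev, 0);

	return regs;
}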
e8de1481
AV
2594
2595/**
 2596 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
2597 * @pdev: PCI device whose resources are to be reserved
2598 * @bar: BAR to be reserved
2599 * @res_name: Name to be associated with resource.
2600 *
 2601 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2602 * being reserved by owner @res_name. Do not access any
2603 * address inside the PCI regions unless this call returns
2604 * successfully.
2605 *
2606 * Returns 0 on success, or %EBUSY on error. A warning
2607 * message is also printed on failure.
2608 *
 2609 * The key difference that _exclusive makes is that userspace is
2610 * explicitly not allowed to map the resource via /dev/mem or
f7625980 2611 * sysfs.
e8de1481 2612 */
3c78bc61
RD
2613int pci_request_region_exclusive(struct pci_dev *pdev, int bar,
2614 const char *res_name)
e8de1481
AV
2615{
2616 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2617}
b7fe9434
RD
2618EXPORT_SYMBOL(pci_request_region_exclusive);
2619
c87deff7
HS
2620/**
2621 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2622 * @pdev: PCI device whose resources were previously reserved
2623 * @bars: Bitmask of BARs to be released
2624 *
2625 * Release selected PCI I/O and memory resources previously reserved.
2626 * Call this function only after all use of the PCI regions has ceased.
2627 */
2628void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2629{
2630 int i;
2631
2632 for (i = 0; i < 6; i++)
2633 if (bars & (1 << i))
2634 pci_release_region(pdev, i);
2635}
b7fe9434 2636EXPORT_SYMBOL(pci_release_selected_regions);
c87deff7 2637
9738abed 2638static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3c78bc61 2639 const char *res_name, int excl)
c87deff7
HS
2640{
2641 int i;
2642
2643 for (i = 0; i < 6; i++)
2644 if (bars & (1 << i))
e8de1481 2645 if (__pci_request_region(pdev, i, res_name, excl))
c87deff7
HS
2646 goto err_out;
2647 return 0;
2648
2649err_out:
3c78bc61 2650 while (--i >= 0)
c87deff7
HS
2651 if (bars & (1 << i))
2652 pci_release_region(pdev, i);
2653
2654 return -EBUSY;
2655}
1da177e4 2656
e8de1481
AV
2657
2658/**
2659 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2660 * @pdev: PCI device whose resources are to be reserved
2661 * @bars: Bitmask of BARs to be requested
2662 * @res_name: Name to be associated with resource
2663 */
2664int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2665 const char *res_name)
2666{
2667 return __pci_request_selected_regions(pdev, bars, res_name, 0);
2668}
b7fe9434 2669EXPORT_SYMBOL(pci_request_selected_regions);
e8de1481 2670
3c78bc61
RD
2671int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
2672 const char *res_name)
e8de1481
AV
2673{
2674 return __pci_request_selected_regions(pdev, bars, res_name,
2675 IORESOURCE_EXCLUSIVE);
2676}
b7fe9434 2677EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
e8de1481 2678
1da177e4
LT
2679/**
2680 * pci_release_regions - Release reserved PCI I/O and memory resources
2681 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
2682 *
2683 * Releases all PCI I/O and memory resources previously reserved by a
2684 * successful call to pci_request_regions. Call this function only
2685 * after all use of the PCI regions has ceased.
2686 */
2687
2688void pci_release_regions(struct pci_dev *pdev)
2689{
c87deff7 2690 pci_release_selected_regions(pdev, (1 << 6) - 1);
1da177e4 2691}
b7fe9434 2692EXPORT_SYMBOL(pci_release_regions);
1da177e4
LT
2693
2694/**
 2695 * pci_request_regions - Reserve PCI I/O and memory resources
2696 * @pdev: PCI device whose resources are to be reserved
2697 * @res_name: Name to be associated with resource.
2698 *
2699 * Mark all PCI regions associated with PCI device @pdev as
2700 * being reserved by owner @res_name. Do not access any
2701 * address inside the PCI regions unless this call returns
2702 * successfully.
2703 *
2704 * Returns 0 on success, or %EBUSY on error. A warning
2705 * message is also printed on failure.
2706 */
3c990e92 2707int pci_request_regions(struct pci_dev *pdev, const char *res_name)
1da177e4 2708{
c87deff7 2709 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
1da177e4 2710}
b7fe9434 2711EXPORT_SYMBOL(pci_request_regions);
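
/*
 * Illustrative sketch (added for this document, not part of pci.c): typical
 * probe-time ordering of pci_enable_device(), pci_request_regions() and
 * pci_set_master() (defined further below), together with the matching
 * error unwinding.  The function and resource names are hypothetical.
 */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "example");
	if (err)
		goto err_disable;

	pci_set_master(pdev);	/* allow the device to initiate DMA */
	return 0;

err_disable:
	pci_disable_device(pdev);
	return err;
}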
1da177e4 2712
e8de1481
AV
2713/**
 2714 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2715 * @pdev: PCI device whose resources are to be reserved
2716 * @res_name: Name to be associated with resource.
2717 *
2718 * Mark all PCI regions associated with PCI device @pdev as
2719 * being reserved by owner @res_name. Do not access any
2720 * address inside the PCI regions unless this call returns
2721 * successfully.
2722 *
2723 * pci_request_regions_exclusive() will mark the region so that
f7625980 2724 * /dev/mem and the sysfs MMIO access will not be allowed.
e8de1481
AV
2725 *
2726 * Returns 0 on success, or %EBUSY on error. A warning
2727 * message is also printed on failure.
2728 */
2729int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2730{
2731 return pci_request_selected_regions_exclusive(pdev,
2732 ((1 << 6) - 1), res_name);
2733}
b7fe9434 2734EXPORT_SYMBOL(pci_request_regions_exclusive);
e8de1481 2735
8b921acf
LD
2736/**
2737 * pci_remap_iospace - Remap the memory mapped I/O space
2738 * @res: Resource describing the I/O space
2739 * @phys_addr: physical address of range to be mapped
2740 *
2741 * Remap the memory mapped I/O space described by the @res
2742 * and the CPU physical address @phys_addr into virtual address space.
2743 * Only architectures that have memory mapped IO functions defined
2744 * (and the PCI_IOBASE value defined) should call this function.
2745 */
2746int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
2747{
2748#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
2749 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
2750
2751 if (!(res->flags & IORESOURCE_IO))
2752 return -EINVAL;
2753
2754 if (res->end > IO_SPACE_LIMIT)
2755 return -EINVAL;
2756
2757 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
2758 pgprot_device(PAGE_KERNEL));
2759#else
2760 /* this architecture does not have memory mapped I/O space,
2761 so this function should never be called */
2762 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
2763 return -ENODEV;
2764#endif
2765}
2766
6a479079
BH
2767static void __pci_set_master(struct pci_dev *dev, bool enable)
2768{
2769 u16 old_cmd, cmd;
2770
2771 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2772 if (enable)
2773 cmd = old_cmd | PCI_COMMAND_MASTER;
2774 else
2775 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2776 if (cmd != old_cmd) {
2777 dev_dbg(&dev->dev, "%s bus mastering\n",
2778 enable ? "enabling" : "disabling");
2779 pci_write_config_word(dev, PCI_COMMAND, cmd);
2780 }
2781 dev->is_busmaster = enable;
2782}
e8de1481 2783
2b6f2c35
MS
2784/**
2785 * pcibios_setup - process "pci=" kernel boot arguments
2786 * @str: string used to pass in "pci=" kernel boot arguments
2787 *
2788 * Process kernel boot arguments. This is the default implementation.
2789 * Architecture specific implementations can override this as necessary.
2790 */
2791char * __weak __init pcibios_setup(char *str)
2792{
2793 return str;
2794}
2795
96c55900
MS
2796/**
2797 * pcibios_set_master - enable PCI bus-mastering for device dev
2798 * @dev: the PCI device to enable
2799 *
2800 * Enables PCI bus-mastering for the device. This is the default
2801 * implementation. Architecture specific implementations can override
2802 * this if necessary.
2803 */
2804void __weak pcibios_set_master(struct pci_dev *dev)
2805{
2806 u8 lat;
2807
f676678f
MS
2808 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2809 if (pci_is_pcie(dev))
2810 return;
2811
96c55900
MS
2812 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2813 if (lat < 16)
2814 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2815 else if (lat > pcibios_max_latency)
2816 lat = pcibios_max_latency;
2817 else
2818 return;
a006482b 2819
96c55900
MS
2820 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2821}
2822
1da177e4
LT
2823/**
2824 * pci_set_master - enables bus-mastering for device dev
2825 * @dev: the PCI device to enable
2826 *
2827 * Enables bus-mastering on the device and calls pcibios_set_master()
2828 * to do the needed arch specific settings.
2829 */
6a479079 2830void pci_set_master(struct pci_dev *dev)
1da177e4 2831{
6a479079 2832 __pci_set_master(dev, true);
1da177e4
LT
2833 pcibios_set_master(dev);
2834}
b7fe9434 2835EXPORT_SYMBOL(pci_set_master);
1da177e4 2836
6a479079
BH
2837/**
2838 * pci_clear_master - disables bus-mastering for device dev
2839 * @dev: the PCI device to disable
2840 */
2841void pci_clear_master(struct pci_dev *dev)
2842{
2843 __pci_set_master(dev, false);
2844}
b7fe9434 2845EXPORT_SYMBOL(pci_clear_master);
6a479079 2846
1da177e4 2847/**
edb2d97e
MW
2848 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2849 * @dev: the PCI device for which MWI is to be enabled
1da177e4 2850 *
edb2d97e
MW
2851 * Helper function for pci_set_mwi.
2852 * Originally copied from drivers/net/acenic.c.
1da177e4
LT
2853 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2854 *
2855 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2856 */
15ea76d4 2857int pci_set_cacheline_size(struct pci_dev *dev)
1da177e4
LT
2858{
2859 u8 cacheline_size;
2860
2861 if (!pci_cache_line_size)
15ea76d4 2862 return -EINVAL;
1da177e4
LT
2863
2864 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
 2865 equal to or a multiple of the right value. */
2866 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2867 if (cacheline_size >= pci_cache_line_size &&
2868 (cacheline_size % pci_cache_line_size) == 0)
2869 return 0;
2870
2871 /* Write the correct value. */
2872 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2873 /* Read it back. */
2874 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2875 if (cacheline_size == pci_cache_line_size)
2876 return 0;
2877
227f0647
RD
2878 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not supported\n",
2879 pci_cache_line_size << 2);
1da177e4
LT
2880
2881 return -EINVAL;
2882}
15ea76d4
TH
2883EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2884
1da177e4
LT
2885/**
2886 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2887 * @dev: the PCI device for which MWI is enabled
2888 *
694625c0 2889 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
1da177e4
LT
2890 *
2891 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2892 */
3c78bc61 2893int pci_set_mwi(struct pci_dev *dev)
1da177e4 2894{
b7fe9434
RD
2895#ifdef PCI_DISABLE_MWI
2896 return 0;
2897#else
1da177e4
LT
2898 int rc;
2899 u16 cmd;
2900
edb2d97e 2901 rc = pci_set_cacheline_size(dev);
1da177e4
LT
2902 if (rc)
2903 return rc;
2904
2905 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3c78bc61 2906 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
80ccba11 2907 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
1da177e4
LT
2908 cmd |= PCI_COMMAND_INVALIDATE;
2909 pci_write_config_word(dev, PCI_COMMAND, cmd);
2910 }
1da177e4 2911 return 0;
b7fe9434 2912#endif
1da177e4 2913}
b7fe9434 2914EXPORT_SYMBOL(pci_set_mwi);
1da177e4 2915
694625c0
RD
2916/**
2917 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2918 * @dev: the PCI device for which MWI is enabled
2919 *
2920 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2921 * Callers are not required to check the return value.
2922 *
2923 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2924 */
2925int pci_try_set_mwi(struct pci_dev *dev)
2926{
b7fe9434
RD
2927#ifdef PCI_DISABLE_MWI
2928 return 0;
2929#else
2930 return pci_set_mwi(dev);
2931#endif
694625c0 2932}
b7fe9434 2933EXPORT_SYMBOL(pci_try_set_mwi);
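
/*
 * Illustrative sketch (added for this document, not part of pci.c):
 * pci_try_set_mwi() is intended for callers that treat Memory-Write-
 * Invalidate as a pure optimization, so its return value is deliberately
 * ignored here; a driver that depends on MWI would call pci_set_mwi() and
 * check for errors instead.  The function name is hypothetical.
 */
static void example_enable_mwi_best_effort(struct pci_dev *pdev)
{
	pci_try_set_mwi(pdev);	/* best effort; failure is harmless */
}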
694625c0 2934
1da177e4
LT
2935/**
2936 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2937 * @dev: the PCI device to disable
2938 *
2939 * Disables PCI Memory-Write-Invalidate transaction on the device
2940 */
3c78bc61 2941void pci_clear_mwi(struct pci_dev *dev)
1da177e4 2942{
b7fe9434 2943#ifndef PCI_DISABLE_MWI
1da177e4
LT
2944 u16 cmd;
2945
2946 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2947 if (cmd & PCI_COMMAND_INVALIDATE) {
2948 cmd &= ~PCI_COMMAND_INVALIDATE;
2949 pci_write_config_word(dev, PCI_COMMAND, cmd);
2950 }
b7fe9434 2951#endif
1da177e4 2952}
b7fe9434 2953EXPORT_SYMBOL(pci_clear_mwi);
1da177e4 2954
a04ce0ff
BR
2955/**
2956 * pci_intx - enables/disables PCI INTx for device dev
8f7020d3
RD
2957 * @pdev: the PCI device to operate on
2958 * @enable: boolean: whether to enable or disable PCI INTx
a04ce0ff
BR
2959 *
2960 * Enables/disables PCI INTx for device dev
2961 */
3c78bc61 2962void pci_intx(struct pci_dev *pdev, int enable)
a04ce0ff
BR
2963{
2964 u16 pci_command, new;
2965
2966 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2967
3c78bc61 2968 if (enable)
a04ce0ff 2969 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
3c78bc61 2970 else
a04ce0ff 2971 new = pci_command | PCI_COMMAND_INTX_DISABLE;
a04ce0ff
BR
2972
2973 if (new != pci_command) {
9ac7849e
TH
2974 struct pci_devres *dr;
2975
2fd9d74b 2976 pci_write_config_word(pdev, PCI_COMMAND, new);
9ac7849e
TH
2977
2978 dr = find_pci_dr(pdev);
2979 if (dr && !dr->restore_intx) {
2980 dr->restore_intx = 1;
2981 dr->orig_intx = !enable;
2982 }
a04ce0ff
BR
2983 }
2984}
b7fe9434 2985EXPORT_SYMBOL_GPL(pci_intx);
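
/*
 * Illustrative sketch (added for this document, not part of pci.c):
 * pci_intx() is mostly used by core and pass-through code to gate legacy
 * interrupts, for example to silence INTx while a device is quiesced.
 * The function name is hypothetical.
 */
static void example_quiesce_intx(struct pci_dev *pdev, bool quiesce)
{
	pci_intx(pdev, !quiesce);	/* enable == 0 sets INTX_DISABLE */
}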
a04ce0ff 2986
a2e27787
JK
2987/**
2988 * pci_intx_mask_supported - probe for INTx masking support
6e9292c5 2989 * @dev: the PCI device to operate on
a2e27787
JK
2990 *
 2992 * Check if the device dev supports INTx masking via the config space
2992 * command word.
2993 */
2994bool pci_intx_mask_supported(struct pci_dev *dev)
2995{
2996 bool mask_supported = false;
2997 u16 orig, new;
2998
fbebb9fd
BH
2999 if (dev->broken_intx_masking)
3000 return false;
3001
a2e27787
JK
3002 pci_cfg_access_lock(dev);
3003
3004 pci_read_config_word(dev, PCI_COMMAND, &orig);
3005 pci_write_config_word(dev, PCI_COMMAND,
3006 orig ^ PCI_COMMAND_INTX_DISABLE);
3007 pci_read_config_word(dev, PCI_COMMAND, &new);
3008
3009 /*
3010 * There's no way to protect against hardware bugs or detect them
3011 * reliably, but as long as we know what the value should be, let's
3012 * go ahead and check it.
3013 */
3014 if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
227f0647
RD
3015 dev_err(&dev->dev, "Command register changed from 0x%x to 0x%x: driver or hardware bug?\n",
3016 orig, new);
a2e27787
JK
3017 } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
3018 mask_supported = true;
3019 pci_write_config_word(dev, PCI_COMMAND, orig);
3020 }
3021
3022 pci_cfg_access_unlock(dev);
3023 return mask_supported;
3024}
3025EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
3026
3027static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
3028{
3029 struct pci_bus *bus = dev->bus;
3030 bool mask_updated = true;
3031 u32 cmd_status_dword;
3032 u16 origcmd, newcmd;
3033 unsigned long flags;
3034 bool irq_pending;
3035
3036 /*
3037 * We do a single dword read to retrieve both command and status.
3038 * Document assumptions that make this possible.
3039 */
3040 BUILD_BUG_ON(PCI_COMMAND % 4);
3041 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
3042
3043 raw_spin_lock_irqsave(&pci_lock, flags);
3044
3045 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
3046
3047 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
3048
3049 /*
3050 * Check interrupt status register to see whether our device
3051 * triggered the interrupt (when masking) or the next IRQ is
3052 * already pending (when unmasking).
3053 */
3054 if (mask != irq_pending) {
3055 mask_updated = false;
3056 goto done;
3057 }
3058
3059 origcmd = cmd_status_dword;
3060 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
3061 if (mask)
3062 newcmd |= PCI_COMMAND_INTX_DISABLE;
3063 if (newcmd != origcmd)
3064 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
3065
3066done:
3067 raw_spin_unlock_irqrestore(&pci_lock, flags);
3068
3069 return mask_updated;
3070}
3071
3072/**
3073 * pci_check_and_mask_intx - mask INTx on pending interrupt
6e9292c5 3074 * @dev: the PCI device to operate on
a2e27787
JK
3075 *
3076 * Check if the device dev has its INTx line asserted, mask it and
 3077 * return true in that case. False is returned if no interrupt was
3078 * pending.
3079 */
3080bool pci_check_and_mask_intx(struct pci_dev *dev)
3081{
3082 return pci_check_and_set_intx_mask(dev, true);
3083}
3084EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3085
3086/**
ebd50b93 3087 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
6e9292c5 3088 * @dev: the PCI device to operate on
a2e27787
JK
3089 *
3090 * Check if the device dev has its INTx line asserted, unmask it if not
3091 * and return true. False is returned and the mask remains active if
3092 * there was still an interrupt pending.
3093 */
3094bool pci_check_and_unmask_intx(struct pci_dev *dev)
3095{
3096 return pci_check_and_set_intx_mask(dev, false);
3097}
3098EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
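
/*
 * Illustrative sketch (added for this document, not part of pci.c): the
 * mask/unmask pair above is designed for sharing level-triggered INTx, as
 * done by UIO/VFIO-style drivers.  The handler masks the interrupt only if
 * this device asserted it, and a later step unmasks it once the source has
 * been serviced.  All names outside the PCI API are hypothetical.
 */
static irqreturn_t example_intx_handler(int irq, void *data)
{
	struct pci_dev *pdev = data;

	if (!pci_check_and_mask_intx(pdev))
		return IRQ_NONE;	/* not our interrupt; leave INTx alone */

	/* ... notify whoever consumes the interrupt ... */
	return IRQ_HANDLED;
}

static void example_intx_done(struct pci_dev *pdev)
{
	if (!pci_check_and_unmask_intx(pdev))
		dev_dbg(&pdev->dev, "INTx still pending, left masked\n");
}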
3099
f5f2b131 3100/**
da27f4b3 3101 * pci_msi_off - disables any MSI or MSI-X capabilities
8d7d86e9 3102 * @dev: the PCI device to operate on
f5f2b131 3103 *
da27f4b3
BH
3104 * If you want to use MSI, see pci_enable_msi() and friends.
3105 * This is a lower-level primitive that allows us to disable
3106 * MSI operation at the device level.
f5f2b131
EB
3107 */
3108void pci_msi_off(struct pci_dev *dev)
3109{
3110 int pos;
3111 u16 control;
3112
da27f4b3
BH
3113 /*
3114 * This looks like it could go in msi.c, but we need it even when
3115 * CONFIG_PCI_MSI=n. For the same reason, we can't use
3116 * dev->msi_cap or dev->msix_cap here.
3117 */
f5f2b131
EB
3118 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
3119 if (pos) {
3120 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
3121 control &= ~PCI_MSI_FLAGS_ENABLE;
3122 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
3123 }
3124 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
3125 if (pos) {
3126 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
3127 control &= ~PCI_MSIX_FLAGS_ENABLE;
3128 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3129 }
3130}
b03214d5 3131EXPORT_SYMBOL_GPL(pci_msi_off);
f5f2b131 3132
4d57cdfa
FT
3133int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3134{
3135 return dma_set_max_seg_size(&dev->dev, size);
3136}
3137EXPORT_SYMBOL(pci_set_dma_max_seg_size);
4d57cdfa 3138
59fc67de
FT
3139int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3140{
3141 return dma_set_seg_boundary(&dev->dev, mask);
3142}
3143EXPORT_SYMBOL(pci_set_dma_seg_boundary);
59fc67de 3144
3775a209
CL
3145/**
3146 * pci_wait_for_pending_transaction - waits for pending transaction
3147 * @dev: the PCI device to operate on
3148 *
 3149 * Return 0 if transaction is pending, 1 otherwise.
3150 */
3151int pci_wait_for_pending_transaction(struct pci_dev *dev)
8dd7f803 3152{
157e876f
AW
3153 if (!pci_is_pcie(dev))
3154 return 1;
8c1c699f 3155
d0b4cc4e
GS
3156 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
3157 PCI_EXP_DEVSTA_TRPND);
3775a209
CL
3158}
3159EXPORT_SYMBOL(pci_wait_for_pending_transaction);
3160
3161static int pcie_flr(struct pci_dev *dev, int probe)
3162{
3163 u32 cap;
3164
3165 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
3166 if (!(cap & PCI_EXP_DEVCAP_FLR))
3167 return -ENOTTY;
3168
3169 if (probe)
3170 return 0;
3171
3172 if (!pci_wait_for_pending_transaction(dev))
bb383e28 3173 dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
8c1c699f 3174
59875ae4 3175 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
8c1c699f 3176 msleep(100);
8dd7f803
SY
3177 return 0;
3178}
d91cdc74 3179
8c1c699f 3180static int pci_af_flr(struct pci_dev *dev, int probe)
1ca88797 3181{
8c1c699f 3182 int pos;
1ca88797
SY
3183 u8 cap;
3184
8c1c699f
YZ
3185 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3186 if (!pos)
1ca88797 3187 return -ENOTTY;
8c1c699f
YZ
3188
3189 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
1ca88797
SY
3190 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3191 return -ENOTTY;
3192
3193 if (probe)
3194 return 0;
3195
d066c946
AW
3196 /*
3197 * Wait for Transaction Pending bit to clear. A word-aligned test
 3198 * is used, so we use the control offset rather than status and shift
3199 * the test bit to match.
3200 */
bb383e28 3201 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
d066c946 3202 PCI_AF_STATUS_TP << 8))
bb383e28 3203 dev_err(&dev->dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
5fe5db05 3204
8c1c699f 3205 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
1ca88797 3206 msleep(100);
1ca88797
SY
3207 return 0;
3208}
3209
83d74e03
RW
3210/**
3211 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3212 * @dev: Device to reset.
3213 * @probe: If set, only check if the device can be reset this way.
3214 *
3215 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3216 * unset, it will be reinitialized internally when going from PCI_D3hot to
3217 * PCI_D0. If that's the case and the device is not in a low-power state
3218 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3219 *
3220 * NOTE: This causes the caller to sleep for twice the device power transition
3221 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
f7625980 3222 * by default (i.e. unless the @dev's d3_delay field has a different value).
83d74e03
RW
3223 * Moreover, only devices in D0 can be reset by this function.
3224 */
f85876ba 3225static int pci_pm_reset(struct pci_dev *dev, int probe)
d91cdc74 3226{
f85876ba
YZ
3227 u16 csr;
3228
51e53738 3229 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
f85876ba 3230 return -ENOTTY;
d91cdc74 3231
f85876ba
YZ
3232 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3233 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3234 return -ENOTTY;
d91cdc74 3235
f85876ba
YZ
3236 if (probe)
3237 return 0;
1ca88797 3238
f85876ba
YZ
3239 if (dev->current_state != PCI_D0)
3240 return -EINVAL;
3241
3242 csr &= ~PCI_PM_CTRL_STATE_MASK;
3243 csr |= PCI_D3hot;
3244 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 3245 pci_dev_d3_sleep(dev);
f85876ba
YZ
3246
3247 csr &= ~PCI_PM_CTRL_STATE_MASK;
3248 csr |= PCI_D0;
3249 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 3250 pci_dev_d3_sleep(dev);
f85876ba
YZ
3251
3252 return 0;
3253}
3254
9e33002f 3255void pci_reset_secondary_bus(struct pci_dev *dev)
c12ff1df
YZ
3256{
3257 u16 ctrl;
64e8674f
AW
3258
3259 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
3260 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3261 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
de0c548c
AW
3262 /*
3263 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
f7625980 3264 * this to 2ms to ensure that we meet the minimum requirement.
de0c548c
AW
3265 */
3266 msleep(2);
64e8674f
AW
3267
3268 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3269 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
de0c548c
AW
3270
3271 /*
3272 * Trhfa for conventional PCI is 2^25 clock cycles.
3273 * Assuming a minimum 33MHz clock this results in a 1s
3274 * delay before we can consider subordinate devices to
3275 * be re-initialized. PCIe has some ways to shorten this,
3276 * but we don't make use of them yet.
3277 */
3278 ssleep(1);
64e8674f 3279}
d92a208d 3280
9e33002f
GS
3281void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
3282{
3283 pci_reset_secondary_bus(dev);
3284}
3285
d92a208d
GS
3286/**
3287 * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
3288 * @dev: Bridge device
3289 *
3290 * Use the bridge control register to assert reset on the secondary bus.
3291 * Devices on the secondary bus are left in power-on state.
3292 */
3293void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
3294{
3295 pcibios_reset_secondary_bus(dev);
3296}
64e8674f
AW
3297EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
3298
3299static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3300{
c12ff1df
YZ
3301 struct pci_dev *pdev;
3302
f331a859
AW
3303 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
3304 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
c12ff1df
YZ
3305 return -ENOTTY;
3306
3307 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3308 if (pdev != dev)
3309 return -ENOTTY;
3310
3311 if (probe)
3312 return 0;
3313
64e8674f 3314 pci_reset_bridge_secondary_bus(dev->bus->self);
c12ff1df
YZ
3315
3316 return 0;
3317}
3318
608c3881
AW
3319static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
3320{
3321 int rc = -ENOTTY;
3322
3323 if (!hotplug || !try_module_get(hotplug->ops->owner))
3324 return rc;
3325
3326 if (hotplug->ops->reset_slot)
3327 rc = hotplug->ops->reset_slot(hotplug, probe);
3328
3329 module_put(hotplug->ops->owner);
3330
3331 return rc;
3332}
3333
3334static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
3335{
3336 struct pci_dev *pdev;
3337
f331a859
AW
3338 if (dev->subordinate || !dev->slot ||
3339 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
608c3881
AW
3340 return -ENOTTY;
3341
3342 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3343 if (pdev != dev && pdev->slot == dev->slot)
3344 return -ENOTTY;
3345
3346 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
3347}
3348
977f857c 3349static int __pci_dev_reset(struct pci_dev *dev, int probe)
d91cdc74 3350{
8c1c699f
YZ
3351 int rc;
3352
3353 might_sleep();
3354
b9c3b266
DC
3355 rc = pci_dev_specific_reset(dev, probe);
3356 if (rc != -ENOTTY)
3357 goto done;
3358
8c1c699f
YZ
3359 rc = pcie_flr(dev, probe);
3360 if (rc != -ENOTTY)
3361 goto done;
d91cdc74 3362
8c1c699f 3363 rc = pci_af_flr(dev, probe);
f85876ba
YZ
3364 if (rc != -ENOTTY)
3365 goto done;
3366
3367 rc = pci_pm_reset(dev, probe);
c12ff1df
YZ
3368 if (rc != -ENOTTY)
3369 goto done;
3370
608c3881
AW
3371 rc = pci_dev_reset_slot_function(dev, probe);
3372 if (rc != -ENOTTY)
3373 goto done;
3374
c12ff1df 3375 rc = pci_parent_bus_reset(dev, probe);
8c1c699f 3376done:
977f857c
KRW
3377 return rc;
3378}
3379
77cb985a
AW
3380static void pci_dev_lock(struct pci_dev *dev)
3381{
3382 pci_cfg_access_lock(dev);
3383 /* block PM suspend, driver probe, etc. */
3384 device_lock(&dev->dev);
3385}
3386
61cf16d8
AW
3387/* Return 1 on successful lock, 0 on contention */
3388static int pci_dev_trylock(struct pci_dev *dev)
3389{
3390 if (pci_cfg_access_trylock(dev)) {
3391 if (device_trylock(&dev->dev))
3392 return 1;
3393 pci_cfg_access_unlock(dev);
3394 }
3395
3396 return 0;
3397}
3398
77cb985a
AW
3399static void pci_dev_unlock(struct pci_dev *dev)
3400{
3401 device_unlock(&dev->dev);
3402 pci_cfg_access_unlock(dev);
3403}
3404
3ebe7f9f
KB
3405/**
3406 * pci_reset_notify - notify device driver of reset
3407 * @dev: device to be notified of reset
3408 * @prepare: 'true' if device is about to be reset; 'false' if reset attempt
3409 * completed
3410 *
3411 * Must be called prior to device access being disabled and after device
3412 * access is restored.
3413 */
3414static void pci_reset_notify(struct pci_dev *dev, bool prepare)
3415{
3416 const struct pci_error_handlers *err_handler =
3417 dev->driver ? dev->driver->err_handler : NULL;
3418 if (err_handler && err_handler->reset_notify)
3419 err_handler->reset_notify(dev, prepare);
3420}
3421
77cb985a
AW
3422static void pci_dev_save_and_disable(struct pci_dev *dev)
3423{
3ebe7f9f
KB
3424 pci_reset_notify(dev, true);
3425
a6cbaade
AW
3426 /*
3427 * Wake up the device prior to save. PM registers default to D0 after
3428 * reset and a simple register restore doesn't reliably return
3429 * to a non-D0 state anyway.
3430 */
3431 pci_set_power_state(dev, PCI_D0);
3432
77cb985a
AW
3433 pci_save_state(dev);
3434 /*
3435 * Disable the device by clearing the Command register, except for
3436 * INTx-disable which is set. This not only disables MMIO and I/O port
3437 * BARs, but also prevents the device from being Bus Master, preventing
3438 * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
3439 * compliant devices, INTx-disable prevents legacy interrupts.
3440 */
3441 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3442}
3443
3444static void pci_dev_restore(struct pci_dev *dev)
3445{
3446 pci_restore_state(dev);
3ebe7f9f 3447 pci_reset_notify(dev, false);
77cb985a
AW
3448}
3449
977f857c
KRW
3450static int pci_dev_reset(struct pci_dev *dev, int probe)
3451{
3452 int rc;
3453
77cb985a
AW
3454 if (!probe)
3455 pci_dev_lock(dev);
977f857c
KRW
3456
3457 rc = __pci_dev_reset(dev, probe);
3458
77cb985a
AW
3459 if (!probe)
3460 pci_dev_unlock(dev);
3461
8c1c699f 3462 return rc;
d91cdc74 3463}
3ebe7f9f 3464
d91cdc74 3465/**
8c1c699f
YZ
3466 * __pci_reset_function - reset a PCI device function
3467 * @dev: PCI device to reset
d91cdc74
SY
3468 *
3469 * Some devices allow an individual function to be reset without affecting
3470 * other functions in the same device. The PCI device must be responsive
3471 * to PCI config space in order to use this function.
3472 *
3473 * The device function is presumed to be unused when this function is called.
3474 * Resetting the device will make the contents of PCI configuration space
3475 * random, so any caller of this must be prepared to reinitialise the
3476 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3477 * etc.
3478 *
8c1c699f 3479 * Returns 0 if the device function was successfully reset or negative if the
d91cdc74
SY
3480 * device doesn't support resetting a single function.
3481 */
8c1c699f 3482int __pci_reset_function(struct pci_dev *dev)
d91cdc74 3483{
8c1c699f 3484 return pci_dev_reset(dev, 0);
d91cdc74 3485}
8c1c699f 3486EXPORT_SYMBOL_GPL(__pci_reset_function);
8dd7f803 3487
6fbf9e7a
KRW
3488/**
3489 * __pci_reset_function_locked - reset a PCI device function while holding
3490 * the @dev mutex lock.
3491 * @dev: PCI device to reset
3492 *
3493 * Some devices allow an individual function to be reset without affecting
3494 * other functions in the same device. The PCI device must be responsive
3495 * to PCI config space in order to use this function.
3496 *
3497 * The device function is presumed to be unused and the caller is holding
3498 * the device mutex lock when this function is called.
3499 * Resetting the device will make the contents of PCI configuration space
3500 * random, so any caller of this must be prepared to reinitialise the
3501 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3502 * etc.
3503 *
3504 * Returns 0 if the device function was successfully reset or negative if the
3505 * device doesn't support resetting a single function.
3506 */
3507int __pci_reset_function_locked(struct pci_dev *dev)
3508{
977f857c 3509 return __pci_dev_reset(dev, 0);
6fbf9e7a
KRW
3510}
3511EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3512
711d5779
MT
3513/**
3514 * pci_probe_reset_function - check whether the device can be safely reset
3515 * @dev: PCI device to reset
3516 *
3517 * Some devices allow an individual function to be reset without affecting
3518 * other functions in the same device. The PCI device must be responsive
3519 * to PCI config space in order to use this function.
3520 *
3521 * Returns 0 if the device function can be reset or negative if the
3522 * device doesn't support resetting a single function.
3523 */
3524int pci_probe_reset_function(struct pci_dev *dev)
3525{
3526 return pci_dev_reset(dev, 1);
3527}
3528
8dd7f803 3529/**
8c1c699f
YZ
3530 * pci_reset_function - quiesce and reset a PCI device function
3531 * @dev: PCI device to reset
8dd7f803
SY
3532 *
3533 * Some devices allow an individual function to be reset without affecting
3534 * other functions in the same device. The PCI device must be responsive
3535 * to PCI config space in order to use this function.
3536 *
3537 * This function does not just reset the PCI portion of a device, but
3538 * clears all the state associated with the device. This function differs
8c1c699f 3539 * from __pci_reset_function in that it saves and restores device state
8dd7f803
SY
3540 * over the reset.
3541 *
8c1c699f 3542 * Returns 0 if the device function was successfully reset or negative if the
8dd7f803
SY
3543 * device doesn't support resetting a single function.
3544 */
3545int pci_reset_function(struct pci_dev *dev)
3546{
8c1c699f 3547 int rc;
8dd7f803 3548
8c1c699f
YZ
3549 rc = pci_dev_reset(dev, 1);
3550 if (rc)
3551 return rc;
8dd7f803 3552
77cb985a 3553 pci_dev_save_and_disable(dev);
8dd7f803 3554
8c1c699f 3555 rc = pci_dev_reset(dev, 0);
8dd7f803 3556
77cb985a 3557 pci_dev_restore(dev);
8dd7f803 3558
8c1c699f 3559 return rc;
8dd7f803
SY
3560}
3561EXPORT_SYMBOL_GPL(pci_reset_function);
3562
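/*
 * Illustrative usage sketch (not part of pci.c): a driver recovering a
 * wedged function from its own error handling path. my_recover() is a
 * hypothetical name; pci_reset_function() and pci_try_reset_function()
 * are the real interfaces used.
 */
static int my_recover(struct pci_dev *pdev)
{
	int rc;

	/* Saves config space, resets the function, then restores that state. */
	rc = pci_reset_function(pdev);
	if (rc)
		return rc;	/* e.g. -ENOTTY: no per-function reset method */

	/*
	 * Driver-private state (rings, interrupt setup, ...) is gone; the
	 * driver would re-program the device here before resuming I/O.
	 * Callers that must not block on the device lock can use
	 * pci_try_reset_function() instead and retry on -EAGAIN.
	 */
	return 0;
}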
61cf16d8
AW
3563/**
3564 * pci_try_reset_function - quiesce and reset a PCI device function
3565 * @dev: PCI device to reset
3566 *
3567 * Same as above, except return -EAGAIN if unable to lock device.
3568 */
3569int pci_try_reset_function(struct pci_dev *dev)
3570{
3571 int rc;
3572
3573 rc = pci_dev_reset(dev, 1);
3574 if (rc)
3575 return rc;
3576
3577 pci_dev_save_and_disable(dev);
3578
3579 if (pci_dev_trylock(dev)) {
3580 rc = __pci_dev_reset(dev, 0);
3581 pci_dev_unlock(dev);
3582 } else
3583 rc = -EAGAIN;
3584
3585 pci_dev_restore(dev);
3586
3587 return rc;
3588}
3589EXPORT_SYMBOL_GPL(pci_try_reset_function);
3590
f331a859
AW
3591/* Do any devices on or below this bus prevent a bus reset? */
3592static bool pci_bus_resetable(struct pci_bus *bus)
3593{
3594 struct pci_dev *dev;
3595
3596 list_for_each_entry(dev, &bus->devices, bus_list) {
3597 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
3598 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
3599 return false;
3600 }
3601
3602 return true;
3603}
3604
090a3c53
AW
3605/* Lock devices from the top of the tree down */
3606static void pci_bus_lock(struct pci_bus *bus)
3607{
3608 struct pci_dev *dev;
3609
3610 list_for_each_entry(dev, &bus->devices, bus_list) {
3611 pci_dev_lock(dev);
3612 if (dev->subordinate)
3613 pci_bus_lock(dev->subordinate);
3614 }
3615}
3616
3617/* Unlock devices from the bottom of the tree up */
3618static void pci_bus_unlock(struct pci_bus *bus)
3619{
3620 struct pci_dev *dev;
3621
3622 list_for_each_entry(dev, &bus->devices, bus_list) {
3623 if (dev->subordinate)
3624 pci_bus_unlock(dev->subordinate);
3625 pci_dev_unlock(dev);
3626 }
3627}
3628
61cf16d8
AW
3629/* Return 1 on successful lock, 0 on contention */
3630static int pci_bus_trylock(struct pci_bus *bus)
3631{
3632 struct pci_dev *dev;
3633
3634 list_for_each_entry(dev, &bus->devices, bus_list) {
3635 if (!pci_dev_trylock(dev))
3636 goto unlock;
3637 if (dev->subordinate) {
3638 if (!pci_bus_trylock(dev->subordinate)) {
3639 pci_dev_unlock(dev);
3640 goto unlock;
3641 }
3642 }
3643 }
3644 return 1;
3645
3646unlock:
3647 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
3648 if (dev->subordinate)
3649 pci_bus_unlock(dev->subordinate);
3650 pci_dev_unlock(dev);
3651 }
3652 return 0;
3653}
3654
f331a859
AW
3655/* Do any devices on or below this slot prevent a bus reset? */
3656static bool pci_slot_resetable(struct pci_slot *slot)
3657{
3658 struct pci_dev *dev;
3659
3660 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3661 if (!dev->slot || dev->slot != slot)
3662 continue;
3663 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
3664 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
3665 return false;
3666 }
3667
3668 return true;
3669}
3670
090a3c53
AW
3671/* Lock devices from the top of the tree down */
3672static void pci_slot_lock(struct pci_slot *slot)
3673{
3674 struct pci_dev *dev;
3675
3676 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3677 if (!dev->slot || dev->slot != slot)
3678 continue;
3679 pci_dev_lock(dev);
3680 if (dev->subordinate)
3681 pci_bus_lock(dev->subordinate);
3682 }
3683}
3684
3685/* Unlock devices from the bottom of the tree up */
3686static void pci_slot_unlock(struct pci_slot *slot)
3687{
3688 struct pci_dev *dev;
3689
3690 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3691 if (!dev->slot || dev->slot != slot)
3692 continue;
3693 if (dev->subordinate)
3694 pci_bus_unlock(dev->subordinate);
3695 pci_dev_unlock(dev);
3696 }
3697}
3698
61cf16d8
AW
3699/* Return 1 on successful lock, 0 on contention */
3700static int pci_slot_trylock(struct pci_slot *slot)
3701{
3702 struct pci_dev *dev;
3703
3704 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3705 if (!dev->slot || dev->slot != slot)
3706 continue;
3707 if (!pci_dev_trylock(dev))
3708 goto unlock;
3709 if (dev->subordinate) {
3710 if (!pci_bus_trylock(dev->subordinate)) {
3711 pci_dev_unlock(dev);
3712 goto unlock;
3713 }
3714 }
3715 }
3716 return 1;
3717
3718unlock:
3719 list_for_each_entry_continue_reverse(dev,
3720 &slot->bus->devices, bus_list) {
3721 if (!dev->slot || dev->slot != slot)
3722 continue;
3723 if (dev->subordinate)
3724 pci_bus_unlock(dev->subordinate);
3725 pci_dev_unlock(dev);
3726 }
3727 return 0;
3728}
3729
090a3c53
AW
3730/* Save and disable devices from the top of the tree down */
3731static void pci_bus_save_and_disable(struct pci_bus *bus)
3732{
3733 struct pci_dev *dev;
3734
3735 list_for_each_entry(dev, &bus->devices, bus_list) {
3736 pci_dev_save_and_disable(dev);
3737 if (dev->subordinate)
3738 pci_bus_save_and_disable(dev->subordinate);
3739 }
3740}
3741
3742/*
3743 * Restore devices from top of the tree down - parent bridges need to be
3744 * restored before we can get to subordinate devices.
3745 */
3746static void pci_bus_restore(struct pci_bus *bus)
3747{
3748 struct pci_dev *dev;
3749
3750 list_for_each_entry(dev, &bus->devices, bus_list) {
3751 pci_dev_restore(dev);
3752 if (dev->subordinate)
3753 pci_bus_restore(dev->subordinate);
3754 }
3755}
3756
3757/* Save and disable devices from the top of the tree down */
3758static void pci_slot_save_and_disable(struct pci_slot *slot)
3759{
3760 struct pci_dev *dev;
3761
3762 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3763 if (!dev->slot || dev->slot != slot)
3764 continue;
3765 pci_dev_save_and_disable(dev);
3766 if (dev->subordinate)
3767 pci_bus_save_and_disable(dev->subordinate);
3768 }
3769}
3770
3771/*
3772 * Restore devices from top of the tree down - parent bridges need to be
3773 * restored before we can get to subordinate devices.
3774 */
3775static void pci_slot_restore(struct pci_slot *slot)
3776{
3777 struct pci_dev *dev;
3778
3779 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3780 if (!dev->slot || dev->slot != slot)
3781 continue;
3782 pci_dev_restore(dev);
3783 if (dev->subordinate)
3784 pci_bus_restore(dev->subordinate);
3785 }
3786}
3787
3788static int pci_slot_reset(struct pci_slot *slot, int probe)
3789{
3790 int rc;
3791
f331a859 3792 if (!slot || !pci_slot_resetable(slot))
090a3c53
AW
3793 return -ENOTTY;
3794
3795 if (!probe)
3796 pci_slot_lock(slot);
3797
3798 might_sleep();
3799
3800 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
3801
3802 if (!probe)
3803 pci_slot_unlock(slot);
3804
3805 return rc;
3806}
3807
9a3d2b9b
AW
3808/**
3809 * pci_probe_reset_slot - probe whether a PCI slot can be reset
3810 * @slot: PCI slot to probe
3811 *
3812 * Return 0 if slot can be reset, negative if a slot reset is not supported.
3813 */
3814int pci_probe_reset_slot(struct pci_slot *slot)
3815{
3816 return pci_slot_reset(slot, 1);
3817}
3818EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
3819
090a3c53
AW
3820/**
3821 * pci_reset_slot - reset a PCI slot
3822 * @slot: PCI slot to reset
3823 *
3824 * A PCI bus may host multiple slots, each slot may support a reset mechanism
3825 * independent of other slots. For instance, some slots may support slot power
3826 * control. In the case of a 1:1 bus to slot architecture, this function may
3827 * wrap the bus reset to avoid spurious slot related events such as hotplug.
3828 * Generally a slot reset should be attempted before a bus reset. All of the
3829 * functions of the slot and any subordinate buses behind the slot are reset
3830 * through this function. PCI config space of all devices in the slot and
3831 * behind the slot is saved before and restored after reset.
3832 *
3833 * Return 0 on success, non-zero on error.
3834 */
3835int pci_reset_slot(struct pci_slot *slot)
3836{
3837 int rc;
3838
3839 rc = pci_slot_reset(slot, 1);
3840 if (rc)
3841 return rc;
3842
3843 pci_slot_save_and_disable(slot);
3844
3845 rc = pci_slot_reset(slot, 0);
3846
3847 pci_slot_restore(slot);
3848
3849 return rc;
3850}
3851EXPORT_SYMBOL_GPL(pci_reset_slot);
3852
61cf16d8
AW
3853/**
3854 * pci_try_reset_slot - Try to reset a PCI slot
3855 * @slot: PCI slot to reset
3856 *
3857 * Same as above except return -EAGAIN if the slot cannot be locked
3858 */
3859int pci_try_reset_slot(struct pci_slot *slot)
3860{
3861 int rc;
3862
3863 rc = pci_slot_reset(slot, 1);
3864 if (rc)
3865 return rc;
3866
3867 pci_slot_save_and_disable(slot);
3868
3869 if (pci_slot_trylock(slot)) {
3870 might_sleep();
3871 rc = pci_reset_hotplug_slot(slot->hotplug, 0);
3872 pci_slot_unlock(slot);
3873 } else
3874 rc = -EAGAIN;
3875
3876 pci_slot_restore(slot);
3877
3878 return rc;
3879}
3880EXPORT_SYMBOL_GPL(pci_try_reset_slot);
3881
090a3c53
AW
3882static int pci_bus_reset(struct pci_bus *bus, int probe)
3883{
f331a859 3884 if (!bus->self || !pci_bus_resetable(bus))
090a3c53
AW
3885 return -ENOTTY;
3886
3887 if (probe)
3888 return 0;
3889
3890 pci_bus_lock(bus);
3891
3892 might_sleep();
3893
3894 pci_reset_bridge_secondary_bus(bus->self);
3895
3896 pci_bus_unlock(bus);
3897
3898 return 0;
3899}
3900
9a3d2b9b
AW
3901/**
3902 * pci_probe_reset_bus - probe whether a PCI bus can be reset
3903 * @bus: PCI bus to probe
3904 *
3905 * Return 0 if bus can be reset, negative if a bus reset is not supported.
3906 */
3907int pci_probe_reset_bus(struct pci_bus *bus)
3908{
3909 return pci_bus_reset(bus, 1);
3910}
3911EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
3912
090a3c53
AW
3913/**
3914 * pci_reset_bus - reset a PCI bus
3915 * @bus: top level PCI bus to reset
3916 *
3917 * Do a bus reset on the given bus and any subordinate buses, saving
3918 * and restoring state of all devices.
3919 *
3920 * Return 0 on success, non-zero on error.
3921 */
3922int pci_reset_bus(struct pci_bus *bus)
3923{
3924 int rc;
3925
3926 rc = pci_bus_reset(bus, 1);
3927 if (rc)
3928 return rc;
3929
3930 pci_bus_save_and_disable(bus);
3931
3932 rc = pci_bus_reset(bus, 0);
3933
3934 pci_bus_restore(bus);
3935
3936 return rc;
3937}
3938EXPORT_SYMBOL_GPL(pci_reset_bus);
3939
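/*
 * Illustrative usage sketch (not part of pci.c): preferring a slot reset
 * over a bus reset when recovering everything below a bridge, as the
 * comment above pci_reset_slot() recommends. my_reset_below() is a
 * hypothetical name.
 */
static int my_reset_below(struct pci_dev *pdev)
{
	if (pdev->slot && !pci_probe_reset_slot(pdev->slot))
		return pci_reset_slot(pdev->slot);

	if (!pci_probe_reset_bus(pdev->bus))
		return pci_reset_bus(pdev->bus);

	return -ENOTTY;	/* neither reset mechanism is available */
}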
61cf16d8
AW
3940/**
3941 * pci_try_reset_bus - Try to reset a PCI bus
3942 * @bus: top level PCI bus to reset
3943 *
3944 * Same as above except return -EAGAIN if the bus cannot be locked
3945 */
3946int pci_try_reset_bus(struct pci_bus *bus)
3947{
3948 int rc;
3949
3950 rc = pci_bus_reset(bus, 1);
3951 if (rc)
3952 return rc;
3953
3954 pci_bus_save_and_disable(bus);
3955
3956 if (pci_bus_trylock(bus)) {
3957 might_sleep();
3958 pci_reset_bridge_secondary_bus(bus->self);
3959 pci_bus_unlock(bus);
3960 } else
3961 rc = -EAGAIN;
3962
3963 pci_bus_restore(bus);
3964
3965 return rc;
3966}
3967EXPORT_SYMBOL_GPL(pci_try_reset_bus);
3968
d556ad4b
PO
3969/**
3970 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3971 * @dev: PCI device to query
3972 *
3973 * Returns mmrbc: maximum designed memory read count in bytes
3974 * or appropriate error value.
3975 */
3976int pcix_get_max_mmrbc(struct pci_dev *dev)
3977{
7c9e2b1c 3978 int cap;
d556ad4b
PO
3979 u32 stat;
3980
3981 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3982 if (!cap)
3983 return -EINVAL;
3984
7c9e2b1c 3985 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
d556ad4b
PO
3986 return -EINVAL;
3987
25daeb55 3988 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
d556ad4b
PO
3989}
3990EXPORT_SYMBOL(pcix_get_max_mmrbc);
3991
3992/**
3993 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3994 * @dev: PCI device to query
3995 *
3996 * Returns mmrbc: maximum memory read count in bytes
3997 * or appropriate error value.
3998 */
3999int pcix_get_mmrbc(struct pci_dev *dev)
4000{
7c9e2b1c 4001 int cap;
bdc2bda7 4002 u16 cmd;
d556ad4b
PO
4003
4004 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4005 if (!cap)
4006 return -EINVAL;
4007
7c9e2b1c
DN
4008 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4009 return -EINVAL;
d556ad4b 4010
7c9e2b1c 4011 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
d556ad4b
PO
4012}
4013EXPORT_SYMBOL(pcix_get_mmrbc);
4014
4015/**
4016 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
4017 * @dev: PCI device to query
4018 * @mmrbc: maximum memory read count in bytes
4019 * valid values are 512, 1024, 2048, 4096
4020 *
4021 * If possible sets maximum memory read byte count; some bridges have errata
4022 * that prevent this.
4023 */
4024int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
4025{
7c9e2b1c 4026 int cap;
bdc2bda7
DN
4027 u32 stat, v, o;
4028 u16 cmd;
d556ad4b 4029
229f5afd 4030 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
7c9e2b1c 4031 return -EINVAL;
d556ad4b
PO
4032
4033 v = ffs(mmrbc) - 10;
4034
4035 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4036 if (!cap)
7c9e2b1c 4037 return -EINVAL;
d556ad4b 4038
7c9e2b1c
DN
4039 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
4040 return -EINVAL;
d556ad4b
PO
4041
4042 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
4043 return -E2BIG;
4044
7c9e2b1c
DN
4045 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4046 return -EINVAL;
d556ad4b
PO
4047
4048 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
4049 if (o != v) {
809a3bf9 4050 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
d556ad4b
PO
4051 return -EIO;
4052
4053 cmd &= ~PCI_X_CMD_MAX_READ;
4054 cmd |= v << 2;
7c9e2b1c
DN
4055 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
4056 return -EIO;
d556ad4b 4057 }
7c9e2b1c 4058 return 0;
d556ad4b
PO
4059}
4060EXPORT_SYMBOL(pcix_set_mmrbc);
4061
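/*
 * Illustrative usage sketch (not part of pci.c): raising a PCI-X device's
 * memory read byte count to whatever it was designed for. my_tune_pcix()
 * is a hypothetical name.
 */
static void my_tune_pcix(struct pci_dev *pdev)
{
	int max = pcix_get_max_mmrbc(pdev);
	int cur = pcix_get_mmrbc(pdev);

	/* Both helpers return a byte count (512..4096) or a negative errno. */
	if (max > 0 && cur > 0 && cur < max)
		if (pcix_set_mmrbc(pdev, max))
			dev_warn(&pdev->dev, "could not raise MMRBC to %d\n",
				 max);
}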
4062/**
4063 * pcie_get_readrq - get PCI Express read request size
4064 * @dev: PCI device to query
4065 *
4066 * Returns maximum memory read request in bytes
4067 * or appropriate error value.
4068 */
4069int pcie_get_readrq(struct pci_dev *dev)
4070{
d556ad4b
PO
4071 u16 ctl;
4072
59875ae4 4073 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
d556ad4b 4074
59875ae4 4075 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
d556ad4b
PO
4076}
4077EXPORT_SYMBOL(pcie_get_readrq);
4078
4079/**
4080 * pcie_set_readrq - set PCI Express maximum memory read request
4081 * @dev: PCI device to query
42e61f4a 4082 * @rq: maximum memory read count in bytes
d556ad4b
PO
4083 * valid values are 128, 256, 512, 1024, 2048, 4096
4084 *
c9b378c7 4085 * If possible, sets the maximum memory read request size in bytes
d556ad4b
PO
4086 */
4087int pcie_set_readrq(struct pci_dev *dev, int rq)
4088{
59875ae4 4089 u16 v;
d556ad4b 4090
229f5afd 4091 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
59875ae4 4092 return -EINVAL;
d556ad4b 4093
a1c473aa
BH
4094 /*
4095 * If using the "performance" PCIe config, we clamp the
4096 * read rq size to the max packet size to prevent the
4097 * host bridge generating requests larger than we can
4098 * cope with
4099 */
4100 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
4101 int mps = pcie_get_mps(dev);
4102
a1c473aa
BH
4103 if (mps < rq)
4104 rq = mps;
4105 }
4106
4107 v = (ffs(rq) - 8) << 12;
d556ad4b 4108
59875ae4
JL
4109 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
4110 PCI_EXP_DEVCTL_READRQ, v);
d556ad4b
PO
4111}
4112EXPORT_SYMBOL(pcie_set_readrq);
4113
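/*
 * Illustrative usage sketch (not part of pci.c): a driver limiting the
 * read request size to what its DMA engine handles well. The 512-byte
 * figure is an arbitrary example, not a recommendation.
 */
static void my_limit_readrq(struct pci_dev *pdev)
{
	if (pcie_get_readrq(pdev) > 512)
		if (pcie_set_readrq(pdev, 512))
			dev_warn(&pdev->dev,
				 "failed to set read request size\n");
}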
b03e7495
JM
4114/**
4115 * pcie_get_mps - get PCI Express maximum payload size
4116 * @dev: PCI device to query
4117 *
4118 * Returns maximum payload size in bytes
b03e7495
JM
4119 */
4120int pcie_get_mps(struct pci_dev *dev)
4121{
b03e7495
JM
4122 u16 ctl;
4123
59875ae4 4124 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
b03e7495 4125
59875ae4 4126 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
b03e7495 4127}
f1c66c46 4128EXPORT_SYMBOL(pcie_get_mps);
b03e7495
JM
4129
4130/**
4131 * pcie_set_mps - set PCI Express maximum payload size
4132 * @dev: PCI device to query
47c08f31 4133 * @mps: maximum payload size in bytes
b03e7495
JM
4134 * valid values are 128, 256, 512, 1024, 2048, 4096
4135 *
4136 * If possible, sets the maximum payload size
4137 */
4138int pcie_set_mps(struct pci_dev *dev, int mps)
4139{
59875ae4 4140 u16 v;
b03e7495
JM
4141
4142 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
59875ae4 4143 return -EINVAL;
b03e7495
JM
4144
4145 v = ffs(mps) - 8;
f7625980 4146 if (v > dev->pcie_mpss)
59875ae4 4147 return -EINVAL;
b03e7495
JM
4148 v <<= 5;
4149
59875ae4
JL
4150 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
4151 PCI_EXP_DEVCTL_PAYLOAD, v);
b03e7495 4152}
f1c66c46 4153EXPORT_SYMBOL(pcie_set_mps);
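/*
 * Illustrative usage sketch (not part of pci.c): clamping a requested MPS
 * to what the device advertises (dev->pcie_mpss is the encoded supported
 * maximum, i.e. 128 << pcie_mpss bytes). my_set_mps() is a hypothetical
 * name.
 */
static int my_set_mps(struct pci_dev *pdev, int mps)
{
	int supported = 128 << pdev->pcie_mpss;

	if (mps > supported)
		mps = supported;

	/* pcie_set_mps() still validates the value against the device. */
	return pcie_set_mps(pdev, mps);
}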
b03e7495 4154
81377c8d
JK
4155/**
4156 * pcie_get_minimum_link - determine minimum link settings of a PCI device
4157 * @dev: PCI device to query
4158 * @speed: storage for minimum speed
4159 * @width: storage for minimum width
4160 *
4161 * This function will walk up the PCI device chain and determine the minimum
4162 * link width and speed of the device.
4163 */
4164int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
4165 enum pcie_link_width *width)
4166{
4167 int ret;
4168
4169 *speed = PCI_SPEED_UNKNOWN;
4170 *width = PCIE_LNK_WIDTH_UNKNOWN;
4171
4172 while (dev) {
4173 u16 lnksta;
4174 enum pci_bus_speed next_speed;
4175 enum pcie_link_width next_width;
4176
4177 ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
4178 if (ret)
4179 return ret;
4180
4181 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
4182 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
4183 PCI_EXP_LNKSTA_NLW_SHIFT;
4184
4185 if (next_speed < *speed)
4186 *speed = next_speed;
4187
4188 if (next_width < *width)
4189 *width = next_width;
4190
4191 dev = dev->bus->self;
4192 }
4193
4194 return 0;
4195}
4196EXPORT_SYMBOL(pcie_get_minimum_link);
4197
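/*
 * Illustrative usage sketch (not part of pci.c): warning when the path to
 * the root complex is narrower or slower than the device would like, in
 * the style of several NIC drivers. The thresholds are arbitrary examples.
 */
static void my_check_link(struct pci_dev *pdev)
{
	enum pci_bus_speed speed;
	enum pcie_link_width width;

	if (pcie_get_minimum_link(pdev, &speed, &width))
		return;

	if (speed < PCIE_SPEED_5_0GT || width < PCIE_LNK_X8)
		dev_warn(&pdev->dev,
			 "PCIe link is slower or narrower than the device supports\n");
}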
c87deff7
HS
4198/**
4199 * pci_select_bars - Make BAR mask from the type of resource
f95d882d 4200 * @dev: the PCI device for which BAR mask is made
c87deff7
HS
4201 * @flags: resource type mask to be selected
4202 *
4203 * This helper routine makes a BAR mask from the resource type.
4204 */
4205int pci_select_bars(struct pci_dev *dev, unsigned long flags)
4206{
4207 int i, bars = 0;
4208 for (i = 0; i < PCI_NUM_RESOURCES; i++)
4209 if (pci_resource_flags(dev, i) & flags)
4210 bars |= (1 << i);
4211 return bars;
4212}
b7fe9434 4213EXPORT_SYMBOL(pci_select_bars);
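/*
 * Illustrative usage sketch (not part of pci.c): requesting only the
 * memory BARs of a device, the common pairing of pci_select_bars() with
 * pci_request_selected_regions(). "my_driver" is a hypothetical name.
 */
static int my_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "my_driver");
}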
c87deff7 4214
613e7ed6
YZ
4215/**
4216 * pci_resource_bar - get position of the BAR associated with a resource
4217 * @dev: the PCI device
4218 * @resno: the resource number
4219 * @type: the BAR type to be filled in
4220 *
4221 * Returns BAR position in config space, or 0 if the BAR is invalid.
4222 */
4223int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
4224{
d1b054da
YZ
4225 int reg;
4226
613e7ed6
YZ
4227 if (resno < PCI_ROM_RESOURCE) {
4228 *type = pci_bar_unknown;
4229 return PCI_BASE_ADDRESS_0 + 4 * resno;
4230 } else if (resno == PCI_ROM_RESOURCE) {
4231 *type = pci_bar_mem32;
4232 return dev->rom_base_reg;
d1b054da
YZ
4233 } else if (resno < PCI_BRIDGE_RESOURCES) {
4234 /* device specific resource */
26ff46c6
MS
4235 *type = pci_bar_unknown;
4236 reg = pci_iov_resource_bar(dev, resno);
d1b054da
YZ
4237 if (reg)
4238 return reg;
613e7ed6
YZ
4239 }
4240
865df576 4241 dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
613e7ed6
YZ
4242 return 0;
4243}
4244
95a8b6ef
MT
4245/* Some architectures require additional programming to enable VGA */
4246static arch_set_vga_state_t arch_set_vga_state;
4247
4248void __init pci_register_set_vga_state(arch_set_vga_state_t func)
4249{
4250 arch_set_vga_state = func; /* NULL disables */
4251}
4252
4253static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3c78bc61 4254 unsigned int command_bits, u32 flags)
95a8b6ef
MT
4255{
4256 if (arch_set_vga_state)
4257 return arch_set_vga_state(dev, decode, command_bits,
7ad35cf2 4258 flags);
95a8b6ef
MT
4259 return 0;
4260}
4261
deb2d2ec
BH
4262/**
4263 * pci_set_vga_state - set VGA decode state on device and parents if requested
19eea630
RD
4264 * @dev: the PCI device
4265 * @decode: true = enable decoding, false = disable decoding
4266 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3f37d622 4267 * @flags: traverse ancestors and change bridges
3448a19d 4268 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
deb2d2ec
BH
4269 */
4270int pci_set_vga_state(struct pci_dev *dev, bool decode,
3448a19d 4271 unsigned int command_bits, u32 flags)
deb2d2ec
BH
4272{
4273 struct pci_bus *bus;
4274 struct pci_dev *bridge;
4275 u16 cmd;
95a8b6ef 4276 int rc;
deb2d2ec 4277
67ebd814 4278 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
deb2d2ec 4279
95a8b6ef 4280 /* ARCH specific VGA enables */
3448a19d 4281 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
95a8b6ef
MT
4282 if (rc)
4283 return rc;
4284
3448a19d
DA
4285 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
4286 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4287 if (decode == true)
4288 cmd |= command_bits;
4289 else
4290 cmd &= ~command_bits;
4291 pci_write_config_word(dev, PCI_COMMAND, cmd);
4292 }
deb2d2ec 4293
3448a19d 4294 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
deb2d2ec
BH
4295 return 0;
4296
4297 bus = dev->bus;
4298 while (bus) {
4299 bridge = bus->self;
4300 if (bridge) {
4301 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
4302 &cmd);
4303 if (decode == true)
4304 cmd |= PCI_BRIDGE_CTL_VGA;
4305 else
4306 cmd &= ~PCI_BRIDGE_CTL_VGA;
4307 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
4308 cmd);
4309 }
4310 bus = bus->parent;
4311 }
4312 return 0;
4313}
4314
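/*
 * Illustrative usage sketch (not part of pci.c): disabling legacy VGA
 * decode on a secondary graphics device and on the bridges above it,
 * roughly what the VGA arbiter does. my_mute_vga() is a hypothetical name.
 */
static int my_mute_vga(struct pci_dev *pdev)
{
	return pci_set_vga_state(pdev, false,
				 PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
				 PCI_VGA_STATE_CHANGE_DECODES |
				 PCI_VGA_STATE_CHANGE_BRIDGE);
}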
8496e85c
RW
4315bool pci_device_is_present(struct pci_dev *pdev)
4316{
4317 u32 v;
4318
4319 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
4320}
4321EXPORT_SYMBOL_GPL(pci_device_is_present);
4322
32a9a682
YS
4323#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
4324static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
e9d1e492 4325static DEFINE_SPINLOCK(resource_alignment_lock);
32a9a682
YS
4326
4327/**
4328 * pci_specified_resource_alignment - get resource alignment specified by user.
4329 * @dev: the PCI device to get
4330 *
4331 * RETURNS: Resource alignment if it is specified.
4332 * Zero if it is not specified.
4333 */
9738abed 4334static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
32a9a682
YS
4335{
4336 int seg, bus, slot, func, align_order, count;
4337 resource_size_t align = 0;
4338 char *p;
4339
4340 spin_lock(&resource_alignment_lock);
4341 p = resource_alignment_param;
4342 while (*p) {
4343 count = 0;
4344 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
4345 p[count] == '@') {
4346 p += count + 1;
4347 } else {
4348 align_order = -1;
4349 }
4350 if (sscanf(p, "%x:%x:%x.%x%n",
4351 &seg, &bus, &slot, &func, &count) != 4) {
4352 seg = 0;
4353 if (sscanf(p, "%x:%x.%x%n",
4354 &bus, &slot, &func, &count) != 3) {
4355 /* Invalid format */
4356 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
4357 p);
4358 break;
4359 }
4360 }
4361 p += count;
4362 if (seg == pci_domain_nr(dev->bus) &&
4363 bus == dev->bus->number &&
4364 slot == PCI_SLOT(dev->devfn) &&
4365 func == PCI_FUNC(dev->devfn)) {
3c78bc61 4366 if (align_order == -1)
32a9a682 4367 align = PAGE_SIZE;
3c78bc61 4368 else
32a9a682 4369 align = 1 << align_order;
32a9a682
YS
4370 /* Found */
4371 break;
4372 }
4373 if (*p != ';' && *p != ',') {
4374 /* End of param or invalid format */
4375 break;
4376 }
4377 p++;
4378 }
4379 spin_unlock(&resource_alignment_lock);
4380 return align;
4381}
4382
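/*
 * Example of the format parsed above (an illustration, not a new option):
 * a boot command line of
 *
 *	pci=resource_alignment=20@0000:02:00.0
 *
 * requests 2^20-byte (1 MiB) alignment for the device at domain 0000,
 * bus 02, slot 00, function 0; omitting the "<order>@" prefix falls back
 * to PAGE_SIZE alignment.
 */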
2069ecfb
YL
4383/*
4384 * This function disables memory decoding and releases memory resources
4385 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
4386 * It also rounds up size to specified alignment.
4387 * Later on, the kernel will assign page-aligned memory resource back
4388 * to the device.
4389 */
4390void pci_reassigndev_resource_alignment(struct pci_dev *dev)
4391{
4392 int i;
4393 struct resource *r;
4394 resource_size_t align, size;
4395 u16 command;
4396
10c463a7
YL
4397 /* check if specified PCI is target device to reassign */
4398 align = pci_specified_resource_alignment(dev);
4399 if (!align)
2069ecfb
YL
4400 return;
4401
4402 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
4403 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
4404 dev_warn(&dev->dev,
4405 "Can't reassign resources to host bridge.\n");
4406 return;
4407 }
4408
4409 dev_info(&dev->dev,
4410 "Disabling memory decoding and releasing memory resources.\n");
4411 pci_read_config_word(dev, PCI_COMMAND, &command);
4412 command &= ~PCI_COMMAND_MEMORY;
4413 pci_write_config_word(dev, PCI_COMMAND, command);
4414
2069ecfb
YL
4415 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
4416 r = &dev->resource[i];
4417 if (!(r->flags & IORESOURCE_MEM))
4418 continue;
4419 size = resource_size(r);
4420 if (size < align) {
4421 size = align;
4422 dev_info(&dev->dev,
4423 "Rounding up size of resource #%d to %#llx.\n",
4424 i, (unsigned long long)size);
4425 }
bd064f0a 4426 r->flags |= IORESOURCE_UNSET;
2069ecfb
YL
4427 r->end = size - 1;
4428 r->start = 0;
4429 }
4430 /* Need to disable bridge's resource window,
4431 * to enable the kernel to reassign new resource
4432 * window later on.
4433 */
4434 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
4435 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
4436 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
4437 r = &dev->resource[i];
4438 if (!(r->flags & IORESOURCE_MEM))
4439 continue;
bd064f0a 4440 r->flags |= IORESOURCE_UNSET;
2069ecfb
YL
4441 r->end = resource_size(r) - 1;
4442 r->start = 0;
4443 }
4444 pci_disable_bridge_window(dev);
4445 }
4446}
4447
9738abed 4448static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
32a9a682
YS
4449{
4450 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
4451 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
4452 spin_lock(&resource_alignment_lock);
4453 strncpy(resource_alignment_param, buf, count);
4454 resource_alignment_param[count] = '\0';
4455 spin_unlock(&resource_alignment_lock);
4456 return count;
4457}
4458
9738abed 4459static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
32a9a682
YS
4460{
4461 size_t count;
4462 spin_lock(&resource_alignment_lock);
4463 count = snprintf(buf, size, "%s", resource_alignment_param);
4464 spin_unlock(&resource_alignment_lock);
4465 return count;
4466}
4467
4468static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
4469{
4470 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
4471}
4472
4473static ssize_t pci_resource_alignment_store(struct bus_type *bus,
4474 const char *buf, size_t count)
4475{
4476 return pci_set_resource_alignment_param(buf, count);
4477}
4478
4479BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
4480 pci_resource_alignment_store);
4481
4482static int __init pci_resource_alignment_sysfs_init(void)
4483{
4484 return bus_create_file(&pci_bus_type,
4485 &bus_attr_resource_alignment);
4486}
32a9a682
YS
4487late_initcall(pci_resource_alignment_sysfs_init);
4488
15856ad5 4489static void pci_no_domains(void)
32a2eea7
JG
4490{
4491#ifdef CONFIG_PCI_DOMAINS
4492 pci_domains_supported = 0;
4493#endif
4494}
4495
41e5c0f8
LD
4496#ifdef CONFIG_PCI_DOMAINS
4497static atomic_t __domain_nr = ATOMIC_INIT(-1);
4498
4499int pci_get_new_domain_nr(void)
4500{
4501 return atomic_inc_return(&__domain_nr);
4502}
7c674700
LP
4503
4504#ifdef CONFIG_PCI_DOMAINS_GENERIC
4505void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
4506{
4507 static int use_dt_domains = -1;
4508 int domain = of_get_pci_domain_nr(parent->of_node);
4509
4510 /*
4511 * Check DT domain and use_dt_domains values.
4512 *
4513 * If DT domain property is valid (domain >= 0) and
4514 * use_dt_domains != 0, the DT assignment is valid since this means
4515 * we have not previously allocated a domain number by using
4516 * pci_get_new_domain_nr(); we should also update use_dt_domains to
4517 * 1, to indicate that we have just assigned a domain number from
4518 * DT.
4519 *
4520 * If DT domain property value is not valid (ie domain < 0), and we
4521 * have not previously assigned a domain number from DT
4522 * (use_dt_domains != 1) we should assign a domain number by
4523 * using the:
4524 *
4525 * pci_get_new_domain_nr()
4526 *
4527 * API and update the use_dt_domains value to keep track of method we
4528 * are using to assign domain numbers (use_dt_domains = 0).
4529 *
4530 * All other combinations imply we have a platform that is trying
4531 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
4532 * which is a recipe for domain mishandling and it is prevented by
4533 * invalidating the domain value (domain = -1) and printing a
4534 * corresponding error.
4535 */
4536 if (domain >= 0 && use_dt_domains) {
4537 use_dt_domains = 1;
4538 } else if (domain < 0 && use_dt_domains != 1) {
4539 use_dt_domains = 0;
4540 domain = pci_get_new_domain_nr();
4541 } else {
4542 dev_err(parent, "Node %s has inconsistent \"linux,pci-domain\" property in DT\n",
4543 parent->of_node->full_name);
4544 domain = -1;
4545 }
4546
4547 bus->domain_nr = domain;
4548}
4549#endif
41e5c0f8
LD
4550#endif
4551
0ef5f8f6 4552/**
642c92da 4553 * pci_ext_cfg_avail - can we access extended PCI config space?
0ef5f8f6
AP
4554 *
4555 * Returns 1 if we can access PCI extended config space (offsets
4556 * greater than 0xff). This is the default implementation. Architecture
4557 * implementations can override this.
4558 */
642c92da 4559int __weak pci_ext_cfg_avail(void)
0ef5f8f6
AP
4560{
4561 return 1;
4562}
4563
2d1c8618
BH
4564void __weak pci_fixup_cardbus(struct pci_bus *bus)
4565{
4566}
4567EXPORT_SYMBOL(pci_fixup_cardbus);
4568
ad04d31e 4569static int __init pci_setup(char *str)
1da177e4
LT
4570{
4571 while (str) {
4572 char *k = strchr(str, ',');
4573 if (k)
4574 *k++ = 0;
4575 if (*str && (str = pcibios_setup(str)) && *str) {
309e57df
MW
4576 if (!strcmp(str, "nomsi")) {
4577 pci_no_msi();
7f785763
RD
4578 } else if (!strcmp(str, "noaer")) {
4579 pci_no_aer();
b55438fd
YL
4580 } else if (!strncmp(str, "realloc=", 8)) {
4581 pci_realloc_get_opt(str + 8);
f483d392 4582 } else if (!strncmp(str, "realloc", 7)) {
b55438fd 4583 pci_realloc_get_opt("on");
32a2eea7
JG
4584 } else if (!strcmp(str, "nodomains")) {
4585 pci_no_domains();
6748dcc2
RW
4586 } else if (!strncmp(str, "noari", 5)) {
4587 pcie_ari_disabled = true;
4516a618
AN
4588 } else if (!strncmp(str, "cbiosize=", 9)) {
4589 pci_cardbus_io_size = memparse(str + 9, &str);
4590 } else if (!strncmp(str, "cbmemsize=", 10)) {
4591 pci_cardbus_mem_size = memparse(str + 10, &str);
32a9a682
YS
4592 } else if (!strncmp(str, "resource_alignment=", 19)) {
4593 pci_set_resource_alignment_param(str + 19,
4594 strlen(str + 19));
43c16408
AP
4595 } else if (!strncmp(str, "ecrc=", 5)) {
4596 pcie_ecrc_get_policy(str + 5);
28760489
EB
4597 } else if (!strncmp(str, "hpiosize=", 9)) {
4598 pci_hotplug_io_size = memparse(str + 9, &str);
4599 } else if (!strncmp(str, "hpmemsize=", 10)) {
4600 pci_hotplug_mem_size = memparse(str + 10, &str);
5f39e670
JM
4601 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
4602 pcie_bus_config = PCIE_BUS_TUNE_OFF;
b03e7495
JM
4603 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
4604 pcie_bus_config = PCIE_BUS_SAFE;
4605 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
4606 pcie_bus_config = PCIE_BUS_PERFORMANCE;
5f39e670
JM
4607 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
4608 pcie_bus_config = PCIE_BUS_PEER2PEER;
284f5f9d
BH
4609 } else if (!strncmp(str, "pcie_scan_all", 13)) {
4610 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
309e57df
MW
4611 } else {
4612 printk(KERN_ERR "PCI: Unknown option `%s'\n",
4613 str);
4614 }
1da177e4
LT
4615 }
4616 str = k;
4617 }
0637a70a 4618 return 0;
1da177e4 4619}
0637a70a 4620early_param("pci", pci_setup);
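/*
 * Example of options handled by pci_setup() above (illustrative values):
 *
 *	pci=nomsi,pcie_bus_safe,hpmemsize=8M,resource_alignment=12@0000:03:00.0
 *
 * disables MSI, selects the "safe" MPS configuration, reserves 8M of
 * memory space per hotplug bridge, and requests 4K alignment for the
 * device at 0000:03:00.0.
 */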