PCI: Add support for polling PME state on suspended legacy PCI devices
drivers/pci/pci.c
1 /*
2 * PCI Bus Services, see include/linux/pci.h for further explanation.
3 *
4 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
5 * David Mosberger-Tang
6 *
7 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/delay.h>
12 #include <linux/init.h>
13 #include <linux/pci.h>
14 #include <linux/pm.h>
15 #include <linux/slab.h>
16 #include <linux/module.h>
17 #include <linux/spinlock.h>
18 #include <linux/string.h>
19 #include <linux/log2.h>
20 #include <linux/pci-aspm.h>
21 #include <linux/pm_wakeup.h>
22 #include <linux/interrupt.h>
23 #include <linux/device.h>
24 #include <linux/pm_runtime.h>
25 #include <asm/setup.h>
26 #include "pci.h"
27
28 const char *pci_power_names[] = {
29 "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
30 };
31 EXPORT_SYMBOL_GPL(pci_power_names);
32
33 int isa_dma_bridge_buggy;
34 EXPORT_SYMBOL(isa_dma_bridge_buggy);
35
36 int pci_pci_problems;
37 EXPORT_SYMBOL(pci_pci_problems);
38
39 unsigned int pci_pm_d3_delay;
40
41 static void pci_pme_list_scan(struct work_struct *work);
42
43 static LIST_HEAD(pci_pme_list);
44 static DEFINE_MUTEX(pci_pme_list_mutex);
45 static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
46
47 struct pci_pme_device {
48 struct list_head list;
49 struct pci_dev *dev;
50 };
51
52 #define PME_TIMEOUT 1000 /* ms between PME status polls */
53
54 static void pci_dev_d3_sleep(struct pci_dev *dev)
55 {
56 unsigned int delay = dev->d3_delay;
57
58 if (delay < pci_pm_d3_delay)
59 delay = pci_pm_d3_delay;
60
61 msleep(delay);
62 }
63
64 #ifdef CONFIG_PCI_DOMAINS
65 int pci_domains_supported = 1;
66 #endif
67
68 #define DEFAULT_CARDBUS_IO_SIZE (256)
69 #define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024)
70 /* pci=cbmemsize=nnM,cbiosize=nn can override this */
71 unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
72 unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
73
74 #define DEFAULT_HOTPLUG_IO_SIZE (256)
75 #define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)
76 /* pci=hpmemsize=nnM,hpiosize=nn can override this */
77 unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
78 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
79
80 /*
81 * The default CLS is used if the arch didn't set CLS explicitly and not
82 * all PCI devices agree on the same value. The arch can override either
83 * the default or the actual value as it sees fit. Note that this is
84 * measured in 32-bit words, not bytes.
85 */
86 u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
87 u8 pci_cache_line_size;
88
89 /**
90 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
91 * @bus: pointer to PCI bus structure to search
92 *
93 * Given a PCI bus, returns the highest PCI bus number present in the set
94 * including the given PCI bus and its list of child PCI buses.
95 */
96 unsigned char pci_bus_max_busnr(struct pci_bus *bus)
97 {
98 struct list_head *tmp;
99 unsigned char max, n;
100
101 max = bus->subordinate;
102 list_for_each(tmp, &bus->children) {
103 n = pci_bus_max_busnr(pci_bus_b(tmp));
104 if (n > max)
105 max = n;
106 }
107 return max;
108 }
109 EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
110
111 #ifdef CONFIG_HAS_IOMEM
112 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
113 {
114 /*
115 * Make sure the BAR is actually a memory resource, not an IO resource
116 */
117 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
118 WARN_ON(1);
119 return NULL;
120 }
121 return ioremap_nocache(pci_resource_start(pdev, bar),
122 pci_resource_len(pdev, bar));
123 }
124 EXPORT_SYMBOL_GPL(pci_ioremap_bar);
125 #endif
126
127 #if 0
128 /**
129 * pci_max_busnr - returns maximum PCI bus number
130 *
131 * Returns the highest PCI bus number present in the system global list of
132 * PCI buses.
133 */
134 unsigned char __devinit
135 pci_max_busnr(void)
136 {
137 struct pci_bus *bus = NULL;
138 unsigned char max, n;
139
140 max = 0;
141 while ((bus = pci_find_next_bus(bus)) != NULL) {
142 n = pci_bus_max_busnr(bus);
143 if (n > max)
144 max = n;
145 }
146 return max;
147 }
148
149 #endif /* 0 */
150
151 #define PCI_FIND_CAP_TTL 48
152
153 static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
154 u8 pos, int cap, int *ttl)
155 {
156 u8 id;
157
158 while ((*ttl)--) {
159 pci_bus_read_config_byte(bus, devfn, pos, &pos);
160 if (pos < 0x40)
161 break;
162 pos &= ~3;
163 pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
164 &id);
165 if (id == 0xff)
166 break;
167 if (id == cap)
168 return pos;
169 pos += PCI_CAP_LIST_NEXT;
170 }
171 return 0;
172 }
173
174 static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
175 u8 pos, int cap)
176 {
177 int ttl = PCI_FIND_CAP_TTL;
178
179 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
180 }
181
182 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
183 {
184 return __pci_find_next_cap(dev->bus, dev->devfn,
185 pos + PCI_CAP_LIST_NEXT, cap);
186 }
187 EXPORT_SYMBOL_GPL(pci_find_next_capability);
188
189 static int __pci_bus_find_cap_start(struct pci_bus *bus,
190 unsigned int devfn, u8 hdr_type)
191 {
192 u16 status;
193
194 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
195 if (!(status & PCI_STATUS_CAP_LIST))
196 return 0;
197
198 switch (hdr_type) {
199 case PCI_HEADER_TYPE_NORMAL:
200 case PCI_HEADER_TYPE_BRIDGE:
201 return PCI_CAPABILITY_LIST;
202 case PCI_HEADER_TYPE_CARDBUS:
203 return PCI_CB_CAPABILITY_LIST;
204 default:
205 return 0;
206 }
207
208 return 0;
209 }
210
211 /**
212 * pci_find_capability - query for devices' capabilities
213 * @dev: PCI device to query
214 * @cap: capability code
215 *
216 * Tell if a device supports a given PCI capability.
217 * Returns the address of the requested capability structure within the
218 * device's PCI configuration space or 0 in case the device does not
219 * support it. Possible values for @cap:
220 *
221 * %PCI_CAP_ID_PM Power Management
222 * %PCI_CAP_ID_AGP Accelerated Graphics Port
223 * %PCI_CAP_ID_VPD Vital Product Data
224 * %PCI_CAP_ID_SLOTID Slot Identification
225 * %PCI_CAP_ID_MSI Message Signalled Interrupts
226 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
227 * %PCI_CAP_ID_PCIX PCI-X
228 * %PCI_CAP_ID_EXP PCI Express
229 */
230 int pci_find_capability(struct pci_dev *dev, int cap)
231 {
232 int pos;
233
234 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
235 if (pos)
236 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
237
238 return pos;
239 }
240
241 /**
242 * pci_bus_find_capability - query for devices' capabilities
243 * @bus: the PCI bus to query
244 * @devfn: PCI device to query
245 * @cap: capability code
246 *
247 * Like pci_find_capability() but works for pci devices that do not have a
248 * pci_dev structure set up yet.
249 *
250 * Returns the address of the requested capability structure within the
251 * device's PCI configuration space or 0 in case the device does not
252 * support it.
253 */
254 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
255 {
256 int pos;
257 u8 hdr_type;
258
259 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
260
261 pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
262 if (pos)
263 pos = __pci_find_next_cap(bus, devfn, pos, cap);
264
265 return pos;
266 }
267
268 /**
269 * pci_find_ext_capability - Find an extended capability
270 * @dev: PCI device to query
271 * @cap: capability code
272 *
273 * Returns the address of the requested extended capability structure
274 * within the device's PCI configuration space or 0 if the device does
275 * not support it. Possible values for @cap:
276 *
277 * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting
278 * %PCI_EXT_CAP_ID_VC Virtual Channel
279 * %PCI_EXT_CAP_ID_DSN Device Serial Number
280 * %PCI_EXT_CAP_ID_PWR Power Budgeting
281 */
282 int pci_find_ext_capability(struct pci_dev *dev, int cap)
283 {
284 u32 header;
285 int ttl;
286 int pos = PCI_CFG_SPACE_SIZE;
287
288 /* minimum 8 bytes per capability */
289 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
290
291 if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
292 return 0;
293
294 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
295 return 0;
296
297 /*
298 * If we have no capabilities, this is indicated by cap ID,
299 * cap version and next pointer all being 0.
300 */
301 if (header == 0)
302 return 0;
303
304 while (ttl-- > 0) {
305 if (PCI_EXT_CAP_ID(header) == cap)
306 return pos;
307
308 pos = PCI_EXT_CAP_NEXT(header);
309 if (pos < PCI_CFG_SPACE_SIZE)
310 break;
311
312 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
313 break;
314 }
315
316 return 0;
317 }
318 EXPORT_SYMBOL_GPL(pci_find_ext_capability);
319
320 /**
321 * pci_bus_find_ext_capability - find an extended capability
322 * @bus: the PCI bus to query
323 * @devfn: PCI device to query
324 * @cap: capability code
325 *
326 * Like pci_find_ext_capability() but works for pci devices that do not have a
327 * pci_dev structure set up yet.
328 *
329 * Returns the address of the requested capability structure within the
330 * device's PCI configuration space or 0 in case the device does not
331 * support it.
332 */
333 int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
334 int cap)
335 {
336 u32 header;
337 int ttl;
338 int pos = PCI_CFG_SPACE_SIZE;
339
340 /* minimum 8 bytes per capability */
341 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
342
343 if (pci_bus_read_config_dword(bus, devfn, pos, &header) != PCIBIOS_SUCCESSFUL)
344 return 0;
345 if (header == 0xffffffff || header == 0)
346 return 0;
347
348 while (ttl-- > 0) {
349 if (PCI_EXT_CAP_ID(header) == cap)
350 return pos;
351
352 pos = PCI_EXT_CAP_NEXT(header);
353 if (pos < PCI_CFG_SPACE_SIZE)
354 break;
355
356 if (pci_bus_read_config_dword(bus, devfn, pos, &header) != PCIBIOS_SUCCESSFUL)
357 break;
358 }
359
360 return 0;
361 }
362
363 static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
364 {
365 int rc, ttl = PCI_FIND_CAP_TTL;
366 u8 cap, mask;
367
368 if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
369 mask = HT_3BIT_CAP_MASK;
370 else
371 mask = HT_5BIT_CAP_MASK;
372
373 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
374 PCI_CAP_ID_HT, &ttl);
375 while (pos) {
376 rc = pci_read_config_byte(dev, pos + 3, &cap);
377 if (rc != PCIBIOS_SUCCESSFUL)
378 return 0;
379
380 if ((cap & mask) == ht_cap)
381 return pos;
382
383 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
384 pos + PCI_CAP_LIST_NEXT,
385 PCI_CAP_ID_HT, &ttl);
386 }
387
388 return 0;
389 }
390 /**
391 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
392 * @dev: PCI device to query
393 * @pos: Position from which to continue searching
394 * @ht_cap: Hypertransport capability code
395 *
396 * To be used in conjunction with pci_find_ht_capability() to search for
397 * all capabilities matching @ht_cap. @pos should always be a value returned
398 * from pci_find_ht_capability().
399 *
400 * NB. To be 100% safe against broken PCI devices, the caller should take
401 * steps to avoid an infinite loop.
402 */
403 int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
404 {
405 return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
406 }
407 EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
408
409 /**
410 * pci_find_ht_capability - query a device's Hypertransport capabilities
411 * @dev: PCI device to query
412 * @ht_cap: Hypertransport capability code
413 *
414 * Tell if a device supports a given Hypertransport capability.
415 * Returns an address within the device's PCI configuration space
416 * or 0 in case the device does not support the requested capability.
417 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
418 * which has a Hypertransport capability matching @ht_cap.
419 */
420 int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
421 {
422 int pos;
423
424 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
425 if (pos)
426 pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
427
428 return pos;
429 }
430 EXPORT_SYMBOL_GPL(pci_find_ht_capability);
431
432 /**
433 * pci_find_parent_resource - return resource region of parent bus of given region
434 * @dev: PCI device structure contains resources to be searched
435 * @res: child resource record for which parent is sought
436 *
437 * For given resource region of given device, return the resource
438 * region of parent bus the given region is contained in or where
439 * it should be allocated from.
440 */
441 struct resource *
442 pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
443 {
444 const struct pci_bus *bus = dev->bus;
445 int i;
446 struct resource *best = NULL, *r;
447
448 pci_bus_for_each_resource(bus, r, i) {
449 if (!r)
450 continue;
451 if (res->start && !(res->start >= r->start && res->end <= r->end))
452 continue; /* Not contained */
453 if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
454 continue; /* Wrong type */
455 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
456 return r; /* Exact match */
457 /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
458 if (r->flags & IORESOURCE_PREFETCH)
459 continue;
460 /* .. but we can put a prefetchable resource inside a non-prefetchable one */
461 if (!best)
462 best = r;
463 }
464 return best;
465 }
466
467 /**
468 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
469 * @dev: PCI device to have its BARs restored
470 *
471 * Restore the BAR values for a given device, so as to make it
472 * accessible by its driver.
473 */
474 static void
475 pci_restore_bars(struct pci_dev *dev)
476 {
477 int i;
478
479 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
480 pci_update_resource(dev, i);
481 }
482
483 static struct pci_platform_pm_ops *pci_platform_pm;
484
485 int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
486 {
487 if (!ops->is_manageable || !ops->set_state || !ops->choose_state
488 || !ops->sleep_wake || !ops->can_wakeup)
489 return -EINVAL;
490 pci_platform_pm = ops;
491 return 0;
492 }
493
494 static inline bool platform_pci_power_manageable(struct pci_dev *dev)
495 {
496 return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
497 }
498
499 static inline int platform_pci_set_power_state(struct pci_dev *dev,
500 pci_power_t t)
501 {
502 return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
503 }
504
505 static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
506 {
507 return pci_platform_pm ?
508 pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
509 }
510
511 static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
512 {
513 return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
514 }
515
516 static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
517 {
518 return pci_platform_pm ?
519 pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
520 }
521
522 static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
523 {
524 return pci_platform_pm ?
525 pci_platform_pm->run_wake(dev, enable) : -ENODEV;
526 }
527
528 /**
529 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
530 * given PCI device
531 * @dev: PCI device to handle.
532 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
533 *
534 * RETURN VALUE:
535 * -EINVAL if the requested state is invalid.
536 * -EIO if device does not support PCI PM or its PM capabilities register has a
537 * wrong version, or device doesn't support the requested state.
538 * 0 if device already is in the requested state.
539 * 0 if device's power state has been successfully changed.
540 */
541 static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
542 {
543 u16 pmcsr;
544 bool need_restore = false;
545
546 /* Check if we're already there */
547 if (dev->current_state == state)
548 return 0;
549
550 if (!dev->pm_cap)
551 return -EIO;
552
553 if (state < PCI_D0 || state > PCI_D3hot)
554 return -EINVAL;
555
556 /* Validate current state:
557 * Can enter D0 from any state, but we can only go deeper
558 * into sleep; a transition to a shallower non-D0 state is invalid.
559 */
560 if (state != PCI_D0 && dev->current_state <= PCI_D3cold
561 && dev->current_state > state) {
562 dev_err(&dev->dev, "invalid power transition "
563 "(from state %d to %d)\n", dev->current_state, state);
564 return -EINVAL;
565 }
566
567 /* check if this device supports the desired state */
568 if ((state == PCI_D1 && !dev->d1_support)
569 || (state == PCI_D2 && !dev->d2_support))
570 return -EIO;
571
572 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
573
574 /* If we're (effectively) in D3, force entire word to 0.
575 * This doesn't affect PME_Status, disables PME_En, and
576 * sets PowerState to 0.
577 */
578 switch (dev->current_state) {
579 case PCI_D0:
580 case PCI_D1:
581 case PCI_D2:
582 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
583 pmcsr |= state;
584 break;
585 case PCI_D3hot:
586 case PCI_D3cold:
587 case PCI_UNKNOWN: /* Boot-up */
588 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
589 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
590 need_restore = true;
591 /* Fall-through: force to D0 */
592 default:
593 pmcsr = 0;
594 break;
595 }
596
597 /* enter specified state */
598 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
599
600 /* Mandatory power management transition delays */
601 /* see PCI PM 1.1 5.6.1 table 18 */
602 if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
603 pci_dev_d3_sleep(dev);
604 else if (state == PCI_D2 || dev->current_state == PCI_D2)
605 udelay(PCI_PM_D2_DELAY);
606
607 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
608 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
609 if (dev->current_state != state && printk_ratelimit())
610 dev_info(&dev->dev, "Refused to change power state, "
611 "currently in D%d\n", dev->current_state);
612
613 /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
614 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
615 * from D3hot to D0 _may_ perform an internal reset, thereby
616 * going to "D0 Uninitialized" rather than "D0 Initialized".
617 * For example, at least some versions of the 3c905B and the
618 * 3c556B exhibit this behaviour.
619 *
620 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
621 * devices in a D3hot state at boot. Consequently, we need to
622 * restore at least the BARs so that the device will be
623 * accessible to its driver.
624 */
625 if (need_restore)
626 pci_restore_bars(dev);
627
628 if (dev->bus->self)
629 pcie_aspm_pm_state_change(dev->bus->self);
630
631 return 0;
632 }
633
634 /**
635 * pci_update_current_state - Read PCI power state of given device from its
636 * PCI PM registers and cache it
637 * @dev: PCI device to handle.
638 * @state: State to cache in case the device doesn't have the PM capability
639 */
640 void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
641 {
642 if (dev->pm_cap) {
643 u16 pmcsr;
644
645 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
646 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
647 } else {
648 dev->current_state = state;
649 }
650 }
651
652 /**
653 * pci_platform_power_transition - Use platform to change device power state
654 * @dev: PCI device to handle.
655 * @state: State to put the device into.
656 */
657 static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
658 {
659 int error;
660
661 if (platform_pci_power_manageable(dev)) {
662 error = platform_pci_set_power_state(dev, state);
663 if (!error)
664 pci_update_current_state(dev, state);
665 } else {
666 error = -ENODEV;
667 /* Fall back to PCI_D0 if native PM is not supported */
668 if (!dev->pm_cap)
669 dev->current_state = PCI_D0;
670 }
671
672 return error;
673 }
674
675 /**
676 * __pci_start_power_transition - Start power transition of a PCI device
677 * @dev: PCI device to handle.
678 * @state: State to put the device into.
679 */
680 static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
681 {
682 if (state == PCI_D0)
683 pci_platform_power_transition(dev, PCI_D0);
684 }
685
686 /**
687 * __pci_complete_power_transition - Complete power transition of a PCI device
688 * @dev: PCI device to handle.
689 * @state: State to put the device into.
690 *
691 * This function should not be called directly by device drivers.
692 */
693 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
694 {
695 return state >= PCI_D0 ?
696 pci_platform_power_transition(dev, state) : -EINVAL;
697 }
698 EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
699
700 /**
701 * pci_set_power_state - Set the power state of a PCI device
702 * @dev: PCI device to handle.
703 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
704 *
705 * Transition a device to a new power state, using the platform firmware and/or
706 * the device's PCI PM registers.
707 *
708 * RETURN VALUE:
709 * -EINVAL if the requested state is invalid.
710 * -EIO if device does not support PCI PM or its PM capabilities register has a
711 * wrong version, or device doesn't support the requested state.
712 * 0 if device already is in the requested state.
713 * 0 if device's power state has been successfully changed.
714 */
715 int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
716 {
717 int error;
718
719 /* bound the state we're entering */
720 if (state > PCI_D3hot)
721 state = PCI_D3hot;
722 else if (state < PCI_D0)
723 state = PCI_D0;
724 else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
725 /*
726 * If the device or the parent bridge do not support PCI PM,
727 * ignore the request if we're doing anything other than putting
728 * it into D0 (which would only happen on boot).
729 */
730 return 0;
731
732 __pci_start_power_transition(dev, state);
733
734 /* Devices quirked with PCI_DEV_FLAGS_NO_D3 must never be
735 put into D3 */
736 if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
737 return 0;
738
739 error = pci_raw_set_power_state(dev, state);
740
741 if (!__pci_complete_power_transition(dev, state))
742 error = 0;
743
744 return error;
745 }
746
747 /**
748 * pci_choose_state - Choose the power state of a PCI device
749 * @dev: PCI device to be suspended
750 * @state: target sleep state for the whole system. This is the value
751 * that is passed to suspend() function.
752 *
753 * Returns PCI power state suitable for given device and given system
754 * message.
755 */
756
757 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
758 {
759 pci_power_t ret;
760
761 if (!pci_find_capability(dev, PCI_CAP_ID_PM))
762 return PCI_D0;
763
764 ret = platform_pci_choose_state(dev);
765 if (ret != PCI_POWER_ERROR)
766 return ret;
767
768 switch (state.event) {
769 case PM_EVENT_ON:
770 return PCI_D0;
771 case PM_EVENT_FREEZE:
772 case PM_EVENT_PRETHAW:
773 /* REVISIT both freeze and pre-thaw "should" use D0 */
774 case PM_EVENT_SUSPEND:
775 case PM_EVENT_HIBERNATE:
776 return PCI_D3hot;
777 default:
778 dev_info(&dev->dev, "unrecognized suspend event %d\n",
779 state.event);
780 BUG();
781 }
782 return PCI_D0;
783 }
784
785 EXPORT_SYMBOL(pci_choose_state);
786
787 #define PCI_EXP_SAVE_REGS 7
788
789 #define pcie_cap_has_devctl(type, flags) 1
790 #define pcie_cap_has_lnkctl(type, flags) \
791 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
792 (type == PCI_EXP_TYPE_ROOT_PORT || \
793 type == PCI_EXP_TYPE_ENDPOINT || \
794 type == PCI_EXP_TYPE_LEG_END))
795 #define pcie_cap_has_sltctl(type, flags) \
796 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
797 ((type == PCI_EXP_TYPE_ROOT_PORT) || \
798 (type == PCI_EXP_TYPE_DOWNSTREAM && \
799 (flags & PCI_EXP_FLAGS_SLOT))))
800 #define pcie_cap_has_rtctl(type, flags) \
801 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
802 (type == PCI_EXP_TYPE_ROOT_PORT || \
803 type == PCI_EXP_TYPE_RC_EC))
804 #define pcie_cap_has_devctl2(type, flags) \
805 ((flags & PCI_EXP_FLAGS_VERS) > 1)
806 #define pcie_cap_has_lnkctl2(type, flags) \
807 ((flags & PCI_EXP_FLAGS_VERS) > 1)
808 #define pcie_cap_has_sltctl2(type, flags) \
809 ((flags & PCI_EXP_FLAGS_VERS) > 1)
810
811 static int pci_save_pcie_state(struct pci_dev *dev)
812 {
813 int pos, i = 0;
814 struct pci_cap_saved_state *save_state;
815 u16 *cap;
816 u16 flags;
817
818 pos = pci_pcie_cap(dev);
819 if (!pos)
820 return 0;
821
822 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
823 if (!save_state) {
824 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
825 return -ENOMEM;
826 }
827 cap = (u16 *)&save_state->data[0];
828
829 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
830
831 if (pcie_cap_has_devctl(dev->pcie_type, flags))
832 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
833 if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
834 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
835 if (pcie_cap_has_sltctl(dev->pcie_type, flags))
836 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
837 if (pcie_cap_has_rtctl(dev->pcie_type, flags))
838 pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
839 if (pcie_cap_has_devctl2(dev->pcie_type, flags))
840 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
841 if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
842 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
843 if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
844 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
845
846 return 0;
847 }
848
849 static void pci_restore_pcie_state(struct pci_dev *dev)
850 {
851 int i = 0, pos;
852 struct pci_cap_saved_state *save_state;
853 u16 *cap;
854 u16 flags;
855
856 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
857 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
858 if (!save_state || pos <= 0)
859 return;
860 cap = (u16 *)&save_state->data[0];
861
862 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
863
864 if (pcie_cap_has_devctl(dev->pcie_type, flags))
865 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
866 if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
867 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
868 if (pcie_cap_has_sltctl(dev->pcie_type, flags))
869 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
870 if (pcie_cap_has_rtctl(dev->pcie_type, flags))
871 pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
872 if (pcie_cap_has_devctl2(dev->pcie_type, flags))
873 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
874 if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
875 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
876 if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
877 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
878 }
879
880
881 static int pci_save_pcix_state(struct pci_dev *dev)
882 {
883 int pos;
884 struct pci_cap_saved_state *save_state;
885
886 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
887 if (pos <= 0)
888 return 0;
889
890 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
891 if (!save_state) {
892 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
893 return -ENOMEM;
894 }
895
896 pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->data);
897
898 return 0;
899 }
900
901 static void pci_restore_pcix_state(struct pci_dev *dev)
902 {
903 int i = 0, pos;
904 struct pci_cap_saved_state *save_state;
905 u16 *cap;
906
907 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
908 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
909 if (!save_state || pos <= 0)
910 return;
911 cap = (u16 *)&save_state->data[0];
912
913 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
914 }
915
916
917 /**
918 * pci_save_state - save the PCI configuration space of a device before suspending
919 * @dev: PCI device that we're dealing with
920 */
921 int
922 pci_save_state(struct pci_dev *dev)
923 {
924 int i;
925 /* XXX: 100% dword access ok here? */
926 for (i = 0; i < 16; i++)
927 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
928 dev->state_saved = true;
929 if ((i = pci_save_pcie_state(dev)) != 0)
930 return i;
931 if ((i = pci_save_pcix_state(dev)) != 0)
932 return i;
933 return 0;
934 }
935
936 /**
937 * pci_restore_state - Restore the saved state of a PCI device
938 * @dev: PCI device that we're dealing with
939 */
940 int
941 pci_restore_state(struct pci_dev *dev)
942 {
943 int i;
944 u32 val;
945
946 if (!dev->state_saved)
947 return 0;
948
949 /* PCI Express register must be restored first */
950 pci_restore_pcie_state(dev);
951
952 /*
953 * The Base Address register should be programmed before the command
954 * register(s)
955 */
956 for (i = 15; i >= 0; i--) {
957 pci_read_config_dword(dev, i * 4, &val);
958 if (val != dev->saved_config_space[i]) {
959 dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
960 "space at offset %#x (was %#x, writing %#x)\n",
961 i, val, (int)dev->saved_config_space[i]);
962 pci_write_config_dword(dev, i * 4,
963 dev->saved_config_space[i]);
964 }
965 }
966 pci_restore_pcix_state(dev);
967 pci_restore_msi_state(dev);
968 pci_restore_iov_state(dev);
969
970 dev->state_saved = false;
971
972 return 0;
973 }
974
975 static int do_pci_enable_device(struct pci_dev *dev, int bars)
976 {
977 int err;
978
979 err = pci_set_power_state(dev, PCI_D0);
980 if (err < 0 && err != -EIO)
981 return err;
982 err = pcibios_enable_device(dev, bars);
983 if (err < 0)
984 return err;
985 pci_fixup_device(pci_fixup_enable, dev);
986
987 return 0;
988 }
989
990 /**
991 * pci_reenable_device - Resume abandoned device
992 * @dev: PCI device to be resumed
993 *
994 * Note this function is a backend of pci_default_resume and is not supposed
995 * to be called by normal code; write a proper resume handler and use that instead.
996 */
997 int pci_reenable_device(struct pci_dev *dev)
998 {
999 if (pci_is_enabled(dev))
1000 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1001 return 0;
1002 }
1003
1004 static int __pci_enable_device_flags(struct pci_dev *dev,
1005 resource_size_t flags)
1006 {
1007 int err;
1008 int i, bars = 0;
1009
1010 if (atomic_add_return(1, &dev->enable_cnt) > 1)
1011 return 0; /* already enabled */
1012
1013 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1014 if (dev->resource[i].flags & flags)
1015 bars |= (1 << i);
1016
1017 err = do_pci_enable_device(dev, bars);
1018 if (err < 0)
1019 atomic_dec(&dev->enable_cnt);
1020 return err;
1021 }
1022
1023 /**
1024 * pci_enable_device_io - Initialize a device for use with IO space
1025 * @dev: PCI device to be initialized
1026 *
1027 * Initialize device before it's used by a driver. Ask low-level code
1028 * to enable I/O resources. Wake up the device if it was suspended.
1029 * Beware, this function can fail.
1030 */
1031 int pci_enable_device_io(struct pci_dev *dev)
1032 {
1033 return __pci_enable_device_flags(dev, IORESOURCE_IO);
1034 }
1035
1036 /**
1037 * pci_enable_device_mem - Initialize a device for use with Memory space
1038 * @dev: PCI device to be initialized
1039 *
1040 * Initialize device before it's used by a driver. Ask low-level code
1041 * to enable Memory resources. Wake up the device if it was suspended.
1042 * Beware, this function can fail.
1043 */
1044 int pci_enable_device_mem(struct pci_dev *dev)
1045 {
1046 return __pci_enable_device_flags(dev, IORESOURCE_MEM);
1047 }
1048
1049 /**
1050 * pci_enable_device - Initialize device before it's used by a driver.
1051 * @dev: PCI device to be initialized
1052 *
1053 * Initialize device before it's used by a driver. Ask low-level code
1054 * to enable I/O and memory. Wake up the device if it was suspended.
1055 * Beware, this function can fail.
1056 *
1057 * Note we don't actually enable the device many times if we call
1058 * this function repeatedly (we just increment the count).
1059 */
1060 int pci_enable_device(struct pci_dev *dev)
1061 {
1062 return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1063 }
1064
1065 /*
1066 * Managed PCI resources. This manages device on/off, intx/msi/msix
1067 * on/off and BAR regions. pci_dev itself records msi/msix status, so
1068 * there's no need to track it separately. pci_devres is initialized
1069 * when a device is enabled using managed PCI device enable interface.
1070 */
1071 struct pci_devres {
1072 unsigned int enabled:1;
1073 unsigned int pinned:1;
1074 unsigned int orig_intx:1;
1075 unsigned int restore_intx:1;
1076 u32 region_mask;
1077 };
1078
1079 static void pcim_release(struct device *gendev, void *res)
1080 {
1081 struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
1082 struct pci_devres *this = res;
1083 int i;
1084
1085 if (dev->msi_enabled)
1086 pci_disable_msi(dev);
1087 if (dev->msix_enabled)
1088 pci_disable_msix(dev);
1089
1090 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1091 if (this->region_mask & (1 << i))
1092 pci_release_region(dev, i);
1093
1094 if (this->restore_intx)
1095 pci_intx(dev, this->orig_intx);
1096
1097 if (this->enabled && !this->pinned)
1098 pci_disable_device(dev);
1099 }
1100
1101 static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
1102 {
1103 struct pci_devres *dr, *new_dr;
1104
1105 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1106 if (dr)
1107 return dr;
1108
1109 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1110 if (!new_dr)
1111 return NULL;
1112 return devres_get(&pdev->dev, new_dr, NULL, NULL);
1113 }
1114
1115 static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
1116 {
1117 if (pci_is_managed(pdev))
1118 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1119 return NULL;
1120 }
1121
1122 /**
1123 * pcim_enable_device - Managed pci_enable_device()
1124 * @pdev: PCI device to be initialized
1125 *
1126 * Managed pci_enable_device().
1127 */
1128 int pcim_enable_device(struct pci_dev *pdev)
1129 {
1130 struct pci_devres *dr;
1131 int rc;
1132
1133 dr = get_pci_dr(pdev);
1134 if (unlikely(!dr))
1135 return -ENOMEM;
1136 if (dr->enabled)
1137 return 0;
1138
1139 rc = pci_enable_device(pdev);
1140 if (!rc) {
1141 pdev->is_managed = 1;
1142 dr->enabled = 1;
1143 }
1144 return rc;
1145 }
1146
1147 /**
1148 * pcim_pin_device - Pin managed PCI device
1149 * @pdev: PCI device to pin
1150 *
1151 * Pin managed PCI device @pdev. Pinned device won't be disabled on
1152 * driver detach. @pdev must have been enabled with
1153 * pcim_enable_device().
1154 */
1155 void pcim_pin_device(struct pci_dev *pdev)
1156 {
1157 struct pci_devres *dr;
1158
1159 dr = find_pci_dr(pdev);
1160 WARN_ON(!dr || !dr->enabled);
1161 if (dr)
1162 dr->pinned = 1;
1163 }
1164
1165 /**
1166 * pcibios_disable_device - disable arch specific PCI resources for device dev
1167 * @dev: the PCI device to disable
1168 *
1169 * Disables architecture specific PCI resources for the device. This
1170 * is the default implementation. Architecture implementations can
1171 * override this.
1172 */
1173 void __attribute__ ((weak)) pcibios_disable_device(struct pci_dev *dev) {}
1174
1175 static void do_pci_disable_device(struct pci_dev *dev)
1176 {
1177 u16 pci_command;
1178
1179 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1180 if (pci_command & PCI_COMMAND_MASTER) {
1181 pci_command &= ~PCI_COMMAND_MASTER;
1182 pci_write_config_word(dev, PCI_COMMAND, pci_command);
1183 }
1184
1185 pcibios_disable_device(dev);
1186 }
1187
1188 /**
1189 * pci_disable_enabled_device - Disable device without updating enable_cnt
1190 * @dev: PCI device to disable
1191 *
1192 * NOTE: This function is a backend of PCI power management routines and is
1193 * not supposed to be called by drivers.
1194 */
1195 void pci_disable_enabled_device(struct pci_dev *dev)
1196 {
1197 if (pci_is_enabled(dev))
1198 do_pci_disable_device(dev);
1199 }
1200
1201 /**
1202 * pci_disable_device - Disable PCI device after use
1203 * @dev: PCI device to be disabled
1204 *
1205 * Signal to the system that the PCI device is not in use by the system
1206 * anymore. This only involves disabling PCI bus-mastering, if active.
1207 *
1208 * Note we don't actually disable the device until all callers of
1209 * pci_enable_device() have called pci_disable_device().
1210 */
1211 void
1212 pci_disable_device(struct pci_dev *dev)
1213 {
1214 struct pci_devres *dr;
1215
1216 dr = find_pci_dr(dev);
1217 if (dr)
1218 dr->enabled = 0;
1219
1220 if (atomic_sub_return(1, &dev->enable_cnt) != 0)
1221 return;
1222
1223 do_pci_disable_device(dev);
1224
1225 dev->is_busmaster = 0;
1226 }
1227
1228 /**
1229 * pcibios_set_pcie_reset_state - set reset state for device dev
1230 * @dev: the PCIe device to reset
1231 * @state: Reset state to enter into
1232 *
1233 *
1234 * Sets the PCIe reset state for the device. This is the default
1235 * implementation. Architecture implementations can override this.
1236 */
1237 int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
1238 enum pcie_reset_state state)
1239 {
1240 return -EINVAL;
1241 }
1242
1243 /**
1244 * pci_set_pcie_reset_state - set reset state for device dev
1245 * @dev: the PCIe device to reset
1246 * @state: Reset state to enter into
1247 *
1248 *
1249 * Sets the PCI reset state for the device.
1250 */
1251 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1252 {
1253 return pcibios_set_pcie_reset_state(dev, state);
1254 }
1255
1256 /**
1257 * pci_check_pme_status - Check if given device has generated PME.
1258 * @dev: Device to check.
1259 *
1260 * Check the PME status of the device and if set, clear it and clear PME enable
1261 * (if set). Return 'true' if PME status and PME enable were both set or
1262 * 'false' otherwise.
1263 */
1264 bool pci_check_pme_status(struct pci_dev *dev)
1265 {
1266 int pmcsr_pos;
1267 u16 pmcsr;
1268 bool ret = false;
1269
1270 if (!dev->pm_cap)
1271 return false;
1272
1273 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1274 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1275 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1276 return false;
1277
1278 /* Clear PME status. */
1279 pmcsr |= PCI_PM_CTRL_PME_STATUS;
1280 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1281 /* Disable PME to avoid interrupt flood. */
1282 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1283 ret = true;
1284 }
1285
1286 pci_write_config_word(dev, pmcsr_pos, pmcsr);
1287
1288 return ret;
1289 }
1290
1291 /*
1292 * Time to wait before the system can be put into a sleep state after reporting
1293 * a wakeup event signaled by a PCI device.
1294 */
1295 #define PCI_WAKEUP_COOLDOWN 100 /* ms */
1296
1297 /**
1298 * pci_wakeup_event - Report a wakeup event related to a given PCI device.
1299 * @dev: Device to report the wakeup event for.
1300 */
1301 void pci_wakeup_event(struct pci_dev *dev)
1302 {
1303 if (device_may_wakeup(&dev->dev))
1304 pm_wakeup_event(&dev->dev, PCI_WAKEUP_COOLDOWN);
1305 }
1306
1307 /**
1308 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1309 * @dev: Device to handle.
1310 * @ign: Ignored.
1311 *
1312 * Check if @dev has generated PME and queue a resume request for it in that
1313 * case.
1314 */
1315 static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
1316 {
1317 if (pci_check_pme_status(dev)) {
1318 pm_request_resume(&dev->dev);
1319 pci_wakeup_event(dev);
1320 }
1321 return 0;
1322 }
1323
1324 /**
1325 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1326 * @bus: Top bus of the subtree to walk.
1327 */
1328 void pci_pme_wakeup_bus(struct pci_bus *bus)
1329 {
1330 if (bus)
1331 pci_walk_bus(bus, pci_pme_wakeup, NULL);
1332 }
1333
1334 /**
1335 * pci_pme_capable - check the capability of PCI device to generate PME#
1336 * @dev: PCI device to handle.
1337 * @state: PCI state from which device will issue PME#.
1338 */
1339 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1340 {
1341 if (!dev->pm_cap)
1342 return false;
1343
1344 return !!(dev->pme_support & (1 << state));
1345 }
1346
1347 static void pci_pme_list_scan(struct work_struct *work)
1348 {
1349 struct pci_pme_device *pme_dev;
1350
1351 mutex_lock(&pci_pme_list_mutex);
1352 if (!list_empty(&pci_pme_list)) {
1353 list_for_each_entry(pme_dev, &pci_pme_list, list)
1354 pci_pme_wakeup(pme_dev->dev, NULL);
1355 schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT));
1356 }
1357 mutex_unlock(&pci_pme_list_mutex);
1358 }
1359
1360 /**
1361 * pci_external_pme - is a device an external PCI PME source?
1362 * @dev: PCI device to check
1363 * PCIe devices and devices on the root bus signal PME natively; all others rely on an external PME# line.
1364 */
1365
1366 static bool pci_external_pme(struct pci_dev *dev)
1367 {
1368 if (pci_is_pcie(dev) || dev->bus->number == 0)
1369 return false;
1370 return true;
1371 }
1372
1373 /**
1374 * pci_pme_active - enable or disable PCI device's PME# function
1375 * @dev: PCI device to handle.
1376 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1377 *
1378 * The caller must verify that the device is capable of generating PME# before
1379 * calling this function with @enable equal to 'true'.
1380 */
1381 void pci_pme_active(struct pci_dev *dev, bool enable)
1382 {
1383 u16 pmcsr;
1384
1385 if (!dev->pm_cap)
1386 return;
1387
1388 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1389 /* Clear PME_Status by writing 1 to it and enable PME# */
1390 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1391 if (!enable)
1392 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1393
1394 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1395
1396 /* PCI (as opposed to PCIe) PME requires that the device have
1397 its PME# line hooked up correctly. Not all hardware vendors
1398 do this, so the PME never gets delivered and the device
1399 remains asleep. The easiest way around this is to
1400 periodically walk the list of suspended devices and check
1401 whether any have their PME flag set. The assumption is that
1402 we'll wake up often enough anyway that this won't be a huge
1403 hit, and the power savings from the devices will still be a
1404 win. */
1405
1406 if (pci_external_pme(dev)) {
1407 struct pci_pme_device *pme_dev;
1408 if (enable) {
1409 pme_dev = kmalloc(sizeof(struct pci_pme_device),
1410 GFP_KERNEL);
1411 if (!pme_dev)
1412 goto out;
1413 pme_dev->dev = dev;
1414 mutex_lock(&pci_pme_list_mutex);
1415 list_add(&pme_dev->list, &pci_pme_list);
1416 if (list_is_singular(&pci_pme_list))
1417 schedule_delayed_work(&pci_pme_work,
1418 msecs_to_jiffies(PME_TIMEOUT));
1419 mutex_unlock(&pci_pme_list_mutex);
1420 } else {
1421 mutex_lock(&pci_pme_list_mutex);
1422 list_for_each_entry(pme_dev, &pci_pme_list, list) {
1423 if (pme_dev->dev == dev) {
1424 list_del(&pme_dev->list);
1425 kfree(pme_dev);
1426 break;
1427 }
1428 }
1429 mutex_unlock(&pci_pme_list_mutex);
1430 }
1431 }
1432
1433 out:
1434 dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
1435 enable ? "enabled" : "disabled");
1436 }
1437
1438 /**
1439 * __pci_enable_wake - enable PCI device as wakeup event source
1440 * @dev: PCI device affected
1441 * @state: PCI state from which device will issue wakeup events
1442 * @runtime: True if the events are to be generated at run time
1443 * @enable: True to enable event generation; false to disable
1444 *
1445 * This enables the device as a wakeup event source, or disables it.
1446 * When such events involve platform-specific hooks, those hooks are
1447 * called automatically by this routine.
1448 *
1449 * Devices with legacy power management (no standard PCI PM capabilities)
1450 * always require such platform hooks.
1451 *
1452 * RETURN VALUE:
1453 * 0 is returned on success
1454 * -EINVAL is returned if device is not supposed to wake up the system
1455 * Error code depending on the platform is returned if both the platform and
1456 * the native mechanism fail to enable the generation of wake-up events
1457 */
1458 int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1459 bool runtime, bool enable)
1460 {
1461 int ret = 0;
1462
1463 if (enable && !runtime && !device_may_wakeup(&dev->dev))
1464 return -EINVAL;
1465
1466 /* Don't do the same thing twice in a row for one device. */
1467 if (!!enable == !!dev->wakeup_prepared)
1468 return 0;
1469
1470 /*
1471 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1472 * Anderson we should be doing PME# wake enable followed by ACPI wake
1473 * enable. To disable wake-up we call the platform first, for symmetry.
1474 */
1475
1476 if (enable) {
1477 int error;
1478
1479 if (pci_pme_capable(dev, state))
1480 pci_pme_active(dev, true);
1481 else
1482 ret = 1;
1483 error = runtime ? platform_pci_run_wake(dev, true) :
1484 platform_pci_sleep_wake(dev, true);
1485 if (ret)
1486 ret = error;
1487 if (!ret)
1488 dev->wakeup_prepared = true;
1489 } else {
1490 if (runtime)
1491 platform_pci_run_wake(dev, false);
1492 else
1493 platform_pci_sleep_wake(dev, false);
1494 pci_pme_active(dev, false);
1495 dev->wakeup_prepared = false;
1496 }
1497
1498 return ret;
1499 }
1500 EXPORT_SYMBOL(__pci_enable_wake);
1501
1502 /**
1503 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1504 * @dev: PCI device to prepare
1505 * @enable: True to enable wake-up event generation; false to disable
1506 *
1507 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1508 * and this function allows them to set that up cleanly - pci_enable_wake()
1509 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1510 * ordering constraints.
1511 *
1512 * This function only returns error code if the device is not capable of
1513 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1514 * enable wake-up power for it.
1515 */
1516 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1517 {
1518 return pci_pme_capable(dev, PCI_D3cold) ?
1519 pci_enable_wake(dev, PCI_D3cold, enable) :
1520 pci_enable_wake(dev, PCI_D3hot, enable);
1521 }
1522
1523 /**
1524 * pci_target_state - find an appropriate low power state for a given PCI dev
1525 * @dev: PCI device
1526 *
1527 * Use underlying platform code to find a supported low power state for @dev.
1528 * If the platform can't manage @dev, return the deepest state from which it
1529 * can generate wake events, based on any available PME info.
1530 */
1531 pci_power_t pci_target_state(struct pci_dev *dev)
1532 {
1533 pci_power_t target_state = PCI_D3hot;
1534
1535 if (platform_pci_power_manageable(dev)) {
1536 /*
1537 * Call the platform to choose the target state of the device
1538 * and enable wake-up from this state if supported.
1539 */
1540 pci_power_t state = platform_pci_choose_state(dev);
1541
1542 switch (state) {
1543 case PCI_POWER_ERROR:
1544 case PCI_UNKNOWN:
1545 break;
1546 case PCI_D1:
1547 case PCI_D2:
1548 if (pci_no_d1d2(dev))
1549 break;
1550 default:
1551 target_state = state;
1552 }
1553 } else if (!dev->pm_cap) {
1554 target_state = PCI_D0;
1555 } else if (device_may_wakeup(&dev->dev)) {
1556 /*
1557 * Find the deepest state from which the device can generate
1558 * wake-up events, make it the target state and enable device
1559 * to generate PME#.
1560 */
1561 if (dev->pme_support) {
1562 while (target_state
1563 && !(dev->pme_support & (1 << target_state)))
1564 target_state--;
1565 }
1566 }
1567
1568 return target_state;
1569 }
1570
1571 /**
1572 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1573 * @dev: Device to handle.
1574 *
1575 * Choose the power state appropriate for the device depending on whether
1576 * it can wake up the system and/or is power manageable by the platform
1577 * (PCI_D3hot is the default) and put the device into that state.
1578 */
1579 int pci_prepare_to_sleep(struct pci_dev *dev)
1580 {
1581 pci_power_t target_state = pci_target_state(dev);
1582 int error;
1583
1584 if (target_state == PCI_POWER_ERROR)
1585 return -EIO;
1586
1587 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1588
1589 error = pci_set_power_state(dev, target_state);
1590
1591 if (error)
1592 pci_enable_wake(dev, target_state, false);
1593
1594 return error;
1595 }
1596
1597 /**
1598 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
1599 * @dev: Device to handle.
1600 *
1601 * Disable device's system wake-up capability and put it into D0.
1602 */
1603 int pci_back_from_sleep(struct pci_dev *dev)
1604 {
1605 pci_enable_wake(dev, PCI_D0, false);
1606 return pci_set_power_state(dev, PCI_D0);
1607 }
1608
1609 /**
1610 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1611 * @dev: PCI device being suspended.
1612 *
1613 * Prepare @dev to generate wake-up events at run time and put it into a low
1614 * power state.
1615 */
1616 int pci_finish_runtime_suspend(struct pci_dev *dev)
1617 {
1618 pci_power_t target_state = pci_target_state(dev);
1619 int error;
1620
1621 if (target_state == PCI_POWER_ERROR)
1622 return -EIO;
1623
1624 __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1625
1626 error = pci_set_power_state(dev, target_state);
1627
1628 if (error)
1629 __pci_enable_wake(dev, target_state, true, false);
1630
1631 return error;
1632 }
1633
1634 /**
1635 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1636 * @dev: Device to check.
1637 *
1638 * Return true if the device itself is capable of generating wake-up events
1639 * (through the platform or using the native PCIe PME) or if the device supports
1640 * PME and one of its upstream bridges can generate wake-up events.
1641 */
1642 bool pci_dev_run_wake(struct pci_dev *dev)
1643 {
1644 struct pci_bus *bus = dev->bus;
1645
1646 if (device_run_wake(&dev->dev))
1647 return true;
1648
1649 if (!dev->pme_support)
1650 return false;
1651
1652 while (bus->parent) {
1653 struct pci_dev *bridge = bus->self;
1654
1655 if (device_run_wake(&bridge->dev))
1656 return true;
1657
1658 bus = bus->parent;
1659 }
1660
1661 /* We have reached the root bus. */
1662 if (bus->bridge)
1663 return device_run_wake(bus->bridge);
1664
1665 return false;
1666 }
1667 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1668
1669 /**
1670 * pci_pm_init - Initialize PM functions of given PCI device
1671 * @dev: PCI device to handle.
1672 */
1673 void pci_pm_init(struct pci_dev *dev)
1674 {
1675 int pm;
1676 u16 pmc;
1677
1678 pm_runtime_forbid(&dev->dev);
1679 device_enable_async_suspend(&dev->dev);
1680 dev->wakeup_prepared = false;
1681
1682 dev->pm_cap = 0;
1683
1684 /* find PCI PM capability in list */
1685 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1686 if (!pm)
1687 return;
1688 /* Check device's ability to generate PME# */
1689 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
1690
1691 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1692 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1693 pmc & PCI_PM_CAP_VER_MASK);
1694 return;
1695 }
1696
1697 dev->pm_cap = pm;
1698 dev->d3_delay = PCI_PM_D3_WAIT;
1699
1700 dev->d1_support = false;
1701 dev->d2_support = false;
1702 if (!pci_no_d1d2(dev)) {
1703 if (pmc & PCI_PM_CAP_D1)
1704 dev->d1_support = true;
1705 if (pmc & PCI_PM_CAP_D2)
1706 dev->d2_support = true;
1707
1708 if (dev->d1_support || dev->d2_support)
1709 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
1710 dev->d1_support ? " D1" : "",
1711 dev->d2_support ? " D2" : "");
1712 }
1713
1714 pmc &= PCI_PM_CAP_PME_MASK;
1715 if (pmc) {
1716 dev_printk(KERN_DEBUG, &dev->dev,
1717 "PME# supported from%s%s%s%s%s\n",
1718 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1719 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1720 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1721 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1722 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1723 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
1724 /*
1725 * Make device's PM flags reflect the wake-up capability, but
1726 * let the user space enable it to wake up the system as needed.
1727 */
1728 device_set_wakeup_capable(&dev->dev, true);
1729 /* Disable the PME# generation functionality */
1730 pci_pme_active(dev, false);
1731 } else {
1732 dev->pme_support = 0;
1733 }
1734 }
1735
1736 /**
1737 * platform_pci_wakeup_init - init platform wakeup if present
1738 * @dev: PCI device
1739 *
1740 * Some devices don't have PCI PM caps but can still generate wakeup
1741 * events through platform methods (like ACPI events). If @dev supports
1742 * platform wakeup events, set the device flag to indicate as much. This
1743 * may be redundant if the device also supports PCI PM caps, but double
1744 * initialization should be safe in that case.
1745 */
1746 void platform_pci_wakeup_init(struct pci_dev *dev)
1747 {
1748 if (!platform_pci_can_wakeup(dev))
1749 return;
1750
1751 device_set_wakeup_capable(&dev->dev, true);
1752 platform_pci_sleep_wake(dev, false);
1753 }
1754
1755 /**
1756 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
1757 * @dev: the PCI device
1758 * @cap: the capability to allocate the buffer for
1759 * @size: requested size of the buffer
1760 */
1761 static int pci_add_cap_save_buffer(
1762 struct pci_dev *dev, char cap, unsigned int size)
1763 {
1764 int pos;
1765 struct pci_cap_saved_state *save_state;
1766
1767 pos = pci_find_capability(dev, cap);
1768 if (pos <= 0)
1769 return 0;
1770
1771 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1772 if (!save_state)
1773 return -ENOMEM;
1774
1775 save_state->cap_nr = cap;
1776 pci_add_saved_cap(dev, save_state);
1777
1778 return 0;
1779 }
1780
1781 /**
1782 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
1783 * @dev: the PCI device
1784 */
1785 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1786 {
1787 int error;
1788
1789 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1790 PCI_EXP_SAVE_REGS * sizeof(u16));
1791 if (error)
1792 dev_err(&dev->dev,
1793 "unable to preallocate PCI Express save buffer\n");
1794
1795 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1796 if (error)
1797 dev_err(&dev->dev,
1798 "unable to preallocate PCI-X save buffer\n");
1799 }
1800
1801 /**
1802 * pci_enable_ari - enable ARI forwarding if the hardware supports it
1803 * @dev: the PCI device
1804 */
1805 void pci_enable_ari(struct pci_dev *dev)
1806 {
1807 int pos;
1808 u32 cap;
1809 u16 ctrl;
1810 struct pci_dev *bridge;
1811
1812 if (!pci_is_pcie(dev) || dev->devfn)
1813 return;
1814
1815 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1816 if (!pos)
1817 return;
1818
1819 bridge = dev->bus->self;
1820 if (!bridge || !pci_is_pcie(bridge))
1821 return;
1822
1823 pos = pci_pcie_cap(bridge);
1824 if (!pos)
1825 return;
1826
1827 pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
1828 if (!(cap & PCI_EXP_DEVCAP2_ARI))
1829 return;
1830
1831 pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
1832 ctrl |= PCI_EXP_DEVCTL2_ARI;
1833 pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
1834
1835 bridge->ari_enabled = 1;
1836 }
1837
1838 static int pci_acs_enable;
1839
1840 /**
1841 * pci_request_acs - ask for ACS to be enabled if supported
1842 */
1843 void pci_request_acs(void)
1844 {
1845 pci_acs_enable = 1;
1846 }
1847
1848 /**
1849 * pci_enable_acs - enable ACS if the hardware supports it
1850 * @dev: the PCI device
1851 */
1852 void pci_enable_acs(struct pci_dev *dev)
1853 {
1854 int pos;
1855 u16 cap;
1856 u16 ctrl;
1857
1858 if (!pci_acs_enable)
1859 return;
1860
1861 if (!pci_is_pcie(dev))
1862 return;
1863
1864 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
1865 if (!pos)
1866 return;
1867
1868 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
1869 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
1870
1871 /* Source Validation */
1872 ctrl |= (cap & PCI_ACS_SV);
1873
1874 /* P2P Request Redirect */
1875 ctrl |= (cap & PCI_ACS_RR);
1876
1877 /* P2P Completion Redirect */
1878 ctrl |= (cap & PCI_ACS_CR);
1879
1880 /* Upstream Forwarding */
1881 ctrl |= (cap & PCI_ACS_UF);
1882
1883 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
1884 }
1885
1886 /**
1887 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
1888 * @dev: the PCI device
1889 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
1890 *
1891 * Perform INTx swizzling for a device behind one level of bridge. This is
1892 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
1893 * behind bridges on add-in cards. For devices with ARI enabled, the slot
1894 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
1895 * the PCI Express Base Specification, Revision 2.1)
1896 */
1897 u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
1898 {
1899 int slot;
1900
1901 if (pci_ari_enabled(dev->bus))
1902 slot = 0;
1903 else
1904 slot = PCI_SLOT(dev->devfn);
1905
1906 return (((pin - 1) + slot) % 4) + 1;
1907 }
1908
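/**
 * pci_get_interrupt_pin - swizzle a device's INTx pin up to its root bus
 * @dev: the PCI device
 * @bridge: filled in with the device on the root bus at the end of the
 *	swizzling chain
 *
 * Returns the swizzled INTx pin (1=INTA ... 4=INTD) as observed at the
 * root bus, or -1 if the device does not use an interrupt pin.
 */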
1909 int
1910 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
1911 {
1912 u8 pin;
1913
1914 pin = dev->pin;
1915 if (!pin)
1916 return -1;
1917
1918 while (!pci_is_root_bus(dev->bus)) {
1919 pin = pci_swizzle_interrupt_pin(dev, pin);
1920 dev = dev->bus->self;
1921 }
1922 *bridge = dev;
1923 return pin;
1924 }
1925
1926 /**
1927 * pci_common_swizzle - swizzle INTx all the way to root bridge
1928 * @dev: the PCI device
1929 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
1930 *
1931 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
1932 * bridges all the way up to a PCI root bus.
1933 */
1934 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
1935 {
1936 u8 pin = *pinp;
1937
1938 while (!pci_is_root_bus(dev->bus)) {
1939 pin = pci_swizzle_interrupt_pin(dev, pin);
1940 dev = dev->bus->self;
1941 }
1942 *pinp = pin;
1943 return PCI_SLOT(dev->devfn);
1944 }
1945
1946 /**
1947 * pci_release_region - Release a PCI BAR
1948 * @pdev: PCI device whose resources were previously reserved by pci_request_region
1949 * @bar: BAR to release
1950 *
1951 * Releases the PCI I/O and memory resources previously reserved by a
1952 * successful call to pci_request_region. Call this function only
1953 * after all use of the PCI regions has ceased.
1954 */
1955 void pci_release_region(struct pci_dev *pdev, int bar)
1956 {
1957 struct pci_devres *dr;
1958
1959 if (pci_resource_len(pdev, bar) == 0)
1960 return;
1961 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
1962 release_region(pci_resource_start(pdev, bar),
1963 pci_resource_len(pdev, bar));
1964 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
1965 release_mem_region(pci_resource_start(pdev, bar),
1966 pci_resource_len(pdev, bar));
1967
1968 dr = find_pci_dr(pdev);
1969 if (dr)
1970 dr->region_mask &= ~(1 << bar);
1971 }
1972
1973 /**
1974 * __pci_request_region - Reserve PCI I/O and memory resource
1975 * @pdev: PCI device whose resources are to be reserved
1976 * @bar: BAR to be reserved
1977 * @res_name: Name to be associated with resource.
1978 * @exclusive: whether the region access is exclusive or not
1979 *
1980 * Mark the PCI region associated with PCI device @pdev BAR @bar as
1981 * being reserved by owner @res_name. Do not access any
1982 * address inside the PCI regions unless this call returns
1983 * successfully.
1984 *
1985 * If @exclusive is set, then the region is marked so that userspace
1986 * is explicitly not allowed to map the resource via /dev/mem or
1987 * sysfs MMIO access.
1988 *
1989 * Returns 0 on success, or %EBUSY on error. A warning
1990 * message is also printed on failure.
1991 */
1992 static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
1993 int exclusive)
1994 {
1995 struct pci_devres *dr;
1996
1997 if (pci_resource_len(pdev, bar) == 0)
1998 return 0;
1999
2000 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2001 if (!request_region(pci_resource_start(pdev, bar),
2002 pci_resource_len(pdev, bar), res_name))
2003 goto err_out;
2004 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
2006 if (!__request_mem_region(pci_resource_start(pdev, bar),
2007 pci_resource_len(pdev, bar), res_name,
2008 exclusive))
2009 goto err_out;
2010 }
2011
2012 dr = find_pci_dr(pdev);
2013 if (dr)
2014 dr->region_mask |= 1 << bar;
2015
2016 return 0;
2017
2018 err_out:
2019 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
2020 &pdev->resource[bar]);
2021 return -EBUSY;
2022 }
2023
2024 /**
2025 * pci_request_region - Reserve PCI I/O and memory resource
2026 * @pdev: PCI device whose resources are to be reserved
2027 * @bar: BAR to be reserved
2028 * @res_name: Name to be associated with resource
2029 *
2030 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2031 * being reserved by owner @res_name. Do not access any
2032 * address inside the PCI regions unless this call returns
2033 * successfully.
2034 *
2035 * Returns 0 on success, or %EBUSY on error. A warning
2036 * message is also printed on failure.
2037 */
2038 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2039 {
2040 return __pci_request_region(pdev, bar, res_name, 0);
2041 }
2042
2043 /**
2044 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
2045 * @pdev: PCI device whose resources are to be reserved
2046 * @bar: BAR to be reserved
2047 * @res_name: Name to be associated with resource.
2048 *
2049 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2050 * being reserved by owner @res_name. Do not access any
2051 * address inside the PCI regions unless this call returns
2052 * successfully.
2053 *
2054 * Returns 0 on success, or %EBUSY on error. A warning
2055 * message is also printed on failure.
2056 *
2057 * The key difference that _exclusive makes is that userspace
2058 * explicitly not allowed to map the resource via /dev/mem or
2059 * sysfs.
2060 */
2061 int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2062 {
2063 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2064 }

2065 /**
2066 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2067 * @pdev: PCI device whose resources were previously reserved
2068 * @bars: Bitmask of BARs to be released
2069 *
2070 * Release selected PCI I/O and memory resources previously reserved.
2071 * Call this function only after all use of the PCI regions has ceased.
2072 */
2073 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2074 {
2075 int i;
2076
2077 for (i = 0; i < 6; i++)
2078 if (bars & (1 << i))
2079 pci_release_region(pdev, i);
2080 }
2081
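/*
 * Helper for pci_request_selected_regions{,_exclusive}: request every BAR
 * set in @bars, releasing any already-claimed BARs if one of them fails.
 */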
2082 int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2083 const char *res_name, int excl)
2084 {
2085 int i;
2086
2087 for (i = 0; i < 6; i++)
2088 if (bars & (1 << i))
2089 if (__pci_request_region(pdev, i, res_name, excl))
2090 goto err_out;
2091 return 0;
2092
2093 err_out:
2094 while (--i >= 0)
2095 if (bars & (1 << i))
2096 pci_release_region(pdev, i);
2097
2098 return -EBUSY;
2099 }
2100
2101
2102 /**
2103 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2104 * @pdev: PCI device whose resources are to be reserved
2105 * @bars: Bitmask of BARs to be requested
2106 * @res_name: Name to be associated with resource
2107 */
2108 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2109 const char *res_name)
2110 {
2111 return __pci_request_selected_regions(pdev, bars, res_name, 0);
2112 }
2113
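/**
 * pci_request_selected_regions_exclusive - reserve selected BARs exclusively
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 *
 * Like pci_request_selected_regions(), but additionally marks the regions
 * so that userspace may not map them via /dev/mem or sysfs.
 */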
2114 int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2115 int bars, const char *res_name)
2116 {
2117 return __pci_request_selected_regions(pdev, bars, res_name,
2118 IORESOURCE_EXCLUSIVE);
2119 }
2120
2121 /**
2122 * pci_release_regions - Release reserved PCI I/O and memory resources
2123 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
2124 *
2125 * Releases all PCI I/O and memory resources previously reserved by a
2126 * successful call to pci_request_regions. Call this function only
2127 * after all use of the PCI regions has ceased.
2128 */
2130 void pci_release_regions(struct pci_dev *pdev)
2131 {
2132 pci_release_selected_regions(pdev, (1 << 6) - 1);
2133 }
2134
2135 /**
2136 * pci_request_regions - Reserve PCI I/O and memory resources
2137 * @pdev: PCI device whose resources are to be reserved
2138 * @res_name: Name to be associated with resource.
2139 *
2140 * Mark all PCI regions associated with PCI device @pdev as
2141 * being reserved by owner @res_name. Do not access any
2142 * address inside the PCI regions unless this call returns
2143 * successfully.
2144 *
2145 * Returns 0 on success, or %EBUSY on error. A warning
2146 * message is also printed on failure.
2147 */
2148 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2149 {
2150 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
2151 }
2152
2153 /**
2154 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2155 * @pdev: PCI device whose resources are to be reserved
2156 * @res_name: Name to be associated with resource.
2157 *
2158 * Mark all PCI regions associated with PCI device @pdev as
2159 * being reserved by owner @res_name. Do not access any
2160 * address inside the PCI regions unless this call returns
2161 * successfully.
2162 *
2163 * pci_request_regions_exclusive() will mark the region so that
2164 * /dev/mem and the sysfs MMIO access will not be allowed.
2165 *
2166 * Returns 0 on success, or %EBUSY on error. A warning
2167 * message is also printed on failure.
2168 */
2169 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2170 {
2171 return pci_request_selected_regions_exclusive(pdev,
2172 ((1 << 6) - 1), res_name);
2173 }
2174
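/*
 * Typical use of the region API in a driver probe path (an illustrative
 * sketch only; the "foo" names and error labels are hypothetical):
 *
 *	rc = pci_enable_device(pdev);
 *	if (rc)
 *		return rc;
 *	rc = pci_request_regions(pdev, "foo");
 *	if (rc)
 *		goto err_disable_device;
 *	regs = pci_ioremap_bar(pdev, 0);
 *
 * The remove path undoes this with iounmap(), pci_release_regions() and
 * pci_disable_device().
 */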
2175 static void __pci_set_master(struct pci_dev *dev, bool enable)
2176 {
2177 u16 old_cmd, cmd;
2178
2179 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2180 if (enable)
2181 cmd = old_cmd | PCI_COMMAND_MASTER;
2182 else
2183 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2184 if (cmd != old_cmd) {
2185 dev_dbg(&dev->dev, "%s bus mastering\n",
2186 enable ? "enabling" : "disabling");
2187 pci_write_config_word(dev, PCI_COMMAND, cmd);
2188 }
2189 dev->is_busmaster = enable;
2190 }
2191
2192 /**
2193 * pci_set_master - enables bus-mastering for device dev
2194 * @dev: the PCI device to enable
2195 *
2196 * Enables bus-mastering on the device and calls pcibios_set_master()
2197 * to do the needed arch specific settings.
2198 */
2199 void pci_set_master(struct pci_dev *dev)
2200 {
2201 __pci_set_master(dev, true);
2202 pcibios_set_master(dev);
2203 }
2204
2205 /**
2206 * pci_clear_master - disables bus-mastering for device dev
2207 * @dev: the PCI device to disable
2208 */
2209 void pci_clear_master(struct pci_dev *dev)
2210 {
2211 __pci_set_master(dev, false);
2212 }
2213
2214 /**
2215 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2216 * @dev: the PCI device for which MWI is to be enabled
2217 *
2218 * Helper function for pci_set_mwi.
2219 * Originally copied from drivers/net/acenic.c.
2220 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2221 *
2222 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2223 */
2224 int pci_set_cacheline_size(struct pci_dev *dev)
2225 {
2226 u8 cacheline_size;
2227
2228 if (!pci_cache_line_size)
2229 return -EINVAL;
2230
2231 /* Validate current setting: PCI_CACHE_LINE_SIZE must be
2232 equal to or a multiple of the correct value. */
2233 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2234 if (cacheline_size >= pci_cache_line_size &&
2235 (cacheline_size % pci_cache_line_size) == 0)
2236 return 0;
2237
2238 /* Write the correct value. */
2239 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2240 /* Read it back. */
2241 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2242 if (cacheline_size == pci_cache_line_size)
2243 return 0;
2244
2245 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2246 "supported\n", pci_cache_line_size << 2);
2247
2248 return -EINVAL;
2249 }
2250 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2251
2252 #ifdef PCI_DISABLE_MWI
2253 int pci_set_mwi(struct pci_dev *dev)
2254 {
2255 return 0;
2256 }
2257
2258 int pci_try_set_mwi(struct pci_dev *dev)
2259 {
2260 return 0;
2261 }
2262
2263 void pci_clear_mwi(struct pci_dev *dev)
2264 {
2265 }
2266
2267 #else
2268
2269 /**
2270 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2271 * @dev: the PCI device for which MWI is enabled
2272 *
2273 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2274 *
2275 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2276 */
2277 int
2278 pci_set_mwi(struct pci_dev *dev)
2279 {
2280 int rc;
2281 u16 cmd;
2282
2283 rc = pci_set_cacheline_size(dev);
2284 if (rc)
2285 return rc;
2286
2287 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2288 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
2289 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
2290 cmd |= PCI_COMMAND_INVALIDATE;
2291 pci_write_config_word(dev, PCI_COMMAND, cmd);
2292 }
2293
2294 return 0;
2295 }
2296
2297 /**
2298 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2299 * @dev: the PCI device for which MWI is enabled
2300 *
2301 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2302 * Callers are not required to check the return value.
2303 *
2304 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2305 */
2306 int pci_try_set_mwi(struct pci_dev *dev)
2307 {
2308 return pci_set_mwi(dev);
2310 }
2311
2312 /**
2313 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2314 * @dev: the PCI device to disable
2315 *
2316 * Disables PCI Memory-Write-Invalidate transaction on the device
2317 */
2318 void
2319 pci_clear_mwi(struct pci_dev *dev)
2320 {
2321 u16 cmd;
2322
2323 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2324 if (cmd & PCI_COMMAND_INVALIDATE) {
2325 cmd &= ~PCI_COMMAND_INVALIDATE;
2326 pci_write_config_word(dev, PCI_COMMAND, cmd);
2327 }
2328 }
2329 #endif /* ! PCI_DISABLE_MWI */
2330
2331 /**
2332 * pci_intx - enables/disables PCI INTx for device dev
2333 * @pdev: the PCI device to operate on
2334 * @enable: boolean: whether to enable or disable PCI INTx
2335 *
2336 * Enables/disables PCI INTx for device dev
2337 */
2338 void
2339 pci_intx(struct pci_dev *pdev, int enable)
2340 {
2341 u16 pci_command, new;
2342
2343 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2344
2345 if (enable) {
2346 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2347 } else {
2348 new = pci_command | PCI_COMMAND_INTX_DISABLE;
2349 }
2350
2351 if (new != pci_command) {
2352 struct pci_devres *dr;
2353
2354 pci_write_config_word(pdev, PCI_COMMAND, new);
2355
2356 dr = find_pci_dr(pdev);
2357 if (dr && !dr->restore_intx) {
2358 dr->restore_intx = 1;
2359 dr->orig_intx = !enable;
2360 }
2361 }
2362 }
2363
2364 /**
2365 * pci_msi_off - disables any MSI or MSI-X capabilities
2366 * @dev: the PCI device to operate on
2367 *
2368 * If you want to use MSI, see pci_enable_msi() and friends. This is
2369 * a lower-level primitive that allows us to disable MSI operation at
2370 * the device level.
2371 */
2372 void pci_msi_off(struct pci_dev *dev)
2373 {
2374 int pos;
2375 u16 control;
2376
2377 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2378 if (pos) {
2379 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
2380 control &= ~PCI_MSI_FLAGS_ENABLE;
2381 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
2382 }
2383 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
2384 if (pos) {
2385 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
2386 control &= ~PCI_MSIX_FLAGS_ENABLE;
2387 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
2388 }
2389 }
2390 EXPORT_SYMBOL_GPL(pci_msi_off);
2391
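/**
 * pci_set_dma_max_seg_size - set the maximum DMA segment size for a device
 * @dev: the PCI device
 * @size: maximum DMA segment size in bytes
 *
 * Thin wrapper around dma_set_max_seg_size() for the device's struct device.
 */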
2392 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
2393 {
2394 return dma_set_max_seg_size(&dev->dev, size);
2395 }
2396 EXPORT_SYMBOL(pci_set_dma_max_seg_size);
2397
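/**
 * pci_set_dma_seg_boundary - set the DMA segment boundary mask for a device
 * @dev: the PCI device
 * @mask: address boundary that no DMA segment may cross
 *
 * Thin wrapper around dma_set_seg_boundary() for the device's struct device.
 */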
2398 int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
2399 {
2400 return dma_set_seg_boundary(&dev->dev, mask);
2401 }
2402 EXPORT_SYMBOL(pci_set_dma_seg_boundary);
2403
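/*
 * pcie_flr - perform a PCIe Function Level Reset if the device supports one.
 * With @probe set, only reports whether FLR is available. Otherwise waits
 * (with exponential backoff, roughly 700ms in total) for the Transaction
 * Pending bit to clear, initiates the FLR and sleeps 100ms for the device
 * to complete the reset.
 */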
2404 static int pcie_flr(struct pci_dev *dev, int probe)
2405 {
2406 int i;
2407 int pos;
2408 u32 cap;
2409 u16 status, control;
2410
2411 pos = pci_pcie_cap(dev);
2412 if (!pos)
2413 return -ENOTTY;
2414
2415 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
2416 if (!(cap & PCI_EXP_DEVCAP_FLR))
2417 return -ENOTTY;
2418
2419 if (probe)
2420 return 0;
2421
2422 /* Wait for the Transaction Pending bit to clear */
2423 for (i = 0; i < 4; i++) {
2424 if (i)
2425 msleep((1 << (i - 1)) * 100);
2426
2427 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
2428 if (!(status & PCI_EXP_DEVSTA_TRPND))
2429 goto clear;
2430 }
2431
2432 dev_err(&dev->dev, "transaction is not cleared; "
2433 "proceeding with reset anyway\n");
2434
2435 clear:
2436 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
2437 control |= PCI_EXP_DEVCTL_BCR_FLR;
2438 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
2439
2440 msleep(100);
2441
2442 return 0;
2443 }
2444
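/*
 * pci_af_flr - reset the function via the Advanced Features capability.
 * Requires both the Transaction Pending and FLR bits of the AF capability.
 * Mirrors pcie_flr(): with @probe set, only reports availability; otherwise
 * waits for pending transactions, issues the reset and sleeps 100ms.
 */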
2445 static int pci_af_flr(struct pci_dev *dev, int probe)
2446 {
2447 int i;
2448 int pos;
2449 u8 cap;
2450 u8 status;
2451
2452 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
2453 if (!pos)
2454 return -ENOTTY;
2455
2456 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
2457 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
2458 return -ENOTTY;
2459
2460 if (probe)
2461 return 0;
2462
2463 /* Wait for the Transaction Pending bit to clear */
2464 for (i = 0; i < 4; i++) {
2465 if (i)
2466 msleep((1 << (i - 1)) * 100);
2467
2468 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
2469 if (!(status & PCI_AF_STATUS_TP))
2470 goto clear;
2471 }
2472
2473 dev_err(&dev->dev, "transaction is not cleared; "
2474 "proceeding with reset anyway\n");
2475
2476 clear:
2477 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
2478 msleep(100);
2479
2480 return 0;
2481 }
2482
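/*
 * pci_pm_reset - reset the function by cycling it through D3hot and back
 * to D0. Only usable when the device has a PM capability that does not
 * advertise No_Soft_Reset, and only when starting from D0.
 */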
2483 static int pci_pm_reset(struct pci_dev *dev, int probe)
2484 {
2485 u16 csr;
2486
2487 if (!dev->pm_cap)
2488 return -ENOTTY;
2489
2490 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
2491 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
2492 return -ENOTTY;
2493
2494 if (probe)
2495 return 0;
2496
2497 if (dev->current_state != PCI_D0)
2498 return -EINVAL;
2499
2500 csr &= ~PCI_PM_CTRL_STATE_MASK;
2501 csr |= PCI_D3hot;
2502 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2503 pci_dev_d3_sleep(dev);
2504
2505 csr &= ~PCI_PM_CTRL_STATE_MASK;
2506 csr |= PCI_D0;
2507 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2508 pci_dev_d3_sleep(dev);
2509
2510 return 0;
2511 }
2512
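/*
 * pci_parent_bus_reset - reset the function by toggling the Secondary Bus
 * Reset bit of the parent bridge. Refused unless the device is the only
 * one on its bus, because the reset hits everything behind the bridge.
 */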
2513 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
2514 {
2515 u16 ctrl;
2516 struct pci_dev *pdev;
2517
2518 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
2519 return -ENOTTY;
2520
2521 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
2522 if (pdev != dev)
2523 return -ENOTTY;
2524
2525 if (probe)
2526 return 0;
2527
2528 pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
2529 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
2530 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
2531 msleep(100);
2532
2533 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
2534 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
2535 msleep(100);
2536
2537 return 0;
2538 }
2539
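/*
 * pci_dev_reset - try the available function reset methods in order of
 * preference: device-specific quirk, PCIe FLR, AF FLR, PM reset, then
 * parent bus reset. With @probe set nothing is actually reset; each
 * method merely reports whether it could be used.
 */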
2540 static int pci_dev_reset(struct pci_dev *dev, int probe)
2541 {
2542 int rc;
2543
2544 might_sleep();
2545
2546 if (!probe) {
2547 pci_block_user_cfg_access(dev);
2548 /* block PM suspend, driver probe, etc. */
2549 device_lock(&dev->dev);
2550 }
2551
2552 rc = pci_dev_specific_reset(dev, probe);
2553 if (rc != -ENOTTY)
2554 goto done;
2555
2556 rc = pcie_flr(dev, probe);
2557 if (rc != -ENOTTY)
2558 goto done;
2559
2560 rc = pci_af_flr(dev, probe);
2561 if (rc != -ENOTTY)
2562 goto done;
2563
2564 rc = pci_pm_reset(dev, probe);
2565 if (rc != -ENOTTY)
2566 goto done;
2567
2568 rc = pci_parent_bus_reset(dev, probe);
2569 done:
2570 if (!probe) {
2571 device_unlock(&dev->dev);
2572 pci_unblock_user_cfg_access(dev);
2573 }
2574
2575 return rc;
2576 }
2577
2578 /**
2579 * __pci_reset_function - reset a PCI device function
2580 * @dev: PCI device to reset
2581 *
2582 * Some devices allow an individual function to be reset without affecting
2583 * other functions in the same device. The PCI device must be responsive
2584 * to PCI config space in order to use this function.
2585 *
2586 * The device function is presumed to be unused when this function is called.
2587 * Resetting the device will make the contents of PCI configuration space
2588 * random, so any caller of this must be prepared to reinitialise the
2589 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
2590 * etc.
2591 *
2592 * Returns 0 if the device function was successfully reset or negative if the
2593 * device doesn't support resetting a single function.
2594 */
2595 int __pci_reset_function(struct pci_dev *dev)
2596 {
2597 return pci_dev_reset(dev, 0);
2598 }
2599 EXPORT_SYMBOL_GPL(__pci_reset_function);
2600
2601 /**
2602 * pci_probe_reset_function - check whether the device can be safely reset
2603 * @dev: PCI device to reset
2604 *
2605 * Some devices allow an individual function to be reset without affecting
2606 * other functions in the same device. The PCI device must be responsive
2607 * to PCI config space in order to use this function.
2608 *
2609 * Returns 0 if the device function can be reset or negative if the
2610 * device doesn't support resetting a single function.
2611 */
2612 int pci_probe_reset_function(struct pci_dev *dev)
2613 {
2614 return pci_dev_reset(dev, 1);
2615 }
2616
2617 /**
2618 * pci_reset_function - quiesce and reset a PCI device function
2619 * @dev: PCI device to reset
2620 *
2621 * Some devices allow an individual function to be reset without affecting
2622 * other functions in the same device. The PCI device must be responsive
2623 * to PCI config space in order to use this function.
2624 *
2625 * This function does not just reset the PCI portion of a device, but
2626 * clears all the state associated with the device. This function differs
2627 * from __pci_reset_function in that it saves and restores device state
2628 * over the reset.
2629 *
2630 * Returns 0 if the device function was successfully reset or negative if the
2631 * device doesn't support resetting a single function.
2632 */
2633 int pci_reset_function(struct pci_dev *dev)
2634 {
2635 int rc;
2636
2637 rc = pci_dev_reset(dev, 1);
2638 if (rc)
2639 return rc;
2640
2641 pci_save_state(dev);
2642
2643 /*
2644 * both INTx and MSI are disabled after the Interrupt Disable bit
2645 * is set and the Bus Master bit is cleared.
2646 */
2647 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
2648
2649 rc = pci_dev_reset(dev, 0);
2650
2651 pci_restore_state(dev);
2652
2653 return rc;
2654 }
2655 EXPORT_SYMBOL_GPL(pci_reset_function);
2656
2657 /**
2658 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
2659 * @dev: PCI device to query
2660 *
2661 * Returns mmrbc: maximum designed memory read count in bytes
2662 * or appropriate error value.
2663 */
2664 int pcix_get_max_mmrbc(struct pci_dev *dev)
2665 {
2666 int cap;
2667 u32 stat;
2668
2669 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2670 if (!cap)
2671 return -EINVAL;
2672
2673 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
2674 return -EINVAL;
2675
2676 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
2677 }
2678 EXPORT_SYMBOL(pcix_get_max_mmrbc);
2679
2680 /**
2681 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
2682 * @dev: PCI device to query
2683 *
2684 * Returns mmrbc: maximum memory read count in bytes
2685 * or appropriate error value.
2686 */
2687 int pcix_get_mmrbc(struct pci_dev *dev)
2688 {
2689 int cap;
2690 u16 cmd;
2691
2692 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2693 if (!cap)
2694 return -EINVAL;
2695
2696 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
2697 return -EINVAL;
2698
2699 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
2700 }
2701 EXPORT_SYMBOL(pcix_get_mmrbc);
2702
2703 /**
2704 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
2705 * @dev: PCI device to modify
2706 * @mmrbc: maximum memory read count in bytes
2707 * valid values are 512, 1024, 2048, 4096
2708 *
2709 * If possible, sets the maximum memory read byte count; some bridges
2710 * have errata that prevent this.
2711 */
2712 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
2713 {
2714 int cap;
2715 u32 stat, v, o;
2716 u16 cmd;
2717
2718 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
2719 return -EINVAL;
2720
2721 v = ffs(mmrbc) - 10;
2722
2723 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2724 if (!cap)
2725 return -EINVAL;
2726
2727 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
2728 return -EINVAL;
2729
2730 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
2731 return -E2BIG;
2732
2733 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
2734 return -EINVAL;
2735
2736 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
2737 if (o != v) {
2738 if (v > o && dev->bus &&
2739 (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
2740 return -EIO;
2741
2742 cmd &= ~PCI_X_CMD_MAX_READ;
2743 cmd |= v << 2;
2744 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
2745 return -EIO;
2746 }
2747 return 0;
2748 }
2749 EXPORT_SYMBOL(pcix_set_mmrbc);
2750
2751 /**
2752 * pcie_get_readrq - get PCI Express read request size
2753 * @dev: PCI device to query
2754 *
2755 * Returns maximum memory read request in bytes
2756 * or appropriate error value.
2757 */
2758 int pcie_get_readrq(struct pci_dev *dev)
2759 {
2760 int ret, cap;
2761 u16 ctl;
2762
2763 cap = pci_pcie_cap(dev);
2764 if (!cap)
2765 return -EINVAL;
2766
2767 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
2768 if (!ret)
2769 ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
2770
2771 return ret;
2772 }
2773 EXPORT_SYMBOL(pcie_get_readrq);
2774
2775 /**
2776 * pcie_set_readrq - set PCI Express maximum memory read request
2777 * @dev: PCI device to modify
2778 * @rq: maximum memory read count in bytes
2779 * valid values are 128, 256, 512, 1024, 2048, 4096
2780 *
2781 * If possible sets maximum read byte count
2782 */
2783 int pcie_set_readrq(struct pci_dev *dev, int rq)
2784 {
2785 int cap, err = -EINVAL;
2786 u16 ctl, v;
2787
2788 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
2789 goto out;
2790
2791 v = (ffs(rq) - 8) << 12;
2792
2793 cap = pci_pcie_cap(dev);
2794 if (!cap)
2795 goto out;
2796
2797 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
2798 if (err)
2799 goto out;
2800
2801 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
2802 ctl &= ~PCI_EXP_DEVCTL_READRQ;
2803 ctl |= v;
2804 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
2805 }
2806
2807 out:
2808 return err;
2809 }
2810 EXPORT_SYMBOL(pcie_set_readrq);
2811
2812 /**
2813 * pci_select_bars - Make BAR mask from the type of resource
2814 * @dev: the PCI device for which BAR mask is made
2815 * @flags: resource type mask to be selected
2816 *
2817 * This helper routine makes bar mask from the type of resource.
2818 */
2819 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
2820 {
2821 int i, bars = 0;
2822 for (i = 0; i < PCI_NUM_RESOURCES; i++)
2823 if (pci_resource_flags(dev, i) & flags)
2824 bars |= (1 << i);
2825 return bars;
2826 }
2827
2828 /**
2829 * pci_resource_bar - get position of the BAR associated with a resource
2830 * @dev: the PCI device
2831 * @resno: the resource number
2832 * @type: the BAR type to be filled in
2833 *
2834 * Returns BAR position in config space, or 0 if the BAR is invalid.
2835 */
2836 int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
2837 {
2838 int reg;
2839
2840 if (resno < PCI_ROM_RESOURCE) {
2841 *type = pci_bar_unknown;
2842 return PCI_BASE_ADDRESS_0 + 4 * resno;
2843 } else if (resno == PCI_ROM_RESOURCE) {
2844 *type = pci_bar_mem32;
2845 return dev->rom_base_reg;
2846 } else if (resno < PCI_BRIDGE_RESOURCES) {
2847 /* device specific resource */
2848 reg = pci_iov_resource_bar(dev, resno, type);
2849 if (reg)
2850 return reg;
2851 }
2852
2853 dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
2854 return 0;
2855 }
2856
2857 /* Some architectures require additional programming to enable VGA */
2858 static arch_set_vga_state_t arch_set_vga_state;
2859
2860 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
2861 {
2862 arch_set_vga_state = func; /* NULL disables */
2863 }
2864
2865 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
2866 unsigned int command_bits, bool change_bridge)
2867 {
2868 if (arch_set_vga_state)
2869 return arch_set_vga_state(dev, decode, command_bits,
2870 change_bridge);
2871 return 0;
2872 }
2873
2874 /**
2875 * pci_set_vga_state - set VGA decode state on device and parents if requested
2876 * @dev: the PCI device
2877 * @decode: true = enable decoding, false = disable decoding
2878 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
2879 * @change_bridge: traverse ancestors and change bridges
2880 */
2881 int pci_set_vga_state(struct pci_dev *dev, bool decode,
2882 unsigned int command_bits, bool change_bridge)
2883 {
2884 struct pci_bus *bus;
2885 struct pci_dev *bridge;
2886 u16 cmd;
2887 int rc;
2888
2889 WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
2890
2891 /* ARCH specific VGA enables */
2892 rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge);
2893 if (rc)
2894 return rc;
2895
2896 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2897 if (decode)
2898 cmd |= command_bits;
2899 else
2900 cmd &= ~command_bits;
2901 pci_write_config_word(dev, PCI_COMMAND, cmd);
2902
2903 if (!change_bridge)
2904 return 0;
2905
2906 bus = dev->bus;
2907 while (bus) {
2908 bridge = bus->self;
2909 if (bridge) {
2910 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
2911 &cmd);
2912 if (decode)
2913 cmd |= PCI_BRIDGE_CTL_VGA;
2914 else
2915 cmd &= ~PCI_BRIDGE_CTL_VGA;
2916 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
2917 cmd);
2918 }
2919 bus = bus->parent;
2920 }
2921 return 0;
2922 }
2923
2924 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
2925 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
2926 static DEFINE_SPINLOCK(resource_alignment_lock);
2927
2928 /**
2929 * pci_specified_resource_alignment - get resource alignment specified by user.
2930 * @dev: the PCI device to query
2931 *
2932 * RETURNS: Resource alignment if it is specified.
2933 * Zero if it is not specified.
2934 */
2935 resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
2936 {
2937 int seg, bus, slot, func, align_order, count;
2938 resource_size_t align = 0;
2939 char *p;
2940
2941 spin_lock(&resource_alignment_lock);
2942 p = resource_alignment_param;
2943 while (*p) {
2944 count = 0;
2945 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
2946 p[count] == '@') {
2947 p += count + 1;
2948 } else {
2949 align_order = -1;
2950 }
2951 if (sscanf(p, "%x:%x:%x.%x%n",
2952 &seg, &bus, &slot, &func, &count) != 4) {
2953 seg = 0;
2954 if (sscanf(p, "%x:%x.%x%n",
2955 &bus, &slot, &func, &count) != 3) {
2956 /* Invalid format */
2957 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
2958 p);
2959 break;
2960 }
2961 }
2962 p += count;
2963 if (seg == pci_domain_nr(dev->bus) &&
2964 bus == dev->bus->number &&
2965 slot == PCI_SLOT(dev->devfn) &&
2966 func == PCI_FUNC(dev->devfn)) {
2967 if (align_order == -1) {
2968 align = PAGE_SIZE;
2969 } else {
2970 align = 1 << align_order;
2971 }
2972 /* Found */
2973 break;
2974 }
2975 if (*p != ';' && *p != ',') {
2976 /* End of param or invalid format */
2977 break;
2978 }
2979 p++;
2980 }
2981 spin_unlock(&resource_alignment_lock);
2982 return align;
2983 }
2984
2985 /**
2986 * pci_is_reassigndev - check if specified PCI is target device to reassign
2987 * @dev: the PCI device to check
2988 *
2989 * RETURNS: non-zero if the PCI device is a target device to reassign;
2990 * zero otherwise.
2991 */
2992 int pci_is_reassigndev(struct pci_dev *dev)
2993 {
2994 return (pci_specified_resource_alignment(dev) != 0);
2995 }
2996
2997 ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
2998 {
2999 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3000 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3001 spin_lock(&resource_alignment_lock);
3002 strncpy(resource_alignment_param, buf, count);
3003 resource_alignment_param[count] = '\0';
3004 spin_unlock(&resource_alignment_lock);
3005 return count;
3006 }
3007
3008 ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3009 {
3010 size_t count;
3011 spin_lock(&resource_alignment_lock);
3012 count = snprintf(buf, size, "%s", resource_alignment_param);
3013 spin_unlock(&resource_alignment_lock);
3014 return count;
3015 }
3016
3017 static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
3018 {
3019 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
3020 }
3021
3022 static ssize_t pci_resource_alignment_store(struct bus_type *bus,
3023 const char *buf, size_t count)
3024 {
3025 return pci_set_resource_alignment_param(buf, count);
3026 }
3027
3028 BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
3029 pci_resource_alignment_store);
3030
3031 static int __init pci_resource_alignment_sysfs_init(void)
3032 {
3033 return bus_create_file(&pci_bus_type,
3034 &bus_attr_resource_alignment);
3035 }
3036
3037 late_initcall(pci_resource_alignment_sysfs_init);
3038
3039 static void __devinit pci_no_domains(void)
3040 {
3041 #ifdef CONFIG_PCI_DOMAINS
3042 pci_domains_supported = 0;
3043 #endif
3044 }
3045
3046 /**
3047 * pci_ext_cfg_avail - can we access extended PCI config space?
3048 * @dev: The PCI device of the root bridge.
3049 *
3050 * Returns 1 if we can access PCI extended config space (offsets
3051 * greater than 0xff). This is the default implementation. Architecture
3052 * implementations can override this.
3053 */
3054 int __weak pci_ext_cfg_avail(struct pci_dev *dev)
3055 {
3056 return 1;
3057 }
3058
3059 void __weak pci_fixup_cardbus(struct pci_bus *bus)
3060 {
3061 }
3062 EXPORT_SYMBOL(pci_fixup_cardbus);
3063
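/*
 * Parse the "pci=" kernel command line options handled by the PCI core;
 * anything not consumed by pcibios_setup() is processed here.
 */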
3064 static int __init pci_setup(char *str)
3065 {
3066 while (str) {
3067 char *k = strchr(str, ',');
3068 if (k)
3069 *k++ = 0;
3070 if (*str && (str = pcibios_setup(str)) && *str) {
3071 if (!strcmp(str, "nomsi")) {
3072 pci_no_msi();
3073 } else if (!strcmp(str, "noaer")) {
3074 pci_no_aer();
3075 } else if (!strcmp(str, "nodomains")) {
3076 pci_no_domains();
3077 } else if (!strncmp(str, "cbiosize=", 9)) {
3078 pci_cardbus_io_size = memparse(str + 9, &str);
3079 } else if (!strncmp(str, "cbmemsize=", 10)) {
3080 pci_cardbus_mem_size = memparse(str + 10, &str);
3081 } else if (!strncmp(str, "resource_alignment=", 19)) {
3082 pci_set_resource_alignment_param(str + 19,
3083 strlen(str + 19));
3084 } else if (!strncmp(str, "ecrc=", 5)) {
3085 pcie_ecrc_get_policy(str + 5);
3086 } else if (!strncmp(str, "hpiosize=", 9)) {
3087 pci_hotplug_io_size = memparse(str + 9, &str);
3088 } else if (!strncmp(str, "hpmemsize=", 10)) {
3089 pci_hotplug_mem_size = memparse(str + 10, &str);
3090 } else {
3091 printk(KERN_ERR "PCI: Unknown option `%s'\n",
3092 str);
3093 }
3094 }
3095 str = k;
3096 }
3097 return 0;
3098 }
3099 early_param("pci", pci_setup);
3100
3101 EXPORT_SYMBOL(pci_reenable_device);
3102 EXPORT_SYMBOL(pci_enable_device_io);
3103 EXPORT_SYMBOL(pci_enable_device_mem);
3104 EXPORT_SYMBOL(pci_enable_device);
3105 EXPORT_SYMBOL(pcim_enable_device);
3106 EXPORT_SYMBOL(pcim_pin_device);
3107 EXPORT_SYMBOL(pci_disable_device);
3108 EXPORT_SYMBOL(pci_find_capability);
3109 EXPORT_SYMBOL(pci_bus_find_capability);
3110 EXPORT_SYMBOL(pci_release_regions);
3111 EXPORT_SYMBOL(pci_request_regions);
3112 EXPORT_SYMBOL(pci_request_regions_exclusive);
3113 EXPORT_SYMBOL(pci_release_region);
3114 EXPORT_SYMBOL(pci_request_region);
3115 EXPORT_SYMBOL(pci_request_region_exclusive);
3116 EXPORT_SYMBOL(pci_release_selected_regions);
3117 EXPORT_SYMBOL(pci_request_selected_regions);
3118 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3119 EXPORT_SYMBOL(pci_set_master);
3120 EXPORT_SYMBOL(pci_clear_master);
3121 EXPORT_SYMBOL(pci_set_mwi);
3122 EXPORT_SYMBOL(pci_try_set_mwi);
3123 EXPORT_SYMBOL(pci_clear_mwi);
3124 EXPORT_SYMBOL_GPL(pci_intx);
3125 EXPORT_SYMBOL(pci_assign_resource);
3126 EXPORT_SYMBOL(pci_find_parent_resource);
3127 EXPORT_SYMBOL(pci_select_bars);
3128
3129 EXPORT_SYMBOL(pci_set_power_state);
3130 EXPORT_SYMBOL(pci_save_state);
3131 EXPORT_SYMBOL(pci_restore_state);
3132 EXPORT_SYMBOL(pci_pme_capable);
3133 EXPORT_SYMBOL(pci_pme_active);
3134 EXPORT_SYMBOL(pci_wake_from_d3);
3135 EXPORT_SYMBOL(pci_target_state);
3136 EXPORT_SYMBOL(pci_prepare_to_sleep);
3137 EXPORT_SYMBOL(pci_back_from_sleep);
3138 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);