/*
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <asm/setup.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000	/* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

/*
 * The default CLS is used if the arch didn't set CLS explicitly and not
 * all PCI devices agree on the same value.  The arch can override either
 * the default or the actual value as it sees fit.  Note that this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->subordinate;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_nocache(pci_resource_start(pdev, bar),
			       pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif

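/*
 * Illustrative usage sketch (editor's example, not part of the original
 * file): a driver probe routine mapping its first memory BAR with
 * pci_ioremap_bar().  The foo_* names are hypothetical.
 */
#if 0
static int foo_map_bar0(struct pci_dev *pdev)
{
	void __iomem *regs = pci_ioremap_bar(pdev, 0); /* BAR 0 must be MEM */

	if (!regs)
		return -ENOMEM;
	/* ... readl(regs + offset), etc. ... */
	iounmap(regs);
	return 0;
}
#endif /* example */
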
#if 0
/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if (n > max)
			max = n;
	}
	return max;
}

#endif  /*  0  */

#define PCI_FIND_CAP_TTL	48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM	Power Management
 *  %PCI_CAP_ID_AGP	Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD	Vital Product Data
 *  %PCI_CAP_ID_SLOTID	Slot Identification
 *  %PCI_CAP_ID_MSI	Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP	CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX	PCI-X
 *  %PCI_CAP_ID_EXP	PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}

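/*
 * Illustrative usage sketch (editor's example): locating the Power
 * Management capability and reading its control/status register.
 */
#if 0
static u16 example_read_pmcsr(struct pci_dev *pdev)
{
	int pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
	u16 pmcsr = 0;

	if (pm)
		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pmcsr);
	return pmcsr;
}
#endif /* example */
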
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR	Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC	Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN	Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR	Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

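/*
 * Illustrative usage sketch (editor's example): probing for the Advanced
 * Error Reporting extended capability before setting up AER handling.
 */
#if 0
static bool example_has_aer(struct pci_dev *pdev)
{
	return pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR) != 0;
}
#endif /* example */
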
/**
 * pci_bus_find_ext_capability - find an extended capability
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_ext_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
				int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	/* config accessors return 0 (PCIBIOS_SUCCESSFUL) on success */
	if (pci_bus_read_config_dword(bus, devfn, pos, &header))
		return 0;
	if (header == 0xffffffff || header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_bus_read_config_dword(bus, devfn, pos, &header))
			break;
	}

	return 0;
}

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource
 * region of parent bus the given region is contained in or where
 * it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL, *r;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
		if (r->flags & IORESOURCE_PREFETCH)
			continue;
		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
		if (!best)
			best = r;
	}
	return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->can_wakeup)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate current state: we can enter D0 from any state, but we
	 * can only go deeper into sleep from a shallower low-power state.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	    || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/*
	 * If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		    && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Mandatory power management transition delays;
	 * see PCI PM 1.1, section 5.6.1, table 18.
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else {
		error = -ENODEV;
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	}

	return error;
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0)
		pci_platform_power_transition(dev, PCI_D0);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	return state >= PCI_D0 ?
			pci_platform_power_transition(dev, state) : -EINVAL;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	error = pci_raw_set_power_state(dev, state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;

	return error;
}

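/*
 * Illustrative usage sketch (editor's example): a legacy suspend handler
 * saving config space and dropping the device into the chosen low-power
 * state.  foo_suspend is hypothetical.
 */
#if 0
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}
#endif /* example */
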
/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);

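/*
 * Illustrative usage sketch (editor's example): the resume counterpart to
 * the suspend sketch above -- back to D0, restore config space, re-enable.
 */
#if 0
static int foo_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}
#endif /* example */
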
#define PCI_EXP_SAVE_REGS	7

#define pcie_cap_has_devctl(type, flags)	1
#define pcie_cap_has_lnkctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_ENDPOINT ||	\
		  type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
		   (flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags)			\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_RC_EC))
#define pcie_cap_has_devctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_lnkctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_sltctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	int i;
	u32 val;

	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);

	/*
	 * The Base Address register should be programmed before the command
	 * register(s)
	 */
	for (i = 15; i >= 0; i--) {
		pci_read_config_dword(dev, i * 4, &val);
		if (val != dev->saved_config_space[i]) {
			dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
				"space at offset %#x (was %#x, writing %#x)\n",
				i, val, (int)dev->saved_config_space[i]);
			pci_write_config_dword(dev, i * 4,
				dev->saved_config_space[i]);
		}
	}
	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code; write a proper resume handler and use it
 * instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}

static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}

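/*
 * Illustrative usage sketch (editor's example): the classic unmanaged
 * probe sequence -- enable, claim regions, enable bus mastering.  The
 * foo_* names are hypothetical.
 */
#if 0
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, "foo");
	if (err)
		goto err_disable;
	pci_set_master(pdev);
	return 0;

err_disable:
	pci_disable_device(pdev);
	return err;
}
#endif /* example */
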
/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}

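/*
 * Illustrative usage sketch (editor's example): with the managed
 * interface the error path collapses, because pcim_enable_device()
 * arranges for the device to be disabled again on driver detach or
 * probe failure.
 */
#if 0
static int foo_probe_managed(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	int err = pcim_enable_device(pdev);

	if (err)
		return err;
	pci_set_master(pdev);
	return 0;
}
#endif /* example */
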
/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}

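/*
 * Illustrative usage sketch (editor's example): the remove path mirroring
 * the unmanaged probe sketch above -- release regions, then drop the
 * enable count.
 */
#if 0
static void foo_remove(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
#endif /* example */
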
/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
							enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @ign: Ignored.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
{
	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, NULL);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}

static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev;

	mutex_lock(&pci_pme_list_mutex);
	if (!list_empty(&pci_pme_list)) {
		list_for_each_entry(pme_dev, &pci_pme_list, list)
			pci_pme_wakeup(pme_dev->dev, NULL);
		schedule_delayed_work(&pci_pme_work,
				      msecs_to_jiffies(PME_TIMEOUT));
	}
	mutex_unlock(&pci_pme_list_mutex);
}

/**
 * pci_external_pme - is a device an external PCI PME source?
 * @dev: PCI device to check
 *
 * Returns 'true' for conventional PCI (non-PCIe) devices that are not on
 * the root bus; PME# from such devices depends on the PME# line being
 * wired up correctly, so it may need to be polled (see pci_pme_active()).
 */
static bool pci_external_pme(struct pci_dev *dev)
{
	if (pci_is_pcie(dev) || dev->bus->number == 0)
		return false;
	return true;
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly.  Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep.  The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set.  The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 */
	if (pci_external_pme(dev)) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev)
				goto out;
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				schedule_delayed_work(&pci_pme_work,
						      msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

out:
	dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
			enable ? "enabled" : "disabled");
}

/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = runtime ? platform_pci_run_wake(dev, true) :
					platform_pci_sleep_wake(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}

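/*
 * Illustrative usage sketch (editor's example): a suspend handler arming
 * wake-up (e.g. Wake-on-LAN) when user space has allowed it.
 */
#if 0
static int foo_suspend_wake(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}
#endif /* example */
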
/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}

/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		__pci_enable_wake(dev, target_state, true, false);

	return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device supports
 * PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (device_run_wake(&dev->dev))
		return true;

	if (!dev->pme_support)
		return false;

	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_run_wake(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		return device_run_wake(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);

/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 pmc;

	pm_runtime_forbid(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3_delay = PCI_PM_D3_WAIT;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
				   dev->d1_support ? " D1" : "",
				   dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		dev_printk(KERN_DEBUG, &dev->dev,
			 "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	} else {
		dev->pme_support = 0;
	}
}

/**
 * platform_pci_wakeup_init - init platform wakeup if present
 * @dev: PCI device
 *
 * Some devices don't have PCI PM caps but can still generate wakeup
 * events through platform methods (like ACPI events).  If @dev supports
 * platform wakeup events, set the device flag to indicate as much.  This
 * may be redundant if the device also supports PCI PM caps, but double
 * initialization should be safe in that case.
 */
void platform_pci_wakeup_init(struct pci_dev *dev)
{
	if (!platform_pci_can_wakeup(dev))
		return;

	device_set_wakeup_capable(&dev->dev, true);
	platform_pci_sleep_wake(dev, false);
}

/**
 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @size: requested size of the buffer
 */
static int pci_add_cap_save_buffer(
	struct pci_dev *dev, char cap, unsigned int size)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, cap);
	if (pos <= 0)
		return 0;

	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
	if (!save_state)
		return -ENOMEM;

	save_state->cap_nr = cap;
	pci_add_saved_cap(dev, save_state);

	return 0;
}

/**
 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
 * @dev: the PCI device
 */
void pci_allocate_cap_save_buffers(struct pci_dev *dev)
{
	int error;

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
					PCI_EXP_SAVE_REGS * sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI Express save buffer\n");

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI-X save buffer\n");
}

/**
 * pci_enable_ari - enable ARI forwarding if hardware supports it
 * @dev: the PCI device
 */
void pci_enable_ari(struct pci_dev *dev)
{
	int pos;
	u32 cap;
	u16 ctrl;
	struct pci_dev *bridge;

	if (!pci_is_pcie(dev) || dev->devfn)
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
	if (!pos)
		return;

	bridge = dev->bus->self;
	if (!bridge || !pci_is_pcie(bridge))
		return;

	pos = pci_pcie_cap(bridge);
	if (!pos)
		return;

	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_ARI))
		return;

	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl |= PCI_EXP_DEVCTL2_ARI;
	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);

	bridge->ari_enabled = 1;
}

static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

/**
 * pci_enable_acs - enable ACS if hardware supports it
 * @dev: the PCI device
 */
void pci_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	if (!pci_acs_enable)
		return;

	if (!pci_is_pcie(dev))
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}

/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
 * the PCI Express Base Specification, Revision 2.1)
 */
u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
{
	int slot;

	if (pci_ari_enabled(dev->bus))
		slot = 0;
	else
		slot = PCI_SLOT(dev->devfn);

	return (((pin - 1) + slot) % 4) + 1;
}

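/*
 * Worked example (editor's note): a device in slot 3 asserting INTB
 * (pin 2) appears one bridge up as (((2 - 1) + 3) % 4) + 1 = 3, i.e. INTC.
 */
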
int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pin = dev->pin;
	if (!pin)
		return -1;

	while (!pci_is_root_bus(dev->bus)) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}

1919 /**
1920 * pci_common_swizzle - swizzle INTx all the way to root bridge
1921 * @dev: the PCI device
1922 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
1923 *
1924 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
1925 * bridges all the way up to a PCI root bus.
1926 */
1927 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
1928 {
1929 u8 pin = *pinp;
1930
1931 while (!pci_is_root_bus(dev->bus)) {
1932 pin = pci_swizzle_interrupt_pin(dev, pin);
1933 dev = dev->bus->self;
1934 }
1935 *pinp = pin;
1936 return PCI_SLOT(dev->devfn);
1937 }
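
/*
 * Example (hypothetical arch interrupt-routing code, sketch only;
 * arch_map_irq() is made up):
 *
 *	u8 pin = dev->pin;
 *	u8 slot;
 *	int irq = -1;
 *
 *	if (pin) {
 *		slot = pci_common_swizzle(dev, &pin);
 *		irq = arch_map_irq(slot, pin);
 *	}
 */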
1938
1939 /**
1940 * pci_release_region - Release a PCI BAR
1941 * @pdev: PCI device whose resources were previously reserved by pci_request_region
1942 * @bar: BAR to release
1943 *
1944 * Releases the PCI I/O and memory resources previously reserved by a
1945 * successful call to pci_request_region. Call this function only
1946 * after all use of the PCI regions has ceased.
1947 */
1948 void pci_release_region(struct pci_dev *pdev, int bar)
1949 {
1950 struct pci_devres *dr;
1951
1952 if (pci_resource_len(pdev, bar) == 0)
1953 return;
1954 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
1955 release_region(pci_resource_start(pdev, bar),
1956 pci_resource_len(pdev, bar));
1957 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
1958 release_mem_region(pci_resource_start(pdev, bar),
1959 pci_resource_len(pdev, bar));
1960
1961 dr = find_pci_dr(pdev);
1962 if (dr)
1963 dr->region_mask &= ~(1 << bar);
1964 }
1965
1966 /**
1967 * __pci_request_region - Reserve PCI I/O and memory resource
1968 * @pdev: PCI device whose resources are to be reserved
1969 * @bar: BAR to be reserved
1970 * @res_name: Name to be associated with resource.
1971 * @exclusive: whether the region access is exclusive or not
1972 *
1973 * Mark the PCI region associated with PCI device @pdev BAR @bar as
1974 * being reserved by owner @res_name. Do not access any
1975 * address inside the PCI regions unless this call returns
1976 * successfully.
1977 *
1978 * If @exclusive is set, then the region is marked so that userspace
1979 * is explicitly not allowed to map the resource via /dev/mem or
1980 * sysfs MMIO access.
1981 *
1982 * Returns 0 on success, or %EBUSY on error. A warning
1983 * message is also printed on failure.
1984 */
1985 static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
1986 int exclusive)
1987 {
1988 struct pci_devres *dr;
1989
1990 if (pci_resource_len(pdev, bar) == 0)
1991 return 0;
1992
1993 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
1994 if (!request_region(pci_resource_start(pdev, bar),
1995 pci_resource_len(pdev, bar), res_name))
1996 goto err_out;
1997 }
1998 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
1999 if (!__request_mem_region(pci_resource_start(pdev, bar),
2000 pci_resource_len(pdev, bar), res_name,
2001 exclusive))
2002 goto err_out;
2003 }
2004
2005 dr = find_pci_dr(pdev);
2006 if (dr)
2007 dr->region_mask |= 1 << bar;
2008
2009 return 0;
2010
2011 err_out:
2012 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
2013 &pdev->resource[bar]);
2014 return -EBUSY;
2015 }
2016
2017 /**
2018 * pci_request_region - Reserve PCI I/O and memory resource
2019 * @pdev: PCI device whose resources are to be reserved
2020 * @bar: BAR to be reserved
2021 * @res_name: Name to be associated with resource
2022 *
2023 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2024 * being reserved by owner @res_name. Do not access any
2025 * address inside the PCI regions unless this call returns
2026 * successfully.
2027 *
2028 * Returns 0 on success, or %EBUSY on error. A warning
2029 * message is also printed on failure.
2030 */
2031 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2032 {
2033 return __pci_request_region(pdev, bar, res_name, 0);
2034 }
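
/*
 * Example (hypothetical driver probe, sketch only; the "mydrv" owner
 * name is made up):
 *
 *	err = pci_request_region(pdev, 0, "mydrv");
 *	if (err)
 *		return err;
 *	...
 *	pci_release_region(pdev, 0);
 */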
2035
2036 /**
2037 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
2038 * @pdev: PCI device whose resources are to be reserved
2039 * @bar: BAR to be reserved
2040 * @res_name: Name to be associated with resource.
2041 *
2042 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2043 * being reserved by owner @res_name. Do not access any
2044 * address inside the PCI regions unless this call returns
2045 * successfully.
2046 *
2047 * Returns 0 on success, or %EBUSY on error. A warning
2048 * message is also printed on failure.
2049 *
2050 * The key difference that _exclusive makes is that userspace is
2051 * explicitly not allowed to map the resource via /dev/mem or
2052 * sysfs.
2053 */
2054 int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2055 {
2056 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2057 }
2058 /**
2059 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2060 * @pdev: PCI device whose resources were previously reserved
2061 * @bars: Bitmask of BARs to be released
2062 *
2063 * Release selected PCI I/O and memory resources previously reserved.
2064 * Call this function only after all use of the PCI regions has ceased.
2065 */
2066 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2067 {
2068 int i;
2069
2070 for (i = 0; i < 6; i++)
2071 if (bars & (1 << i))
2072 pci_release_region(pdev, i);
2073 }
2074
2075 int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2076 const char *res_name, int excl)
2077 {
2078 int i;
2079
2080 for (i = 0; i < 6; i++)
2081 if (bars & (1 << i))
2082 if (__pci_request_region(pdev, i, res_name, excl))
2083 goto err_out;
2084 return 0;
2085
2086 err_out:
2087 while (--i >= 0)
2088 if (bars & (1 << i))
2089 pci_release_region(pdev, i);
2090
2091 return -EBUSY;
2092 }
2093
2094
2095 /**
2096 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2097 * @pdev: PCI device whose resources are to be reserved
2098 * @bars: Bitmask of BARs to be requested
2099 * @res_name: Name to be associated with resource
2100 */
2101 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2102 const char *res_name)
2103 {
2104 return __pci_request_selected_regions(pdev, bars, res_name, 0);
2105 }
2106
2107 int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2108 int bars, const char *res_name)
2109 {
2110 return __pci_request_selected_regions(pdev, bars, res_name,
2111 IORESOURCE_EXCLUSIVE);
2112 }
2113
2114 /**
2115 * pci_release_regions - Release reserved PCI I/O and memory resources
2116 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
2117 *
2118 * Releases all PCI I/O and memory resources previously reserved by a
2119 * successful call to pci_request_regions. Call this function only
2120 * after all use of the PCI regions has ceased.
2121 */
2122
2123 void pci_release_regions(struct pci_dev *pdev)
2124 {
2125 pci_release_selected_regions(pdev, (1 << 6) - 1);
2126 }
2127
2128 /**
2129 * pci_request_regions - Reserve PCI I/O and memory resources
2130 * @pdev: PCI device whose resources are to be reserved
2131 * @res_name: Name to be associated with resource.
2132 *
2133 * Mark all PCI regions associated with PCI device @pdev as
2134 * being reserved by owner @res_name. Do not access any
2135 * address inside the PCI regions unless this call returns
2136 * successfully.
2137 *
2138 * Returns 0 on success, or %EBUSY on error. A warning
2139 * message is also printed on failure.
2140 */
2141 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2142 {
2143 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
2144 }
2145
2146 /**
2147 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2148 * @pdev: PCI device whose resources are to be reserved
2149 * @res_name: Name to be associated with resource.
2150 *
2151 * Mark all PCI regions associated with PCI device @pdev as
2152 * being reserved by owner @res_name. Do not access any
2153 * address inside the PCI regions unless this call returns
2154 * successfully.
2155 *
2156 * pci_request_regions_exclusive() will mark the region so that
2157 * /dev/mem and the sysfs MMIO access will not be allowed.
2158 *
2159 * Returns 0 on success, or %EBUSY on error. A warning
2160 * message is also printed on failure.
2161 */
2162 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2163 {
2164 return pci_request_selected_regions_exclusive(pdev,
2165 ((1 << 6) - 1), res_name);
2166 }
2167
2168 static void __pci_set_master(struct pci_dev *dev, bool enable)
2169 {
2170 u16 old_cmd, cmd;
2171
2172 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2173 if (enable)
2174 cmd = old_cmd | PCI_COMMAND_MASTER;
2175 else
2176 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2177 if (cmd != old_cmd) {
2178 dev_dbg(&dev->dev, "%s bus mastering\n",
2179 enable ? "enabling" : "disabling");
2180 pci_write_config_word(dev, PCI_COMMAND, cmd);
2181 }
2182 dev->is_busmaster = enable;
2183 }
2184
2185 /**
2186 * pci_set_master - enables bus-mastering for device dev
2187 * @dev: the PCI device to enable
2188 *
2189 * Enables bus-mastering on the device and calls pcibios_set_master()
2190 * to do the needed arch specific settings.
2191 */
2192 void pci_set_master(struct pci_dev *dev)
2193 {
2194 __pci_set_master(dev, true);
2195 pcibios_set_master(dev);
2196 }
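
/*
 * Example (typical driver probe/remove pairing, sketch only):
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);
 *	...
 *	pci_clear_master(pdev);
 *	pci_disable_device(pdev);
 */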
2197
2198 /**
2199 * pci_clear_master - disables bus-mastering for device dev
2200 * @dev: the PCI device to disable
2201 */
2202 void pci_clear_master(struct pci_dev *dev)
2203 {
2204 __pci_set_master(dev, false);
2205 }
2206
2207 /**
2208 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2209 * @dev: the PCI device for which MWI is to be enabled
2210 *
2211 * Helper function for pci_set_mwi.
2212 * Originally copied from drivers/net/acenic.c.
2213 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2214 *
2215 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2216 */
2217 int pci_set_cacheline_size(struct pci_dev *dev)
2218 {
2219 u8 cacheline_size;
2220
2221 if (!pci_cache_line_size)
2222 return -EINVAL;
2223
2224 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2225 equal to or a multiple of the right value. */
2226 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2227 if (cacheline_size >= pci_cache_line_size &&
2228 (cacheline_size % pci_cache_line_size) == 0)
2229 return 0;
2230
2231 /* Write the correct value. */
2232 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2233 /* Read it back. */
2234 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2235 if (cacheline_size == pci_cache_line_size)
2236 return 0;
2237
2238 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2239 "supported\n", pci_cache_line_size << 2);
2240
2241 return -EINVAL;
2242 }
2243 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2244
2245 #ifdef PCI_DISABLE_MWI
2246 int pci_set_mwi(struct pci_dev *dev)
2247 {
2248 return 0;
2249 }
2250
2251 int pci_try_set_mwi(struct pci_dev *dev)
2252 {
2253 return 0;
2254 }
2255
2256 void pci_clear_mwi(struct pci_dev *dev)
2257 {
2258 }
2259
2260 #else
2261
2262 /**
2263 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2264 * @dev: the PCI device for which MWI is enabled
2265 *
2266 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2267 *
2268 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2269 */
2270 int
2271 pci_set_mwi(struct pci_dev *dev)
2272 {
2273 int rc;
2274 u16 cmd;
2275
2276 rc = pci_set_cacheline_size(dev);
2277 if (rc)
2278 return rc;
2279
2280 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2281 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
2282 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
2283 cmd |= PCI_COMMAND_INVALIDATE;
2284 pci_write_config_word(dev, PCI_COMMAND, cmd);
2285 }
2286
2287 return 0;
2288 }
2289
2290 /**
2291 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2292 * @dev: the PCI device for which MWI is enabled
2293 *
2294 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2295 * Callers are not required to check the return value.
2296 *
2297 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2298 */
2299 int pci_try_set_mwi(struct pci_dev *dev)
2300 {
2301 int rc = pci_set_mwi(dev);
2302 return rc;
2303 }
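
/*
 * Example (sketch): MWI is only a performance hint, so most callers
 * use the _try_ variant and ignore the result, which is harmless when
 * MWI is unsupported:
 *
 *	pci_try_set_mwi(pdev);
 */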
2304
2305 /**
2306 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2307 * @dev: the PCI device to disable
2308 *
2309 * Disables PCI Memory-Write-Invalidate transaction on the device
2310 */
2311 void
2312 pci_clear_mwi(struct pci_dev *dev)
2313 {
2314 u16 cmd;
2315
2316 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2317 if (cmd & PCI_COMMAND_INVALIDATE) {
2318 cmd &= ~PCI_COMMAND_INVALIDATE;
2319 pci_write_config_word(dev, PCI_COMMAND, cmd);
2320 }
2321 }
2322 #endif /* ! PCI_DISABLE_MWI */
2323
2324 /**
2325 * pci_intx - enables/disables PCI INTx for device dev
2326 * @pdev: the PCI device to operate on
2327 * @enable: boolean: whether to enable or disable PCI INTx
2328 *
2329 * Enables/disables PCI INTx for device dev
2330 */
2331 void
2332 pci_intx(struct pci_dev *pdev, int enable)
2333 {
2334 u16 pci_command, new;
2335
2336 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2337
2338 if (enable) {
2339 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2340 } else {
2341 new = pci_command | PCI_COMMAND_INTX_DISABLE;
2342 }
2343
2344 if (new != pci_command) {
2345 struct pci_devres *dr;
2346
2347 pci_write_config_word(pdev, PCI_COMMAND, new);
2348
2349 dr = find_pci_dr(pdev);
2350 if (dr && !dr->restore_intx) {
2351 dr->restore_intx = 1;
2352 dr->orig_intx = !enable;
2353 }
2354 }
2355 }
2356
2357 /**
2358 * pci_msi_off - disables any MSI or MSI-X capabilities
2359 * @dev: the PCI device to operate on
2360 *
2361 * If you want to use MSI, see pci_enable_msi() and friends.
2362 * This is a lower-level primitive that allows us to disable
2363 * MSI operation at the device level.
2364 */
2365 void pci_msi_off(struct pci_dev *dev)
2366 {
2367 int pos;
2368 u16 control;
2369
2370 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2371 if (pos) {
2372 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
2373 control &= ~PCI_MSI_FLAGS_ENABLE;
2374 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
2375 }
2376 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
2377 if (pos) {
2378 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
2379 control &= ~PCI_MSIX_FLAGS_ENABLE;
2380 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
2381 }
2382 }
2383 EXPORT_SYMBOL_GPL(pci_msi_off);
2384
2385 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
2386 {
2387 return dma_set_max_seg_size(&dev->dev, size);
2388 }
2389 EXPORT_SYMBOL(pci_set_dma_max_seg_size);
2390
2391 int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
2392 {
2393 return dma_set_seg_boundary(&dev->dev, mask);
2394 }
2395 EXPORT_SYMBOL(pci_set_dma_seg_boundary);
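
/*
 * Example (hypothetical limits, sketch only): hardware that handles at
 * most 64 KB per scatterlist element and must not cross a 4 KB
 * boundary could be described with:
 *
 *	pci_set_dma_max_seg_size(pdev, 65536);
 *	pci_set_dma_seg_boundary(pdev, 0xfff);
 */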
2396
2397 static int pcie_flr(struct pci_dev *dev, int probe)
2398 {
2399 int i;
2400 int pos;
2401 u32 cap;
2402 u16 status, control;
2403
2404 pos = pci_pcie_cap(dev);
2405 if (!pos)
2406 return -ENOTTY;
2407
2408 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
2409 if (!(cap & PCI_EXP_DEVCAP_FLR))
2410 return -ENOTTY;
2411
2412 if (probe)
2413 return 0;
2414
2415 /* Wait for Transaction Pending bit to clear */
2416 for (i = 0; i < 4; i++) {
2417 if (i)
2418 msleep((1 << (i - 1)) * 100);
2419
2420 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
2421 if (!(status & PCI_EXP_DEVSTA_TRPND))
2422 goto clear;
2423 }
2424
2425 dev_err(&dev->dev, "transaction is not cleared; "
2426 "proceeding with reset anyway\n");
2427
2428 clear:
2429 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
2430 control |= PCI_EXP_DEVCTL_BCR_FLR;
2431 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
2432
2433 msleep(100);
2434
2435 return 0;
2436 }
2437
2438 static int pci_af_flr(struct pci_dev *dev, int probe)
2439 {
2440 int i;
2441 int pos;
2442 u8 cap;
2443 u8 status;
2444
2445 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
2446 if (!pos)
2447 return -ENOTTY;
2448
2449 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
2450 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
2451 return -ENOTTY;
2452
2453 if (probe)
2454 return 0;
2455
2456 /* Wait for Transaction Pending bit to clear */
2457 for (i = 0; i < 4; i++) {
2458 if (i)
2459 msleep((1 << (i - 1)) * 100);
2460
2461 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
2462 if (!(status & PCI_AF_STATUS_TP))
2463 goto clear;
2464 }
2465
2466 dev_err(&dev->dev, "transaction is not cleared; "
2467 "proceeding with reset anyway\n");
2468
2469 clear:
2470 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
2471 msleep(100);
2472
2473 return 0;
2474 }
2475
2476 static int pci_pm_reset(struct pci_dev *dev, int probe)
2477 {
2478 u16 csr;
2479
2480 if (!dev->pm_cap)
2481 return -ENOTTY;
2482
2483 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
2484 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
2485 return -ENOTTY;
2486
2487 if (probe)
2488 return 0;
2489
2490 if (dev->current_state != PCI_D0)
2491 return -EINVAL;
2492
2493 csr &= ~PCI_PM_CTRL_STATE_MASK;
2494 csr |= PCI_D3hot;
2495 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2496 pci_dev_d3_sleep(dev);
2497
2498 csr &= ~PCI_PM_CTRL_STATE_MASK;
2499 csr |= PCI_D0;
2500 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2501 pci_dev_d3_sleep(dev);
2502
2503 return 0;
2504 }
2505
2506 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
2507 {
2508 u16 ctrl;
2509 struct pci_dev *pdev;
2510
2511 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
2512 return -ENOTTY;
2513
2514 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
2515 if (pdev != dev)
2516 return -ENOTTY;
2517
2518 if (probe)
2519 return 0;
2520
2521 pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
2522 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
2523 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
2524 msleep(100);
2525
2526 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
2527 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
2528 msleep(100);
2529
2530 return 0;
2531 }
2532
2533 static int pci_dev_reset(struct pci_dev *dev, int probe)
2534 {
2535 int rc;
2536
2537 might_sleep();
2538
2539 if (!probe) {
2540 pci_block_user_cfg_access(dev);
2541 /* block PM suspend, driver probe, etc. */
2542 device_lock(&dev->dev);
2543 }
2544
2545 rc = pci_dev_specific_reset(dev, probe);
2546 if (rc != -ENOTTY)
2547 goto done;
2548
2549 rc = pcie_flr(dev, probe);
2550 if (rc != -ENOTTY)
2551 goto done;
2552
2553 rc = pci_af_flr(dev, probe);
2554 if (rc != -ENOTTY)
2555 goto done;
2556
2557 rc = pci_pm_reset(dev, probe);
2558 if (rc != -ENOTTY)
2559 goto done;
2560
2561 rc = pci_parent_bus_reset(dev, probe);
2562 done:
2563 if (!probe) {
2564 device_unlock(&dev->dev);
2565 pci_unblock_user_cfg_access(dev);
2566 }
2567
2568 return rc;
2569 }
2570
2571 /**
2572 * __pci_reset_function - reset a PCI device function
2573 * @dev: PCI device to reset
2574 *
2575 * Some devices allow an individual function to be reset without affecting
2576 * other functions in the same device. The PCI device must be responsive
2577 * to PCI config space in order to use this function.
2578 *
2579 * The device function is presumed to be unused when this function is called.
2580 * Resetting the device will make the contents of PCI configuration space
2581 * random, so any caller of this must be prepared to reinitialise the
2582 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
2583 * etc.
2584 *
2585 * Returns 0 if the device function was successfully reset or negative if the
2586 * device doesn't support resetting a single function.
2587 */
2588 int __pci_reset_function(struct pci_dev *dev)
2589 {
2590 return pci_dev_reset(dev, 0);
2591 }
2592 EXPORT_SYMBOL_GPL(__pci_reset_function);
2593
2594 /**
2595 * pci_probe_reset_function - check whether the device can be safely reset
2596 * @dev: PCI device to reset
2597 *
2598 * Some devices allow an individual function to be reset without affecting
2599 * other functions in the same device. The PCI device must be responsive
2600 * to PCI config space in order to use this function.
2601 *
2602 * Returns 0 if the device function can be reset or negative if the
2603 * device doesn't support resetting a single function.
2604 */
2605 int pci_probe_reset_function(struct pci_dev *dev)
2606 {
2607 return pci_dev_reset(dev, 1);
2608 }
2609
2610 /**
2611 * pci_reset_function - quiesce and reset a PCI device function
2612 * @dev: PCI device to reset
2613 *
2614 * Some devices allow an individual function to be reset without affecting
2615 * other functions in the same device. The PCI device must be responsive
2616 * to PCI config space in order to use this function.
2617 *
2618 * This function does not just reset the PCI portion of a device, but
2619 * clears all the state associated with the device. This function differs
2620 * from __pci_reset_function in that it saves and restores device state
2621 * over the reset.
2622 *
2623 * Returns 0 if the device function was successfully reset or negative if the
2624 * device doesn't support resetting a single function.
2625 */
2626 int pci_reset_function(struct pci_dev *dev)
2627 {
2628 int rc;
2629
2630 rc = pci_dev_reset(dev, 1);
2631 if (rc)
2632 return rc;
2633
2634 pci_save_state(dev);
2635
2636 /*
2637 * both INTx and MSI are disabled after the Interrupt Disable bit
2638 * is set and the Bus Master bit is cleared.
2639 */
2640 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
2641
2642 rc = pci_dev_reset(dev, 0);
2643
2644 pci_restore_state(dev);
2645
2646 return rc;
2647 }
2648 EXPORT_SYMBOL_GPL(pci_reset_function);
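
/*
 * Example (hypothetical recovery path, sketch only):
 *
 *	if (pci_probe_reset_function(pdev) == 0 &&
 *	    pci_reset_function(pdev) == 0)
 *		dev_info(&pdev->dev, "function reset completed\n");
 */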
2649
2650 /**
2651 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
2652 * @dev: PCI device to query
2653 *
2654 * Returns mmrbc: maximum designed memory read count in bytes
2655 * or appropriate error value.
2656 */
2657 int pcix_get_max_mmrbc(struct pci_dev *dev)
2658 {
2659 int cap;
2660 u32 stat;
2661
2662 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2663 if (!cap)
2664 return -EINVAL;
2665
2666 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
2667 return -EINVAL;
2668
2669 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
2670 }
2671 EXPORT_SYMBOL(pcix_get_max_mmrbc);
2672
2673 /**
2674 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
2675 * @dev: PCI device to query
2676 *
2677 * Returns mmrbc: maximum memory read count in bytes
2678 * or appropriate error value.
2679 */
2680 int pcix_get_mmrbc(struct pci_dev *dev)
2681 {
2682 int cap;
2683 u16 cmd;
2684
2685 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2686 if (!cap)
2687 return -EINVAL;
2688
2689 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
2690 return -EINVAL;
2691
2692 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
2693 }
2694 EXPORT_SYMBOL(pcix_get_mmrbc);
2695
2696 /**
2697 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
2698 * @dev: PCI device to query
2699 * @mmrbc: maximum memory read count in bytes
2700 * valid values are 512, 1024, 2048, 4096
2701 *
2702 * If possible, sets the maximum memory read byte count; some bridges have errata
2703 * that prevent this.
2704 */
2705 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
2706 {
2707 int cap;
2708 u32 stat, v, o;
2709 u16 cmd;
2710
2711 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
2712 return -EINVAL;
2713
2714 v = ffs(mmrbc) - 10;
2715
2716 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2717 if (!cap)
2718 return -EINVAL;
2719
2720 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
2721 return -EINVAL;
2722
2723 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
2724 return -E2BIG;
2725
2726 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
2727 return -EINVAL;
2728
2729 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
2730 if (o != v) {
2731 if (v > o && dev->bus &&
2732 (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
2733 return -EIO;
2734
2735 cmd &= ~PCI_X_CMD_MAX_READ;
2736 cmd |= v << 2;
2737 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
2738 return -EIO;
2739 }
2740 return 0;
2741 }
2742 EXPORT_SYMBOL(pcix_set_mmrbc);
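
/*
 * Example (sketch): clamp a requested MMRBC to the device's designed
 * maximum before programming it.
 *
 *	int max = pcix_get_max_mmrbc(pdev);
 *
 *	if (max > 0)
 *		pcix_set_mmrbc(pdev, min(2048, max));
 */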
2743
2744 /**
2745 * pcie_get_readrq - get PCI Express read request size
2746 * @dev: PCI device to query
2747 *
2748 * Returns maximum memory read request in bytes
2749 * or appropriate error value.
2750 */
2751 int pcie_get_readrq(struct pci_dev *dev)
2752 {
2753 int ret, cap;
2754 u16 ctl;
2755
2756 cap = pci_pcie_cap(dev);
2757 if (!cap)
2758 return -EINVAL;
2759
2760 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
2761 if (!ret)
2762 ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
2763
2764 return ret;
2765 }
2766 EXPORT_SYMBOL(pcie_get_readrq);
2767
2768 /**
2769 * pcie_set_readrq - set PCI Express maximum memory read request
2770 * @dev: PCI device to query
2771 * @rq: maximum memory read count in bytes
2772 * valid values are 128, 256, 512, 1024, 2048, 4096
2773 *
2774 * If possible, sets the maximum read byte count.
2775 */
2776 int pcie_set_readrq(struct pci_dev *dev, int rq)
2777 {
2778 int cap, err = -EINVAL;
2779 u16 ctl, v;
2780
2781 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
2782 goto out;
2783
2784 v = (ffs(rq) - 8) << 12;
2785
2786 cap = pci_pcie_cap(dev);
2787 if (!cap)
2788 goto out;
2789
2790 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
2791 if (err)
2792 goto out;
2793
2794 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
2795 ctl &= ~PCI_EXP_DEVCTL_READRQ;
2796 ctl |= v;
2797 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
2798 }
2799
2800 out:
2801 return err;
2802 }
2803 EXPORT_SYMBOL(pcie_set_readrq);
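
/*
 * Worked example: for rq = 512, ffs(512) = 10, so v = (10 - 8) << 12 =
 * 0x2000, the DEVCTL encoding of a 512-byte read request. A driver
 * would simply call:
 *
 *	pcie_set_readrq(pdev, 512);
 */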
2804
2805 /**
2806 * pci_select_bars - Make BAR mask from the type of resource
2807 * @dev: the PCI device for which BAR mask is made
2808 * @flags: resource type mask to be selected
2809 *
2810 * This helper routine makes a BAR mask from the given resource type.
2811 */
2812 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
2813 {
2814 int i, bars = 0;
2815 for (i = 0; i < PCI_NUM_RESOURCES; i++)
2816 if (pci_resource_flags(dev, i) & flags)
2817 bars |= (1 << i);
2818 return bars;
2819 }
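
/*
 * Example (sketch; the "mydrv" owner name is made up): reserve only
 * the memory BARs of a device.
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	err = pci_request_selected_regions(pdev, bars, "mydrv");
 */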
2820
2821 /**
2822 * pci_resource_bar - get position of the BAR associated with a resource
2823 * @dev: the PCI device
2824 * @resno: the resource number
2825 * @type: the BAR type to be filled in
2826 *
2827 * Returns BAR position in config space, or 0 if the BAR is invalid.
2828 */
2829 int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
2830 {
2831 int reg;
2832
2833 if (resno < PCI_ROM_RESOURCE) {
2834 *type = pci_bar_unknown;
2835 return PCI_BASE_ADDRESS_0 + 4 * resno;
2836 } else if (resno == PCI_ROM_RESOURCE) {
2837 *type = pci_bar_mem32;
2838 return dev->rom_base_reg;
2839 } else if (resno < PCI_BRIDGE_RESOURCES) {
2840 /* device specific resource */
2841 reg = pci_iov_resource_bar(dev, resno, type);
2842 if (reg)
2843 return reg;
2844 }
2845
2846 dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
2847 return 0;
2848 }
2849
2850 /* Some architectures require additional programming to enable VGA */
2851 static arch_set_vga_state_t arch_set_vga_state;
2852
2853 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
2854 {
2855 arch_set_vga_state = func; /* NULL disables */
2856 }
2857
2858 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
2859 unsigned int command_bits, bool change_bridge)
2860 {
2861 if (arch_set_vga_state)
2862 return arch_set_vga_state(dev, decode, command_bits,
2863 change_bridge);
2864 return 0;
2865 }
2866
2867 /**
2868 * pci_set_vga_state - set VGA decode state on device and parents if requested
2869 * @dev: the PCI device
2870 * @decode: true = enable decoding, false = disable decoding
2871 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
2872 * @change_bridge: traverse ancestors and change bridges
2873 */
2874 int pci_set_vga_state(struct pci_dev *dev, bool decode,
2875 unsigned int command_bits, bool change_bridge)
2876 {
2877 struct pci_bus *bus;
2878 struct pci_dev *bridge;
2879 u16 cmd;
2880 int rc;
2881
2882 WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
2883
2884 /* ARCH specific VGA enables */
2885 rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge);
2886 if (rc)
2887 return rc;
2888
2889 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2890 if (decode)
2891 cmd |= command_bits;
2892 else
2893 cmd &= ~command_bits;
2894 pci_write_config_word(dev, PCI_COMMAND, cmd);
2895
2896 if (!change_bridge)
2897 return 0;
2898
2899 bus = dev->bus;
2900 while (bus) {
2901 bridge = bus->self;
2902 if (bridge) {
2903 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
2904 &cmd);
2905 if (decode)
2906 cmd |= PCI_BRIDGE_CTL_VGA;
2907 else
2908 cmd &= ~PCI_BRIDGE_CTL_VGA;
2909 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
2910 cmd);
2911 }
2912 bus = bus->parent;
2913 }
2914 return 0;
2915 }
2916
2917 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
2918 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
2919 static DEFINE_SPINLOCK(resource_alignment_lock);
2920
2921 /**
2922 * pci_specified_resource_alignment - get resource alignment specified by user.
2923 * @dev: the PCI device to check
2924 *
2925 * RETURNS: Resource alignment if it is specified.
2926 * Zero if it is not specified.
2927 */
2928 resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
2929 {
2930 int seg, bus, slot, func, align_order, count;
2931 resource_size_t align = 0;
2932 char *p;
2933
2934 spin_lock(&resource_alignment_lock);
2935 p = resource_alignment_param;
2936 while (*p) {
2937 count = 0;
2938 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
2939 p[count] == '@') {
2940 p += count + 1;
2941 } else {
2942 align_order = -1;
2943 }
2944 if (sscanf(p, "%x:%x:%x.%x%n",
2945 &seg, &bus, &slot, &func, &count) != 4) {
2946 seg = 0;
2947 if (sscanf(p, "%x:%x.%x%n",
2948 &bus, &slot, &func, &count) != 3) {
2949 /* Invalid format */
2950 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
2951 p);
2952 break;
2953 }
2954 }
2955 p += count;
2956 if (seg == pci_domain_nr(dev->bus) &&
2957 bus == dev->bus->number &&
2958 slot == PCI_SLOT(dev->devfn) &&
2959 func == PCI_FUNC(dev->devfn)) {
2960 if (align_order == -1) {
2961 align = PAGE_SIZE;
2962 } else {
2963 align = 1 << align_order;
2964 }
2965 /* Found */
2966 break;
2967 }
2968 if (*p != ';' && *p != ',') {
2969 /* End of param or invalid format */
2970 break;
2971 }
2972 p++;
2973 }
2974 spin_unlock(&resource_alignment_lock);
2975 return align;
2976 }
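
/*
 * Example (hypothetical boot parameter): booting with
 *
 *	pci=resource_alignment=12@00:1f.0
 *
 * requests 1 << 12 = 4 KB alignment for the device at bus 00, slot 1f,
 * function 0 (domain 0 is assumed when omitted); leaving out the
 * leading "12@" falls back to PAGE_SIZE alignment.
 */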
2977
2978 /**
2979 * pci_is_reassigndev - check if specified PCI is target device to reassign
2980 * @dev: the PCI device to check
2981 *
2982 * RETURNS: non-zero if the PCI device is a target device to reassign;
2983 * zero if it is not.
2984 */
2985 int pci_is_reassigndev(struct pci_dev *dev)
2986 {
2987 return (pci_specified_resource_alignment(dev) != 0);
2988 }
2989
2990 ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
2991 {
2992 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
2993 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
2994 spin_lock(&resource_alignment_lock);
2995 strncpy(resource_alignment_param, buf, count);
2996 resource_alignment_param[count] = '\0';
2997 spin_unlock(&resource_alignment_lock);
2998 return count;
2999 }
3000
3001 ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3002 {
3003 size_t count;
3004 spin_lock(&resource_alignment_lock);
3005 count = snprintf(buf, size, "%s", resource_alignment_param);
3006 spin_unlock(&resource_alignment_lock);
3007 return count;
3008 }
3009
3010 static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
3011 {
3012 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
3013 }
3014
3015 static ssize_t pci_resource_alignment_store(struct bus_type *bus,
3016 const char *buf, size_t count)
3017 {
3018 return pci_set_resource_alignment_param(buf, count);
3019 }
3020
3021 BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
3022 pci_resource_alignment_store);
3023
3024 static int __init pci_resource_alignment_sysfs_init(void)
3025 {
3026 return bus_create_file(&pci_bus_type,
3027 &bus_attr_resource_alignment);
3028 }
3029
3030 late_initcall(pci_resource_alignment_sysfs_init);
3031
3032 static void __devinit pci_no_domains(void)
3033 {
3034 #ifdef CONFIG_PCI_DOMAINS
3035 pci_domains_supported = 0;
3036 #endif
3037 }
3038
3039 /**
3040 * pci_ext_cfg_avail - can we access extended PCI config space?
3041 * @dev: The PCI device of the root bridge.
3042 *
3043 * Returns 1 if we can access PCI extended config space (offsets
3044 * greater than 0xff). This is the default implementation. Architecture
3045 * implementations can override this.
3046 */
3047 int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
3048 {
3049 return 1;
3050 }
3051
3052 void __weak pci_fixup_cardbus(struct pci_bus *bus)
3053 {
3054 }
3055 EXPORT_SYMBOL(pci_fixup_cardbus);
3056
3057 static int __init pci_setup(char *str)
3058 {
3059 while (str) {
3060 char *k = strchr(str, ',');
3061 if (k)
3062 *k++ = 0;
3063 if (*str && (str = pcibios_setup(str)) && *str) {
3064 if (!strcmp(str, "nomsi")) {
3065 pci_no_msi();
3066 } else if (!strcmp(str, "noaer")) {
3067 pci_no_aer();
3068 } else if (!strcmp(str, "nodomains")) {
3069 pci_no_domains();
3070 } else if (!strncmp(str, "cbiosize=", 9)) {
3071 pci_cardbus_io_size = memparse(str + 9, &str);
3072 } else if (!strncmp(str, "cbmemsize=", 10)) {
3073 pci_cardbus_mem_size = memparse(str + 10, &str);
3074 } else if (!strncmp(str, "resource_alignment=", 19)) {
3075 pci_set_resource_alignment_param(str + 19,
3076 strlen(str + 19));
3077 } else if (!strncmp(str, "ecrc=", 5)) {
3078 pcie_ecrc_get_policy(str + 5);
3079 } else if (!strncmp(str, "hpiosize=", 9)) {
3080 pci_hotplug_io_size = memparse(str + 9, &str);
3081 } else if (!strncmp(str, "hpmemsize=", 10)) {
3082 pci_hotplug_mem_size = memparse(str + 10, &str);
3083 } else {
3084 printk(KERN_ERR "PCI: Unknown option `%s'\n",
3085 str);
3086 }
3087 }
3088 str = k;
3089 }
3090 return 0;
3091 }
3092 early_param("pci", pci_setup);
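
/*
 * Example (hypothetical command line): options combine with commas,
 * e.g.
 *
 *	pci=nomsi,cbmemsize=32M,hpiosize=4K
 *
 * which disables MSI and overrides the CardBus memory and hotplug I/O
 * window sizes handled above.
 */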
3093
3094 EXPORT_SYMBOL(pci_reenable_device);
3095 EXPORT_SYMBOL(pci_enable_device_io);
3096 EXPORT_SYMBOL(pci_enable_device_mem);
3097 EXPORT_SYMBOL(pci_enable_device);
3098 EXPORT_SYMBOL(pcim_enable_device);
3099 EXPORT_SYMBOL(pcim_pin_device);
3100 EXPORT_SYMBOL(pci_disable_device);
3101 EXPORT_SYMBOL(pci_find_capability);
3102 EXPORT_SYMBOL(pci_bus_find_capability);
3103 EXPORT_SYMBOL(pci_release_regions);
3104 EXPORT_SYMBOL(pci_request_regions);
3105 EXPORT_SYMBOL(pci_request_regions_exclusive);
3106 EXPORT_SYMBOL(pci_release_region);
3107 EXPORT_SYMBOL(pci_request_region);
3108 EXPORT_SYMBOL(pci_request_region_exclusive);
3109 EXPORT_SYMBOL(pci_release_selected_regions);
3110 EXPORT_SYMBOL(pci_request_selected_regions);
3111 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3112 EXPORT_SYMBOL(pci_set_master);
3113 EXPORT_SYMBOL(pci_clear_master);
3114 EXPORT_SYMBOL(pci_set_mwi);
3115 EXPORT_SYMBOL(pci_try_set_mwi);
3116 EXPORT_SYMBOL(pci_clear_mwi);
3117 EXPORT_SYMBOL_GPL(pci_intx);
3118 EXPORT_SYMBOL(pci_assign_resource);
3119 EXPORT_SYMBOL(pci_find_parent_resource);
3120 EXPORT_SYMBOL(pci_select_bars);
3121
3122 EXPORT_SYMBOL(pci_set_power_state);
3123 EXPORT_SYMBOL(pci_save_state);
3124 EXPORT_SYMBOL(pci_restore_state);
3125 EXPORT_SYMBOL(pci_pme_capable);
3126 EXPORT_SYMBOL(pci_pme_active);
3127 EXPORT_SYMBOL(pci_wake_from_d3);
3128 EXPORT_SYMBOL(pci_target_state);
3129 EXPORT_SYMBOL(pci_prepare_to_sleep);
3130 EXPORT_SYMBOL(pci_back_from_sleep);
3131 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);