[PATCH] PCI: PCIE power management quirk
drivers/pci/pci.c
/*
 *	$Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $
 *
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <asm/dma.h>	/* isa_dma_bridge_buggy */
#include "pci.h"

unsigned int pci_pm_d3_delay = 10;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char __devinit
pci_bus_max_busnr(struct pci_bus *bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->subordinate;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#if 0
/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if (n > max)
			max = n;
	}
	return max;
}

#endif /* 0 */

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn, u8 pos, int cap)
{
	u8 id;
	int ttl = 48;

	while (ttl--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap(struct pci_bus *bus, unsigned int devfn, u8 hdr_type, int cap)
{
	u16 status;
	u8 pos;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		pos = PCI_CAPABILITY_LIST;
		break;
	case PCI_HEADER_TYPE_CARDBUS:
		pos = PCI_CB_CAPABILITY_LIST;
		break;
	default:
		return 0;
	}
	return __pci_find_next_cap(bus, devfn, pos, cap);
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM      Power Management
 *  %PCI_CAP_ID_AGP     Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD     Vital Product Data
 *  %PCI_CAP_ID_SLOTID  Slot Identification
 *  %PCI_CAP_ID_MSI     Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP   CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX    PCI-X
 *  %PCI_CAP_ID_EXP     PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	return __pci_bus_find_cap(dev->bus, dev->devfn, dev->hdr_type, cap);
}
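
/*
 * Illustrative sketch (not part of the original file): how a caller would
 * typically combine pci_find_capability() with the config-space accessors
 * to read a capability register -- here the Power Management Capabilities
 * word.  The function name is hypothetical and the block is compiled out.
 */
#if 0
static u16 example_read_pm_caps(struct pci_dev *dev)
{
	int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	u16 pmc = 0;

	if (pm)	/* 0 means the device has no PM capability */
		pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
	return pmc;
}
#endif /* 0 */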

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	return __pci_bus_find_cap(bus, devfn, hdr_type & 0x7f, cap);
}

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR  Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC   Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN  Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR  Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */
	int pos = 0x100;

	if (dev->cfg_size <= 256)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 0x100)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
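
/*
 * Illustrative sketch (not part of the original file): walking from
 * pci_find_ext_capability() to the registers of an extended capability,
 * using the Device Serial Number capability as an example; its two serial
 * number dwords follow the capability header.  The function name is
 * hypothetical and the block is compiled out.
 */
#if 0
static u64 example_read_device_serial(struct pci_dev *dev)
{
	int pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
	u32 lo, hi;

	if (!pos)
		return 0;	/* capability not present */
	pci_read_config_dword(dev, pos + 4, &lo);
	pci_read_config_dword(dev, pos + 8, &hi);
	return ((u64)hi << 32) | lo;
}
#endif /* 0 */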

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource
 * region of parent bus the given region is contained in or where
 * it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL;

	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
		struct resource *r = bus->resource[i];
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
			best = r;	/* Approximating prefetchable by non-prefetchable */
	}
	return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
void
pci_restore_bars(struct pci_dev *dev)
{
	int i, numres;

	switch (dev->hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
		numres = 6;
		break;
	case PCI_HEADER_TYPE_BRIDGE:
		numres = 2;
		break;
	case PCI_HEADER_TYPE_CARDBUS:
		numres = 1;
		break;
	default:
		/* Should never get here, but just in case... */
		return;
	}

	for (i = 0; i < numres; i++)
		pci_update_resource(dev, &dev->resource[i], i);
}

int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t t);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering
 *
 * Transition a device to a new power state, using the Power Management
 * Capabilities in the device's config space.
 *
 * RETURN VALUE:
 * -EINVAL if trying to enter a state shallower (other than D0) than the
 *	one the device is already in.
 * 0 if we're already in the requested state.
 * -EIO if device does not support PCI PM.
 * 0 if we can successfully change the power state.
 */
int
pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int pm, need_restore = 0;
	u16 pmcsr, pmc;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;

	/* Validate current state:
	 * we can enter D0 from any state, but otherwise we may only move
	 * deeper into sleep; the requested state must not be shallower
	 * than the one we are already in.
	 */
	if (state != PCI_D0 && dev->current_state > state) {
		printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n",
			__FUNCTION__, pci_name(dev), state, dev->current_state);
		return -EINVAL;
	} else if (dev->current_state == state)
		return 0;	/* we're already there */

	/*
	 * If the device or the parent bridge can't support PCI PM, ignore
	 * the request if we're doing anything besides putting it into D0
	 * (which would only happen on boot).
	 */
	if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		return 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* abort if the device doesn't support PM capabilities */
	if (!pm)
		return -EIO;

	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		printk(KERN_DEBUG
		       "PCI: %s has unsupported PM cap regs version (%u)\n",
		       pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
		return -EIO;
	}

	/* check if this device supports the desired state */
	if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1))
		return -EIO;
	else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
		return -EIO;

	pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		    && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = 1;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays;
	 * see PCI PM 1.1 5.6.1 table 18
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		msleep(pci_pm_d3_delay);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(200);

	/*
	 * Give the platform firmware (e.g. the ACPI _PRx/_PSx methods) a
	 * chance to act as well; the firmware method is invoked after the
	 * native one.
	 */
	if (platform_pci_set_power_state)
		platform_pci_set_power_state(dev, state);

	dev->current_state = state;

	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	return 0;
}

int (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state);

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	int ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	if (platform_pci_choose_state) {
		ret = platform_pci_choose_state(dev, state);
		if (ret >= 0)
			state.event = ret;
	}

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_SUSPEND:
		return PCI_D3hot;
	default:
		printk(KERN_ERR "PCI: unrecognized suspend event %d\n",
			state.event);
		BUG();
	}
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);
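
/*
 * Illustrative sketch (not part of the original file): the usual order of
 * calls in a driver's suspend hook -- save config space, stop using the
 * device, ask pci_choose_state() which D-state fits the system message,
 * then enter it with pci_set_power_state().  The function name is
 * hypothetical and the block is compiled out.
 */
#if 0
static int example_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
}
#endif /* 0 */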

/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	if ((i = pci_save_msi_state(dev)) != 0)
		return i;
	if ((i = pci_save_msix_state(dev)) != 0)
		return i;
	return 0;
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
int
pci_restore_state(struct pci_dev *dev)
{
	int i;
	u32 val;

	/*
	 * The Base Address register should be programmed before the command
	 * register(s)
	 */
	for (i = 15; i >= 0; i--) {
		pci_read_config_dword(dev, i * 4, &val);
		if (val != dev->saved_config_space[i]) {
			printk(KERN_DEBUG "PM: Writing back config space on "
				"device %s at offset %x (was %x, writing %x)\n",
				pci_name(dev), i,
				val, (int)dev->saved_config_space[i]);
			pci_write_config_dword(dev, i * 4,
				dev->saved_config_space[i]);
		}
	}
	pci_restore_msi_state(dev);
	pci_restore_msix_state(dev);
	return 0;
}
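
/*
 * Illustrative sketch (not part of the original file): the counterpart of
 * the suspend sequence above -- bring the device back to D0, write the
 * saved config space back, and re-enable it.  The function name is
 * hypothetical and the block is compiled out.
 */
#if 0
static int example_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}
#endif /* 0 */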

/**
 * pci_enable_device_bars - Initialize some of a device for use
 * @dev: PCI device to be initialized
 * @bars: bitmask of BARs that must be configured
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable selected I/O and memory resources. Wake up the device if it
 * was suspended. Beware, this function can fail.
 */

int
pci_enable_device_bars(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	return 0;
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int
pci_enable_device(struct pci_dev *dev)
{
	int err;

	if (dev->is_enabled)
		return 0;

	err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1);
	if (err)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);
	dev->is_enabled = 1;
	return 0;
}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 */
void
pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	if (dev->msi_enabled)
		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
			PCI_CAP_ID_MSI);
	if (dev->msix_enabled)
		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
			PCI_CAP_ID_MSIX);

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}
	dev->is_busmaster = 0;

	pcibios_disable_device(dev);
	dev->is_enabled = 0;
}

/**
 * pci_enable_wake - enable device to generate PME# when suspended
 * @dev: PCI device to operate on
 * @state: Current state of device.
 * @enable: Flag to enable or disable generation
 *
 * Set the bits in the device's PM Capabilities to generate PME# when
 * the system is suspended.
 *
 * -EIO is returned if device doesn't have PM Capabilities.
 * -EINVAL is returned if device supports it, but can't generate wake events.
 * 0 if operation is successful.
 *
 */
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
{
	int pm;
	u16 value;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* If device doesn't support PM Capabilities, but request is to disable
	 * wake events, it's a nop; otherwise fail */
	if (!pm)
		return enable ? -EIO : 0;

	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &value);

	value &= PCI_PM_CAP_PME_MASK;
	value >>= ffs(PCI_PM_CAP_PME_MASK) - 1;	/* First bit of mask */

	/* Check if it can generate PME# from requested state. */
	if (!value || !(value & (1 << state)))
		return enable ? -EINVAL : 0;

	pci_read_config_word(dev, pm + PCI_PM_CTRL, &value);

	/* Clear PME_Status by writing 1 to it and enable PME# */
	value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;

	if (!enable)
		value &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, pm + PCI_PM_CTRL, value);

	return 0;
}
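
/*
 * Illustrative sketch (not part of the original file): a suspend path that
 * arms PME# for wake-capable devices before entering the chosen low-power
 * state.  The function name and the unconditional wake decision are
 * hypothetical; the block is compiled out.
 */
#if 0
static int example_suspend_with_wake(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_power_t target = pci_choose_state(pdev, mesg);

	pci_save_state(pdev);
	/* Ignore -EINVAL/-EIO here: the device simply cannot wake us. */
	pci_enable_wake(pdev, target, 1);
	return pci_set_power_state(pdev, target);
}
#endif /* 0 */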

int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pin = dev->pin;
	if (!pin)
		return -1;
	pin--;
	while (dev->bus->self) {
		pin = (pin + PCI_SLOT(dev->devfn)) % 4;
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}

/**
 * pci_release_region - Release a PCI BAR
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
}

/**
 * pci_request_region - Reserve a PCI I/O or memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!request_mem_region(pci_resource_start(pdev, bar),
				        pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}

	return 0;

err_out:
	printk(KERN_WARNING "PCI: Unable to reserve %s region #%d:%llx@%llx "
		"for device %s\n",
		pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
		bar + 1, /* PCI BAR # */
		(unsigned long long)pci_resource_len(pdev, bar),
		(unsigned long long)pci_resource_start(pdev, bar),
		pci_name(pdev));
	return -EBUSY;
}

/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */

void pci_release_regions(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < 6; i++)
		pci_release_region(pdev, i);
}

/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	int i;

	for (i = 0; i < 6; i++)
		if (pci_request_region(pdev, i, res_name))
			goto err_out;
	return 0;

err_out:
	while (--i >= 0)
		pci_release_region(pdev, i);

	return -EBUSY;
}
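
/*
 * Illustrative sketch (not part of the original file): the canonical
 * enable/request/master sequence in a driver's probe routine, with the
 * matching cleanup on failure.  The function and region names are
 * hypothetical and the block is compiled out.
 */
#if 0
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, "example");
	if (err) {
		pci_disable_device(pdev);
		return err;
	}
	pci_set_master(pdev);
	return 0;
}
#endif /* 0 */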

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void
pci_set_master(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_MASTER)) {
		pr_debug("PCI: Enabling bus mastering for device %s\n", pci_name(dev));
		cmd |= PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	dev->is_busmaster = 1;
	pcibios_set_master(dev);
}

#ifndef HAVE_ARCH_PCI_MWI
/* This can be overridden by arch code. */
u8 pci_cache_line_size = L1_CACHE_BYTES >> 2;

/**
 * pci_generic_prep_mwi - helper function for pci_set_mwi
 * @dev: the PCI device for which MWI is enabled
 *
 * Helper function for generic implementation of pcibios_prep_mwi
 * function.  Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
static int
pci_generic_prep_mwi(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;		/* The system doesn't support MWI. */

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	printk(KERN_DEBUG "PCI: cache line size of %d is not supported "
	       "by device %s\n", pci_cache_line_size << 2, pci_name(dev));

	return -EINVAL;
}
#endif /* !HAVE_ARCH_PCI_MWI */

/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND,
 * and then calls @pcibios_set_mwi to do the needed arch specific
 * operations or a generic mwi-prep function.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

#ifdef HAVE_ARCH_PCI_MWI
	rc = pcibios_prep_mwi(dev);
#else
	rc = pci_generic_prep_mwi(dev);
#endif

	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
		pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n", pci_name(dev));
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}
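
/*
 * Illustrative sketch (not part of the original file): MWI is an
 * optimisation, so a caller would normally treat a pci_set_mwi() failure
 * as non-fatal and simply continue without it.  The function name is
 * hypothetical and the block is compiled out.
 */
#if 0
static void example_try_mwi(struct pci_dev *pdev)
{
	if (pci_set_mwi(pdev))
		printk(KERN_INFO "%s: running without Mem-Wr-Inval\n",
		       pci_name(pdev));
}
#endif /* 0 */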

/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}

/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device dev
 */
void
pci_intx(struct pci_dev *pdev, int enable)
{
	u16 pci_command, new;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

	if (enable) {
		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
	} else {
		new = pci_command | PCI_COMMAND_INTX_DISABLE;
	}

	if (new != pci_command) {
		pci_write_config_word(pdev, PCI_COMMAND, new);
	}
}

#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
/*
 * These can be overridden by arch-specific implementations
 */
int
pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dma_mask = mask;

	return 0;
}

int
pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dev.coherent_dma_mask = mask;

	return 0;
}
#endif
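
/*
 * Illustrative sketch (not part of the original file): the common pattern
 * of asking for a 64-bit DMA mask and falling back to 32-bit when the
 * platform cannot honour it, keeping the streaming and coherent masks in
 * step.  The function name and the DMA_*BIT_MASK constants are assumed
 * from the DMA-mapping headers of this era; the block is compiled out.
 */
#if 0
static int example_set_dma_masks(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		return 0;
	}
	if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		return 0;
	}
	return -EIO;	/* no usable DMA configuration */
}
#endif /* 0 */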

static int __devinit pci_init(void)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		pci_fixup_device(pci_fixup_final, dev);
	}
	return 0;
}

static int __devinit pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 1;
}

device_initcall(pci_init);

__setup("pci=", pci_setup);

#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
/* FIXME: Some boxes have multiple ISA bridges! */
struct pci_dev *isa_bridge;
EXPORT_SYMBOL(isa_bridge);
#endif

EXPORT_SYMBOL_GPL(pci_restore_bars);
EXPORT_SYMBOL(pci_enable_device_bars);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_set_dma_mask);
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_enable_wake);

/* Quirk info */

EXPORT_SYMBOL(isa_dma_bridge_buggy);
EXPORT_SYMBOL(pci_pci_problems);