/* drm_pci.c -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
/**
 * \file drm_pci.c
 * \brief Functions and ioctls to manage PCI memory
 *
 * \warning These interfaces aren't stable yet.
 *
 * \todo Implement the remaining ioctls for the PCI pools.
 * \todo The wrappers here are so thin that they would be better off inlined.
 *
 * \author José Fonseca <jrfonseca@tungstengraphics.com>
 * \author Leif Delgass <ldelgass@retinalburn.net>
 */

/*
 * Copyright 2003 José Fonseca.
 * Copyright 2003 Leif Delgass.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <drm/drmP.h>

/**********************************************************************/
/** \name PCI memory */
/*@{*/

/**
 * \brief Allocate a PCI consistent memory block for DMA.
 */
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
	drm_dma_handle_t *dmah;
	unsigned long addr;
	size_t sz;

	/* pci_alloc_consistent only guarantees alignment to the smallest
	 * PAGE_SIZE order which is greater than or equal to the requested size.
	 * Return NULL here for now to make sure nobody tries for larger alignment
	 */
	if (align > size)
		return NULL;

	dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
	if (!dmah)
		return NULL;

	dmah->size = size;
	dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);

	if (dmah->vaddr == NULL) {
		kfree(dmah);
		return NULL;
	}

	memset(dmah->vaddr, 0, size);

	/* XXX - Is virt_to_page() legal for consistent mem? */
	/* Reserve */
	for (addr = (unsigned long)dmah->vaddr, sz = size;
	     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
		SetPageReserved(virt_to_page((void *)addr));
	}

	return dmah;
}

EXPORT_SYMBOL(drm_pci_alloc);

/**
 * \brief Free a PCI consistent memory block without freeing its descriptor.
 *
 * This function is for internal use in the Linux-specific DRM core code.
 */
void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
	unsigned long addr;
	size_t sz;

	if (dmah->vaddr) {
		/* XXX - Is virt_to_page() legal for consistent mem? */
		/* Unreserve */
		for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
		     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
			ClearPageReserved(virt_to_page((void *)addr));
		}
		dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
				  dmah->busaddr);
	}
}

/**
 * \brief Free a PCI consistent memory block
 */
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
	__drm_pci_free(dev, dmah);
	kfree(dmah);
}

EXPORT_SYMBOL(drm_pci_free);
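
/*
 * Illustrative usage sketch (not part of this file): a driver that needs a
 * small DMA-coherent scratch buffer would typically pair drm_pci_alloc()
 * with drm_pci_free(), along the lines of
 *
 *	drm_dma_handle_t *dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
 *
 *	if (!dmah)
 *		return -ENOMEM;
 *	... program dmah->busaddr into the hardware, access dmah->vaddr ...
 *	drm_pci_free(dev, dmah);
 *
 * The size and alignment above are only an example; drm_pci_alloc() rejects
 * any alignment larger than the requested size.
 */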

#ifdef CONFIG_PCI

static int drm_get_pci_domain(struct drm_device *dev)
{
#ifndef __alpha__
	/* For historical reasons, drm_get_pci_domain() is busticated
	 * on most archs and has to remain so for userspace interface
	 * < 1.4, except on alpha which was right from the beginning
	 */
	if (dev->if_version < 0x10004)
		return 0;
#endif /* __alpha__ */

	return pci_domain_nr(dev->pdev->bus);
}

static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
	int len, ret;

	master->unique_len = 40;
	master->unique_size = master->unique_len;
	master->unique = kmalloc(master->unique_size, GFP_KERNEL);
	if (master->unique == NULL)
		return -ENOMEM;

	len = snprintf(master->unique, master->unique_len,
		       "pci:%04x:%02x:%02x.%d",
		       drm_get_pci_domain(dev),
		       dev->pdev->bus->number,
		       PCI_SLOT(dev->pdev->devfn),
		       PCI_FUNC(dev->pdev->devfn));

	if (len >= master->unique_len) {
		DRM_ERROR("buffer overflow");
		ret = -EINVAL;
		goto err;
	} else
		master->unique_len = len;

	return 0;
err:
	return ret;
}

int drm_pci_set_unique(struct drm_device *dev,
		       struct drm_master *master,
		       struct drm_unique *u)
{
	int domain, bus, slot, func, ret;

	master->unique_len = u->unique_len;
	master->unique_size = u->unique_len + 1;
	master->unique = kmalloc(master->unique_size, GFP_KERNEL);
	if (!master->unique) {
		ret = -ENOMEM;
		goto err;
	}

	if (copy_from_user(master->unique, u->unique, master->unique_len)) {
		ret = -EFAULT;
		goto err;
	}

	master->unique[master->unique_len] = '\0';

	/* Return error if the busid submitted doesn't match the device's actual
	 * busid.
	 */
	ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
	if (ret != 3) {
		ret = -EINVAL;
		goto err;
	}

	domain = bus >> 8;
	bus &= 0xff;

	if ((domain != drm_get_pci_domain(dev)) ||
	    (bus != dev->pdev->bus->number) ||
	    (slot != PCI_SLOT(dev->pdev->devfn)) ||
	    (func != PCI_FUNC(dev->pdev->devfn))) {
		ret = -EINVAL;
		goto err;
	}
	return 0;
err:
	return ret;
}

static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
	if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
	    (p->busnum & 0xff) != dev->pdev->bus->number ||
	    p->devnum != PCI_SLOT(dev->pdev->devfn) ||
	    p->funcnum != PCI_FUNC(dev->pdev->devfn))
		return -EINVAL;

	p->irq = dev->pdev->irq;

	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
		  p->irq);
	return 0;
}

/**
 * Get interrupt from bus id.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_irq_busid structure.
 * \return zero on success or a negative number on failure.
 *
 * Finds the PCI device with the specified bus id and gets its IRQ number.
 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
 * to that of the device that this DRM instance is attached to.
 */
int drm_irq_by_busid(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_irq_busid *p = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	/* UMS was only ever supported on PCI devices. */
	if (WARN_ON(!dev->pdev))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return -EINVAL;

	return drm_pci_irq_by_busid(dev, p);
}

static void drm_pci_agp_init(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
		if (drm_pci_device_is_agp(dev))
			dev->agp = drm_agp_init(dev);
		if (dev->agp) {
			dev->agp->agp_mtrr = arch_phys_wc_add(
				dev->agp->agp_info.aper_base,
				dev->agp->agp_info.aper_size *
				1024 * 1024);
		}
	}
}

void drm_pci_agp_destroy(struct drm_device *dev)
{
	if (dev->agp) {
		arch_phys_wc_del(dev->agp->agp_mtrr);
		drm_agp_clear(dev);
		kfree(dev->agp);
		dev->agp = NULL;
	}
}

static struct drm_bus drm_pci_bus = {
	.set_busid = drm_pci_set_busid,
};

/**
 * Register.
 *
 * \param pdev PCI device structure
 * \param ent entry from the PCI ID table with device type flags
 * \return zero on success or a negative number on failure.
 *
 * Attempt to get inter-module "drm" information. If we are first,
 * register the character device and inter-module information.
 * Try to register; if registration fails, back out the previous work.
 */
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
		    struct drm_driver *driver)
{
	struct drm_device *dev;
	int ret;

	DRM_DEBUG("\n");

	dev = drm_dev_alloc(driver, &pdev->dev);
	if (!dev)
		return -ENOMEM;

	ret = pci_enable_device(pdev);
	if (ret)
		goto err_free;

	dev->pdev = pdev;
#ifdef __alpha__
	dev->hose = pdev->sysdata;
#endif

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		pci_set_drvdata(pdev, dev);

	drm_pci_agp_init(dev);

	ret = drm_dev_register(dev, ent->driver_data);
	if (ret)
		goto err_agp;

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver->name, driver->major, driver->minor, driver->patchlevel,
		 driver->date, pci_name(pdev), dev->primary->index);

	/* No locking needed since shadow-attach is single-threaded, as it may
	 * only be called from the per-driver module init hook. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);

	return 0;

err_agp:
	drm_pci_agp_destroy(dev);
	pci_disable_device(pdev);
err_free:
	drm_dev_unref(dev);
	return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
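
/*
 * Illustrative sketch (not part of this file): a PCI DRM driver's
 * pci_driver .probe callback typically just forwards to drm_get_pci_dev()
 * with its own drm_driver, along the lines of
 *
 *	static int example_pci_probe(struct pci_dev *pdev,
 *				     const struct pci_device_id *ent)
 *	{
 *		return drm_get_pci_dev(pdev, ent, &example_driver);
 *	}
 *
 * "example_pci_probe" and "example_driver" are hypothetical names used only
 * for illustration.
 */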

/**
 * PCI device initialization. Called directly from modules at load time.
 *
 * \return zero on success or a negative number on failure.
 *
 * Initializes a drm_device structure, registering the
 * stubs and initializing the AGP device.
 *
 * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
 * after the initialization for driver customization.
 */
int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
	struct pci_dev *pdev = NULL;
	const struct pci_device_id *pid;
	int i;

	DRM_DEBUG("\n");

	driver->bus = &drm_pci_bus;

	if (driver->driver_features & DRIVER_MODESET)
		return pci_register_driver(pdriver);

	/* If not using KMS, fall back to stealth mode manual scanning. */
	INIT_LIST_HEAD(&driver->legacy_dev_list);
	for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
		pid = &pdriver->id_table[i];

		/* Loop around setting up a DRM device for each PCI device
		 * matching our ID and device class. If we had the internal
		 * function that pci_get_subsys and pci_get_class used, we'd
		 * be able to just pass pid in instead of doing a two-stage
		 * thing.
		 */
		pdev = NULL;
		while ((pdev =
			pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
				       pid->subdevice, pdev)) != NULL) {
			if ((pdev->class & pid->class_mask) != pid->class)
				continue;

			/* stealth mode requires a manual probe */
			pci_dev_get(pdev);
			drm_get_pci_dev(pdev, pid, driver);
		}
	}
	return 0;
}

int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
	struct pci_dev *root;
	u32 lnkcap, lnkcap2;

	*mask = 0;
	if (!dev->pdev)
		return -EINVAL;

	root = dev->pdev->bus->self;

	/* we've been informed that VIA and ServerWorks don't make the cut */
	if (root->vendor == PCI_VENDOR_ID_VIA ||
	    root->vendor == PCI_VENDOR_ID_SERVERWORKS)
		return -EINVAL;

	pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
	pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);

	if (lnkcap2) {	/* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*mask |= DRM_PCIE_SPEED_50;
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*mask |= DRM_PCIE_SPEED_80;
	} else {	/* pre-r3.0 */
		if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
			*mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
	}

	DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n",
		 root->vendor, root->device, lnkcap, lnkcap2);
	return 0;
}
EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
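
/*
 * Illustrative sketch (not part of this file): a driver deciding whether to
 * train its link to a higher speed can query the mask, along the lines of
 *
 *	u32 mask;
 *	int ret = drm_pcie_get_speed_cap_mask(dev, &mask);
 *
 *	if (ret == 0 && (mask & DRM_PCIE_SPEED_80))
 *		... the upstream port advertises 8.0 GT/s support ...
 *
 * A non-zero return means the capability could not be determined and the
 * caller should fall back to its most conservative link settings.
 */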

#else

int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
	return -1;
}

void drm_pci_agp_destroy(struct drm_device *dev) {}

int drm_irq_by_busid(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	return -EINVAL;
}

int drm_pci_set_unique(struct drm_device *dev,
		       struct drm_master *master,
		       struct drm_unique *u)
{
	return -EINVAL;
}
#endif

EXPORT_SYMBOL(drm_pci_init);

/*@}*/
void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
{
	struct drm_device *dev, *tmp;

	DRM_DEBUG("\n");

	if (driver->driver_features & DRIVER_MODESET) {
		pci_unregister_driver(pdriver);
	} else {
		list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
					 legacy_dev_list) {
			list_del(&dev->legacy_dev_list);
			drm_put_dev(dev);
		}
	}
	DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_pci_exit);
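
/*
 * Illustrative sketch (not part of this file): a PCI DRM driver module wires
 * these helpers into its module init/exit hooks, along the lines of
 *
 *	static int __init example_init(void)
 *	{
 *		return drm_pci_init(&example_driver, &example_pci_driver);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		drm_pci_exit(&example_driver, &example_pci_driver);
 *	}
 *
 *	module_init(example_init);
 *	module_exit(example_exit);
 *
 * "example_driver" (a struct drm_driver) and "example_pci_driver" (a struct
 * pci_driver) are hypothetical names used only for illustration.
 */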