drivers/pnp/manager.c
/*
 * manager.c - Resource Management, Conflict Resolution, Activation and Disabling of Devices
 *
 * based on isapnp.c resource management (c) Jaroslav Kysela <perex@perex.cz>
 * Copyright 2003 Adam Belay <ambx1@neo.rr.com>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/mutex.h>
#include "base.h"

DEFINE_MUTEX(pnp_res_mutex);

static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
{
	resource_size_t *start, *end;
	unsigned long *flags;

	if (idx >= PNP_MAX_PORT) {
		dev_err(&dev->dev, "too many I/O port resources\n");
		/* pretend we were successful so at least the manager won't try again */
		return 1;
	}

	/* skip this resource if it has been set manually */
	if (!(dev->res.port_resource[idx].flags & IORESOURCE_AUTO))
		return 1;

	start = &dev->res.port_resource[idx].start;
	end = &dev->res.port_resource[idx].end;
	flags = &dev->res.port_resource[idx].flags;

	/* set the initial values */
	*flags |= rule->flags | IORESOURCE_IO;
	*flags &= ~IORESOURCE_UNSET;

	if (!rule->size) {
		*flags |= IORESOURCE_DISABLED;
		return 1;	/* skip disabled resource requests */
	}

	*start = rule->min;
	*end = *start + rule->size - 1;

	/* run through until pnp_check_port is happy */
	while (!pnp_check_port(dev, idx)) {
		*start += rule->align;
		*end = *start + rule->size - 1;
		if (*start > rule->max || !rule->align)
			return 0;
	}
	return 1;
}
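
/*
 * Editor's sketch (not part of the original file): the port and memory
 * assignment helpers share one pattern -- start at the rule's minimum
 * address and step forward by the rule's alignment until the conflict
 * check passes or the maximum is exceeded.  The helper below is a
 * hypothetical, generic restatement of that loop; "region_is_free" is an
 * assumed callback standing in for pnp_check_port()/pnp_check_mem().
 */
static int example_scan_for_region(resource_size_t min, resource_size_t max,
				   resource_size_t size, resource_size_t align,
				   int (*region_is_free)(resource_size_t start,
							 resource_size_t size))
{
	resource_size_t start = min;

	while (!region_is_free(start, size)) {
		if (!align)		/* a fixed region cannot be moved */
			return 0;
		start += align;
		if (start > max)	/* ran past the allowed window */
			return 0;
	}
	return 1;			/* a usable region begins at "start" */
}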

static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
{
	resource_size_t *start, *end;
	unsigned long *flags;

	if (idx >= PNP_MAX_MEM) {
		dev_err(&dev->dev, "too many memory resources\n");
		/* pretend we were successful so at least the manager won't try again */
		return 1;
	}

	/* skip this resource if it has been set manually */
	if (!(dev->res.mem_resource[idx].flags & IORESOURCE_AUTO))
		return 1;

	start = &dev->res.mem_resource[idx].start;
	end = &dev->res.mem_resource[idx].end;
	flags = &dev->res.mem_resource[idx].flags;

	/* set the initial values */
	*flags |= rule->flags | IORESOURCE_MEM;
	*flags &= ~IORESOURCE_UNSET;

	/* convert pnp flags to standard Linux flags */
	if (!(rule->flags & IORESOURCE_MEM_WRITEABLE))
		*flags |= IORESOURCE_READONLY;
	if (rule->flags & IORESOURCE_MEM_CACHEABLE)
		*flags |= IORESOURCE_CACHEABLE;
	if (rule->flags & IORESOURCE_MEM_RANGELENGTH)
		*flags |= IORESOURCE_RANGELENGTH;
	if (rule->flags & IORESOURCE_MEM_SHADOWABLE)
		*flags |= IORESOURCE_SHADOWABLE;

	if (!rule->size) {
		*flags |= IORESOURCE_DISABLED;
		return 1;	/* skip disabled resource requests */
	}

	*start = rule->min;
	*end = *start + rule->size - 1;

	/* run through until pnp_check_mem is happy */
	while (!pnp_check_mem(dev, idx)) {
		*start += rule->align;
		*end = *start + rule->size - 1;
		if (*start > rule->max || !rule->align)
			return 0;
	}
	return 1;
}

static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
{
	resource_size_t *start, *end;
	unsigned long *flags;
	int i;

	/* IRQ priority: this table is good for i386 */
	static unsigned short xtab[16] = {
		5, 10, 11, 12, 9, 14, 15, 7, 3, 4, 13, 0, 1, 6, 8, 2
	};

	if (idx >= PNP_MAX_IRQ) {
		dev_err(&dev->dev, "too many IRQ resources\n");
		/* pretend we were successful so at least the manager won't try again */
		return 1;
	}

	/* skip this resource if it has been set manually */
	if (!(dev->res.irq_resource[idx].flags & IORESOURCE_AUTO))
		return 1;

	start = &dev->res.irq_resource[idx].start;
	end = &dev->res.irq_resource[idx].end;
	flags = &dev->res.irq_resource[idx].flags;

	/* set the initial values */
	*flags |= rule->flags | IORESOURCE_IRQ;
	*flags &= ~IORESOURCE_UNSET;

	if (bitmap_empty(rule->map, PNP_IRQ_NR)) {
		*flags |= IORESOURCE_DISABLED;
		return 1;	/* skip disabled resource requests */
	}

	/* TBD: need check for >16 IRQ */
	*start = find_next_bit(rule->map, PNP_IRQ_NR, 16);
	if (*start < PNP_IRQ_NR) {
		*end = *start;
		return 1;
	}
	for (i = 0; i < 16; i++) {
		if (test_bit(xtab[i], rule->map)) {
			*start = *end = xtab[i];
			if (pnp_check_irq(dev, idx))
				return 1;
		}
	}
	return 0;
}

static void pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
{
	resource_size_t *start, *end;
	unsigned long *flags;
	int i;

	/* DMA priority: this table is good for i386 */
	static unsigned short xtab[8] = {
		1, 3, 5, 6, 7, 0, 2, 4
	};

	if (idx >= PNP_MAX_DMA) {
		dev_err(&dev->dev, "too many DMA resources\n");
		return;
	}

	/* skip this resource if it has been set manually */
	if (!(dev->res.dma_resource[idx].flags & IORESOURCE_AUTO))
		return;

	start = &dev->res.dma_resource[idx].start;
	end = &dev->res.dma_resource[idx].end;
	flags = &dev->res.dma_resource[idx].flags;

	/* set the initial values */
	*flags |= rule->flags | IORESOURCE_DMA;
	*flags &= ~IORESOURCE_UNSET;

	for (i = 0; i < 8; i++) {
		if (rule->map & (1 << xtab[i])) {
			*start = *end = xtab[i];
			if (pnp_check_dma(dev, idx))
				return;
		}
	}
#ifdef MAX_DMA_CHANNELS
	*start = *end = MAX_DMA_CHANNELS;
#endif
	*flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
}

/**
 * pnp_init_resource_table - Resets a resource table to default values.
 * @table: pointer to the desired resource table
 */
void pnp_init_resource_table(struct pnp_resource_table *table)
{
	int idx;

	for (idx = 0; idx < PNP_MAX_IRQ; idx++) {
		table->irq_resource[idx].name = NULL;
		table->irq_resource[idx].start = -1;
		table->irq_resource[idx].end = -1;
		table->irq_resource[idx].flags =
		    IORESOURCE_IRQ | IORESOURCE_AUTO | IORESOURCE_UNSET;
	}
	for (idx = 0; idx < PNP_MAX_DMA; idx++) {
		table->dma_resource[idx].name = NULL;
		table->dma_resource[idx].start = -1;
		table->dma_resource[idx].end = -1;
		table->dma_resource[idx].flags =
		    IORESOURCE_DMA | IORESOURCE_AUTO | IORESOURCE_UNSET;
	}
	for (idx = 0; idx < PNP_MAX_PORT; idx++) {
		table->port_resource[idx].name = NULL;
		table->port_resource[idx].start = 0;
		table->port_resource[idx].end = 0;
		table->port_resource[idx].flags =
		    IORESOURCE_IO | IORESOURCE_AUTO | IORESOURCE_UNSET;
	}
	for (idx = 0; idx < PNP_MAX_MEM; idx++) {
		table->mem_resource[idx].name = NULL;
		table->mem_resource[idx].start = 0;
		table->mem_resource[idx].end = 0;
		table->mem_resource[idx].flags =
		    IORESOURCE_MEM | IORESOURCE_AUTO | IORESOURCE_UNSET;
	}
}
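
/*
 * Editor's sketch (not part of the original file): a caller that wants to
 * hand a fixed configuration to pnp_manual_config_dev() typically starts
 * from a freshly initialised table and pins down only the entries it cares
 * about; anything left as IORESOURCE_AUTO | IORESOURCE_UNSET is treated as
 * "don't care".  The values below (port 0x330, IRQ 9) and the "example_"
 * name are purely illustrative.
 */
static void example_prepare_table(struct pnp_resource_table *table)
{
	pnp_init_resource_table(table);		/* everything AUTO/UNSET */

	/* request an 8-byte I/O window at 0x330 and IRQ 9 */
	pnp_resource_change(&table->port_resource[0], 0x330, 8);
	pnp_resource_change(&table->irq_resource[0], 9, 1);
}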

/**
 * pnp_clean_resource_table - clears resources that were not manually set
 * @res: the resources to clean
 */
static void pnp_clean_resource_table(struct pnp_resource_table *res)
{
	int idx;

	for (idx = 0; idx < PNP_MAX_IRQ; idx++) {
		if (!(res->irq_resource[idx].flags & IORESOURCE_AUTO))
			continue;
		res->irq_resource[idx].start = -1;
		res->irq_resource[idx].end = -1;
		res->irq_resource[idx].flags =
		    IORESOURCE_IRQ | IORESOURCE_AUTO | IORESOURCE_UNSET;
	}
	for (idx = 0; idx < PNP_MAX_DMA; idx++) {
		if (!(res->dma_resource[idx].flags & IORESOURCE_AUTO))
			continue;
		res->dma_resource[idx].start = -1;
		res->dma_resource[idx].end = -1;
		res->dma_resource[idx].flags =
		    IORESOURCE_DMA | IORESOURCE_AUTO | IORESOURCE_UNSET;
	}
	for (idx = 0; idx < PNP_MAX_PORT; idx++) {
		if (!(res->port_resource[idx].flags & IORESOURCE_AUTO))
			continue;
		res->port_resource[idx].start = 0;
		res->port_resource[idx].end = 0;
		res->port_resource[idx].flags =
		    IORESOURCE_IO | IORESOURCE_AUTO | IORESOURCE_UNSET;
	}
	for (idx = 0; idx < PNP_MAX_MEM; idx++) {
		if (!(res->mem_resource[idx].flags & IORESOURCE_AUTO))
			continue;
		res->mem_resource[idx].start = 0;
		res->mem_resource[idx].end = 0;
		res->mem_resource[idx].flags =
		    IORESOURCE_MEM | IORESOURCE_AUTO | IORESOURCE_UNSET;
	}
}

/**
 * pnp_assign_resources - assigns resources to the device based on the specified dependent number
 * @dev: pointer to the desired device
 * @depnum: the dependent function number
 *
 * Only set depnum to 0 if the device does not have dependent options.
 */
static int pnp_assign_resources(struct pnp_dev *dev, int depnum)
{
	struct pnp_port *port;
	struct pnp_mem *mem;
	struct pnp_irq *irq;
	struct pnp_dma *dma;
	int nport = 0, nmem = 0, nirq = 0, ndma = 0;

	if (!pnp_can_configure(dev))
		return -ENODEV;

	mutex_lock(&pnp_res_mutex);
	pnp_clean_resource_table(&dev->res);	/* start with a fresh slate */
	if (dev->independent) {
		port = dev->independent->port;
		mem = dev->independent->mem;
		irq = dev->independent->irq;
		dma = dev->independent->dma;
		while (port) {
			if (!pnp_assign_port(dev, port, nport))
				goto fail;
			nport++;
			port = port->next;
		}
		while (mem) {
			if (!pnp_assign_mem(dev, mem, nmem))
				goto fail;
			nmem++;
			mem = mem->next;
		}
		while (irq) {
			if (!pnp_assign_irq(dev, irq, nirq))
				goto fail;
			nirq++;
			irq = irq->next;
		}
		while (dma) {
			pnp_assign_dma(dev, dma, ndma);
			ndma++;
			dma = dma->next;
		}
	}

	if (depnum) {
		struct pnp_option *dep;
		int i;
		for (i = 1, dep = dev->dependent; i < depnum;
		     i++, dep = dep->next)
			if (!dep)
				goto fail;
		port = dep->port;
		mem = dep->mem;
		irq = dep->irq;
		dma = dep->dma;
		while (port) {
			if (!pnp_assign_port(dev, port, nport))
				goto fail;
			nport++;
			port = port->next;
		}
		while (mem) {
			if (!pnp_assign_mem(dev, mem, nmem))
				goto fail;
			nmem++;
			mem = mem->next;
		}
		while (irq) {
			if (!pnp_assign_irq(dev, irq, nirq))
				goto fail;
			nirq++;
			irq = irq->next;
		}
		while (dma) {
			pnp_assign_dma(dev, dma, ndma);
			ndma++;
			dma = dma->next;
		}
	} else if (dev->dependent)
		goto fail;

	mutex_unlock(&pnp_res_mutex);
	return 1;

fail:
	pnp_clean_resource_table(&dev->res);
	mutex_unlock(&pnp_res_mutex);
	return 0;
}

/**
 * pnp_manual_config_dev - disables automatic configuration and manually sets the resource table
 * @dev: pointer to the desired device
 * @res: pointer to the new resource config
 * @mode: 0 or PNP_CONFIG_FORCE
 *
 * This function can be used by drivers that want to manually set their resources.
 */
int pnp_manual_config_dev(struct pnp_dev *dev, struct pnp_resource_table *res,
			  int mode)
{
	int i;
	struct pnp_resource_table *bak;

	if (!pnp_can_configure(dev))
		return -ENODEV;
	bak = pnp_alloc(sizeof(struct pnp_resource_table));
	if (!bak)
		return -ENOMEM;
	*bak = dev->res;

	mutex_lock(&pnp_res_mutex);
	dev->res = *res;
	if (!(mode & PNP_CONFIG_FORCE)) {
		for (i = 0; i < PNP_MAX_PORT; i++) {
			if (!pnp_check_port(dev, i))
				goto fail;
		}
		for (i = 0; i < PNP_MAX_MEM; i++) {
			if (!pnp_check_mem(dev, i))
				goto fail;
		}
		for (i = 0; i < PNP_MAX_IRQ; i++) {
			if (!pnp_check_irq(dev, i))
				goto fail;
		}
		for (i = 0; i < PNP_MAX_DMA; i++) {
			if (!pnp_check_dma(dev, i))
				goto fail;
		}
	}
	mutex_unlock(&pnp_res_mutex);

	kfree(bak);
	return 0;

fail:
	dev->res = *bak;
	mutex_unlock(&pnp_res_mutex);
	kfree(bak);
	return -EINVAL;
}
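
/*
 * Editor's sketch (not part of the original file): handing a table such as
 * the one prepared in example_prepare_table() above to the manager.  With
 * mode 0 the request is rejected (-EINVAL) if any entry conflicts with
 * another device; passing PNP_CONFIG_FORCE skips the conflict checks.
 * Names prefixed with "example_" are hypothetical.
 */
static int example_manual_setup(struct pnp_dev *dev)
{
	struct pnp_resource_table *table;
	int error;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	example_prepare_table(table);		/* see sketch above */

	error = pnp_manual_config_dev(dev, table, 0);
	if (error)
		dev_err(&dev->dev, "manual configuration failed: %d\n", error);

	kfree(table);
	return error;
}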

/**
 * pnp_auto_config_dev - automatically assigns resources to a device
 * @dev: pointer to the desired device
 */
int pnp_auto_config_dev(struct pnp_dev *dev)
{
	struct pnp_option *dep;
	int i = 1;

	if (!pnp_can_configure(dev)) {
		dev_dbg(&dev->dev, "configuration not supported\n");
		return -ENODEV;
	}

	if (!dev->dependent) {
		if (pnp_assign_resources(dev, 0))
			return 0;
	} else {
		dep = dev->dependent;
		do {
			if (pnp_assign_resources(dev, i))
				return 0;
			dep = dep->next;
			i++;
		} while (dep);
	}

	dev_err(&dev->dev, "unable to assign resources\n");
	return -EBUSY;
}
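
/*
 * Editor's sketch (not part of the original file): pnp_activate_dev() below
 * is essentially pnp_auto_config_dev() followed by pnp_start_dev() (plus
 * bookkeeping of dev->active).  A caller that wants to inspect or adjust
 * the automatically chosen resources before the device is programmed can
 * split the two steps itself; "example_" names are hypothetical.
 */
static int example_config_then_start(struct pnp_dev *dev)
{
	int error;

	error = pnp_auto_config_dev(dev);	/* pick a working option set */
	if (error)
		return error;

	/* dev->res now holds the chosen resources; inspect/adjust here */

	return pnp_start_dev(dev);		/* program the device */
}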

/**
 * pnp_start_dev - low-level start of the PnP device
 * @dev: pointer to the desired device
 *
 * assumes that resources have already been allocated
 */
int pnp_start_dev(struct pnp_dev *dev)
{
	if (!pnp_can_write(dev)) {
		dev_dbg(&dev->dev, "activation not supported\n");
		return -EINVAL;
	}

	if (dev->protocol->set(dev, &dev->res) < 0) {
		dev_err(&dev->dev, "activation failed\n");
		return -EIO;
	}

	dev_info(&dev->dev, "activated\n");
	return 0;
}

/**
 * pnp_stop_dev - low-level disable of the PnP device
 * @dev: pointer to the desired device
 *
 * does not free resources
 */
int pnp_stop_dev(struct pnp_dev *dev)
{
	if (!pnp_can_disable(dev)) {
		dev_dbg(&dev->dev, "disabling not supported\n");
		return -EINVAL;
	}
	if (dev->protocol->disable(dev) < 0) {
		dev_err(&dev->dev, "disable failed\n");
		return -EIO;
	}

	dev_info(&dev->dev, "disabled\n");
	return 0;
}

/**
 * pnp_activate_dev - activates a PnP device for use
 * @dev: pointer to the desired device
 *
 * automatically assigns any resources not set manually, then starts the device
 */
int pnp_activate_dev(struct pnp_dev *dev)
{
	int error;

	if (dev->active)
		return 0;

	/* ensure resources are allocated */
	if (pnp_auto_config_dev(dev))
		return -EBUSY;

	error = pnp_start_dev(dev);
	if (error)
		return error;

	dev->active = 1;
	return 0;
}
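
/*
 * Editor's sketch (not part of the original file): typical use from a PnP
 * driver's probe/remove callbacks.  The "example_" names are hypothetical;
 * the resource fields read below are the ones managed by this file.
 */
static int example_probe(struct pnp_dev *dev)
{
	int error;

	error = pnp_activate_dev(dev);		/* assign + program resources */
	if (error < 0)
		return error;

	dev_info(&dev->dev, "using io 0x%llx-0x%llx, irq %llu\n",
		 (unsigned long long)dev->res.port_resource[0].start,
		 (unsigned long long)dev->res.port_resource[0].end,
		 (unsigned long long)dev->res.irq_resource[0].start);
	return 0;
}

static void example_remove(struct pnp_dev *dev)
{
	pnp_disable_dev(dev);			/* stop and release resources */
}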

/**
 * pnp_disable_dev - disables device
 * @dev: pointer to the desired device
 *
 * inform the correct pnp protocol so that resources can be used by other devices
 */
int pnp_disable_dev(struct pnp_dev *dev)
{
	int error;

	if (!dev->active)
		return 0;

	error = pnp_stop_dev(dev);
	if (error)
		return error;

	dev->active = 0;

	/* release the resources so that other devices can use them */
	mutex_lock(&pnp_res_mutex);
	pnp_clean_resource_table(&dev->res);
	mutex_unlock(&pnp_res_mutex);

	return 0;
}

/**
 * pnp_resource_change - change one resource
 * @resource: pointer to resource to be changed
 * @start: start of region
 * @size: size of region
 */
void pnp_resource_change(struct resource *resource, resource_size_t start,
			 resource_size_t size)
{
	resource->flags &= ~(IORESOURCE_AUTO | IORESOURCE_UNSET);
	resource->start = start;
	resource->end = start + size - 1;
}
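
/*
 * Editor's note (not part of the original file): clearing IORESOURCE_AUTO
 * here is what stops pnp_clean_resource_table() and the pnp_assign_*()
 * helpers above from touching the entry again, and clearing
 * IORESOURCE_UNSET marks it as holding a real value.  For example, pinning
 * the first I/O region of a table to eight ports at 0x300:
 *
 *	pnp_resource_change(&table->port_resource[0], 0x300, 8);
 *
 * leaves start = 0x300, end = 0x307, and the entry exempt from further
 * auto-assignment.
 */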

EXPORT_SYMBOL(pnp_manual_config_dev);
EXPORT_SYMBOL(pnp_start_dev);
EXPORT_SYMBOL(pnp_stop_dev);
EXPORT_SYMBOL(pnp_activate_dev);
EXPORT_SYMBOL(pnp_disable_dev);
EXPORT_SYMBOL(pnp_resource_change);
EXPORT_SYMBOL(pnp_init_resource_table);