powerpc: Use the newly added get_required_mask dma_map_ops hook
arch/powerpc/kernel/vio.c
/*
 * IBM PowerPC Virtual I/O Infrastructure Support.
 *
 * Copyright (c) 2003,2008 IBM Corp.
 *  Dave Engebretsen engebret@us.ibm.com
 *  Santiago Leon santil@us.ibm.com
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *  Stephen Rothwell
 *  Robert Jennings <rcjenn@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kobject.h>

#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/abs_addr.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/iseries/vio.h>
#include <asm/iseries/hv_types.h>
#include <asm/iseries/hv_lp_config.h>
#include <asm/iseries/hv_call_xm.h>
#include <asm/iseries/iommu.h>

static struct bus_type vio_bus_type;

static struct vio_dev vio_bus_device = { /* fake "parent" device */
        .name = "vio",
        .type = "",
        .dev.init_name = "vio",
        .dev.bus = &vio_bus_type,
};

#ifdef CONFIG_PPC_SMLPAR
/**
 * vio_cmo_pool - A pool of IO memory for CMO use
 *
 * @size: The size of the pool in bytes
 * @free: The amount of free memory in the pool
 */
struct vio_cmo_pool {
        size_t size;
        size_t free;
};

/* How many ms to delay queued balance work */
#define VIO_CMO_BALANCE_DELAY 100

/* Portion out IO memory to CMO devices by this chunk size */
#define VIO_CMO_BALANCE_CHUNK 131072

/**
 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
 *
 * @viodev: struct vio_dev pointer
 * @list: pointer to other devices on bus that are being tracked
 */
struct vio_cmo_dev_entry {
        struct vio_dev *viodev;
        struct list_head list;
};

/**
 * vio_cmo - VIO bus accounting structure for CMO entitlement
 *
 * @lock: spinlock for entire structure
 * @balance_q: work queue for balancing system entitlement
 * @device_list: list of CMO-enabled devices requiring entitlement
 * @entitled: total system entitlement in bytes
 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
 * @excess: pool of excess entitlement not needed for device reserves or spare
 * @spare: IO memory for device hotplug functionality
 * @min: minimum necessary for system operation
 * @desired: desired memory for system operation
 * @curr: bytes currently allocated
 * @high: high water mark for IO data usage
 */
struct vio_cmo {
        spinlock_t lock;
        struct delayed_work balance_q;
        struct list_head device_list;
        size_t entitled;
        struct vio_cmo_pool reserve;
        struct vio_cmo_pool excess;
        size_t spare;
        size_t min;
        size_t desired;
        size_t curr;
        size_t high;
} vio_cmo;

/**
 * vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
 */
static int vio_cmo_num_OF_devs(void)
{
        struct device_node *node_vroot;
        int count = 0;

        /*
         * Count the number of vdevice entries with an
         * ibm,my-dma-window OF property
         */
        node_vroot = of_find_node_by_name(NULL, "vdevice");
        if (node_vroot) {
                struct device_node *of_node;
                struct property *prop;

                for_each_child_of_node(node_vroot, of_node) {
                        prop = of_find_property(of_node, "ibm,my-dma-window",
                                                NULL);
                        if (prop)
                                count++;
                }
        }
        of_node_put(node_vroot);
        return count;
}

/**
 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
 *
 * @viodev: VIO device requesting IO memory
 * @size: size of allocation requested
 *
 * Allocations come from memory reserved for the devices and any excess
 * IO memory available to all devices.  The spare pool used to service
 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
 * made available.
 *
 * Return codes:
 *  0 for successful allocation and -ENOMEM for a failure
 */
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
        unsigned long flags;
        size_t reserve_free = 0;
        size_t excess_free = 0;
        int ret = -ENOMEM;

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Determine the amount of free entitlement available in reserve */
        if (viodev->cmo.entitled > viodev->cmo.allocated)
                reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;

        /* If spare is not fulfilled, the excess pool can not be used. */
        if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
                excess_free = vio_cmo.excess.free;

        /* The request can be satisfied */
        if ((reserve_free + excess_free) >= size) {
                vio_cmo.curr += size;
                if (vio_cmo.curr > vio_cmo.high)
                        vio_cmo.high = vio_cmo.curr;
                viodev->cmo.allocated += size;
                size -= min(reserve_free, size);
                vio_cmo.excess.free -= size;
                ret = 0;
        }

        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return ret;
}
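
/*
 * Worked example of the split above (illustrative numbers only): a
 * device entitled to 64KB with 48KB already allocated has
 * reserve_free = 16KB.  A 24KB request then takes the 16KB from the
 * device's reserve and the remaining 8KB from the excess pool
 * (excess.free -= 8KB), assuming the spare pool is fully funded.
 */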

/**
 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
 * @viodev: VIO device freeing IO memory
 * @size: size of deallocation
 *
 * IO memory is freed by the device back to the correct memory pools.
 * The spare pool is replenished first from either memory pool, then
 * the reserve pool is used to reduce device entitlement, the excess
 * pool is used to increase the reserve pool toward the desired entitlement
 * target, and then the remaining memory is returned to the pools.
 */
static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
        unsigned long flags;
        size_t spare_needed = 0;
        size_t excess_freed = 0;
        size_t reserve_freed = size;
        size_t tmp;
        int balance = 0;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        vio_cmo.curr -= size;

        /* Amount of memory freed from the excess pool */
        if (viodev->cmo.allocated > viodev->cmo.entitled) {
                excess_freed = min(reserve_freed, (viodev->cmo.allocated -
                                                   viodev->cmo.entitled));
                reserve_freed -= excess_freed;
        }

        /* Remove allocation from device */
        viodev->cmo.allocated -= (reserve_freed + excess_freed);

        /* Spare is a subset of the reserve pool, replenish it first. */
        spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;

        /*
         * Replenish the spare in the reserve pool from the excess pool.
         * This moves entitlement into the reserve pool.
         */
        if (spare_needed && excess_freed) {
                tmp = min(excess_freed, spare_needed);
                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;
                vio_cmo.spare += tmp;
                excess_freed -= tmp;
                spare_needed -= tmp;
                balance = 1;
        }

        /*
         * Replenish the spare in the reserve pool from the reserve pool.
         * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
         * if needed, and gives it to the spare pool.  The amount of used
         * memory in this pool does not change.
         */
        if (spare_needed && reserve_freed) {
                tmp = min3(spare_needed, reserve_freed,
                           (viodev->cmo.entitled - VIO_CMO_MIN_ENT));

                vio_cmo.spare += tmp;
                viodev->cmo.entitled -= tmp;
                reserve_freed -= tmp;
                spare_needed -= tmp;
                balance = 1;
        }

        /*
         * Increase the reserve pool until the desired allocation is met.
         * Move an allocation freed from the excess pool into the reserve
         * pool and schedule a balance operation.
         */
        if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
                tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));

                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;
                excess_freed -= tmp;
                balance = 1;
        }

        /* Return memory from the excess pool to that pool */
        if (excess_freed)
                vio_cmo.excess.free += excess_freed;

        if (balance)
                schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

/**
 * vio_cmo_entitlement_update - Manage system entitlement changes
 *
 * @new_entitlement: new system entitlement to attempt to accommodate
 *
 * Increases in entitlement will be used to fulfill the spare entitlement
 * and the rest is given to the excess pool.  Decreases, if they are
 * possible, come from the excess pool and from unused device entitlement.
 *
 * Returns: 0 on success, -ENOMEM when the change cannot be made
 */
int vio_cmo_entitlement_update(size_t new_entitlement)
{
        struct vio_dev *viodev;
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t avail, delta, tmp;

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Entitlement increases */
        if (new_entitlement > vio_cmo.entitled) {
                delta = new_entitlement - vio_cmo.entitled;

                /* Fulfill spare allocation */
                if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
                        tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
                        vio_cmo.spare += tmp;
                        vio_cmo.reserve.size += tmp;
                        delta -= tmp;
                }

                /* Remaining new allocation goes to the excess pool */
                vio_cmo.entitled += delta;
                vio_cmo.excess.size += delta;
                vio_cmo.excess.free += delta;

                goto out;
        }

        /* Entitlement decreases */
        delta = vio_cmo.entitled - new_entitlement;
        avail = vio_cmo.excess.free;

        /*
         * Need to check how much unused entitlement each device can
         * sacrifice to fulfill entitlement change.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                if (avail >= delta)
                        break;

                viodev = dev_ent->viodev;
                if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
                    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
                        avail += viodev->cmo.entitled -
                                 max_t(size_t, viodev->cmo.allocated,
                                       VIO_CMO_MIN_ENT);
        }

        if (delta <= avail) {
                vio_cmo.entitled -= delta;

                /* Take entitlement from the excess pool first */
                tmp = min(vio_cmo.excess.free, delta);
                vio_cmo.excess.size -= tmp;
                vio_cmo.excess.free -= tmp;
                delta -= tmp;

                /*
                 * Remove all but VIO_CMO_MIN_ENT bytes from devices
                 * until entitlement change is served
                 */
                list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                        if (!delta)
                                break;

                        viodev = dev_ent->viodev;
                        tmp = 0;
                        if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
                            (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
                                tmp = viodev->cmo.entitled -
                                      max_t(size_t, viodev->cmo.allocated,
                                            VIO_CMO_MIN_ENT);
                        viodev->cmo.entitled -= min(tmp, delta);
                        delta -= min(tmp, delta);
                }
        } else {
                spin_unlock_irqrestore(&vio_cmo.lock, flags);
                return -ENOMEM;
        }

out:
        schedule_delayed_work(&vio_cmo.balance_q, 0);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return 0;
}

/**
 * vio_cmo_balance - Balance entitlement among devices
 *
 * @work: work queue structure for this operation
 *
 * Any system entitlement above the minimum needed for devices, or
 * already allocated to devices, can be distributed to the devices.
 * The list of devices is iterated through to recalculate the desired
 * entitlement level and to determine how much entitlement above the
 * minimum entitlement is allocated to devices.
 *
 * Small chunks of the available entitlement are given to devices until
 * their requirements are fulfilled or there is no entitlement left to give.
 * Upon completion, sizes of the reserve and excess pools are calculated.
 *
 * The system minimum entitlement level is also recalculated here.
 * Entitlement will be reserved for devices even after vio_bus_remove to
 * accommodate reloading the driver.  The OF tree is walked to count the
 * number of devices present and this will remove entitlement for devices
 * that have actually left the system after having vio_bus_remove called.
 */
static void vio_cmo_balance(struct work_struct *work)
{
        struct vio_cmo *cmo;
        struct vio_dev *viodev;
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t avail = 0, level, chunk, need;
        int devcount = 0, fulfilled;

        cmo = container_of(work, struct vio_cmo, balance_q.work);

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Calculate minimum entitlement and fulfill spare */
        cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
        BUG_ON(cmo->min > cmo->entitled);
        cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
        cmo->min += cmo->spare;
        cmo->desired = cmo->min;

        /*
         * Determine how much entitlement is available and reset device
         * entitlements
         */
        avail = cmo->entitled - cmo->spare;
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                viodev = dev_ent->viodev;
                devcount++;
                viodev->cmo.entitled = VIO_CMO_MIN_ENT;
                cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
                avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
        }

        /*
         * Having provided each device with the minimum entitlement, loop
         * over the devices portioning out the remaining entitlement
         * until there is nothing left.
         */
        level = VIO_CMO_MIN_ENT;
        while (avail) {
                fulfilled = 0;
                list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                        viodev = dev_ent->viodev;

                        if (viodev->cmo.desired <= level) {
                                fulfilled++;
                                continue;
                        }

                        /*
                         * Give the device up to VIO_CMO_BALANCE_CHUNK
                         * bytes of entitlement, but do not exceed the
                         * desired level of entitlement for the device.
                         */
                        chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
                        chunk = min(chunk, (viodev->cmo.desired -
                                            viodev->cmo.entitled));
                        viodev->cmo.entitled += chunk;

                        /*
                         * If the memory for this entitlement increase was
                         * already allocated to the device it does not come
                         * from the available pool being portioned out.
                         */
                        need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
                               max(viodev->cmo.allocated, level);
                        avail -= need;

                }
                if (fulfilled == devcount)
                        break;
                level += VIO_CMO_BALANCE_CHUNK;
        }

        /* Calculate new reserve and excess pool sizes */
        cmo->reserve.size = cmo->min;
        cmo->excess.free = 0;
        cmo->excess.size = 0;
        need = 0;
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                viodev = dev_ent->viodev;
                /* Calculated reserve size above the minimum entitlement */
                if (viodev->cmo.entitled)
                        cmo->reserve.size += (viodev->cmo.entitled -
                                              VIO_CMO_MIN_ENT);
                /* Calculated used excess entitlement */
                if (viodev->cmo.allocated > viodev->cmo.entitled)
                        need += viodev->cmo.allocated - viodev->cmo.entitled;
        }
        cmo->excess.size = cmo->entitled - cmo->reserve.size;
        cmo->excess.free = cmo->excess.size - need;

        cancel_delayed_work(to_delayed_work(work));
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
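
/*
 * Illustrative numbers for the loop above: with 192KB available and two
 * devices wanting 256KB and 128KB over the minimum, the first pass gives
 * the first device one 128KB chunk (VIO_CMO_BALANCE_CHUNK) and the second
 * device the remaining 64KB.  A larger available pool would take further
 * passes, with "level" rising by one chunk per pass, before the first
 * device's desired level is met.
 */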

static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
                                          dma_addr_t *dma_handle, gfp_t flag)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        void *ret;

        if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
                atomic_inc(&viodev->cmo.allocs_failed);
                return NULL;
        }

        ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag);
        if (unlikely(ret == NULL)) {
                vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
                atomic_inc(&viodev->cmo.allocs_failed);
        }

        return ret;
}

static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
                                        void *vaddr, dma_addr_t dma_handle)
{
        struct vio_dev *viodev = to_vio_dev(dev);

        dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle);

        vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}

static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        dma_addr_t ret = DMA_ERROR_CODE;

        if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
                atomic_inc(&viodev->cmo.allocs_failed);
                return ret;
        }

        ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
        if (unlikely(dma_mapping_error(dev, ret))) {
                vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
                atomic_inc(&viodev->cmo.allocs_failed);
        }

        return ret;
}

static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
                                     size_t size,
                                     enum dma_data_direction direction,
                                     struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);

        dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);

        vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
}

static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                                int nelems, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct scatterlist *sgl;
        int ret, count = 0;
        size_t alloc_size = 0;

        for (sgl = sglist; count < nelems; count++, sgl++)
                alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE);

        if (vio_cmo_alloc(viodev, alloc_size)) {
                atomic_inc(&viodev->cmo.allocs_failed);
                return 0;
        }

        ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);

        if (unlikely(!ret)) {
                vio_cmo_dealloc(viodev, alloc_size);
                atomic_inc(&viodev->cmo.allocs_failed);
                return ret;
        }

        for (sgl = sglist, count = 0; count < ret; count++, sgl++)
                alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
        if (alloc_size)
                vio_cmo_dealloc(viodev, alloc_size);

        return ret;
}

static void vio_dma_iommu_unmap_sg(struct device *dev,
                                   struct scatterlist *sglist, int nelems,
                                   enum dma_data_direction direction,
                                   struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct scatterlist *sgl;
        size_t alloc_size = 0;
        int count = 0;

        for (sgl = sglist; count < nelems; count++, sgl++)
                alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE);

        dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);

        vio_cmo_dealloc(viodev, alloc_size);
}

static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
{
        return dma_iommu_ops.dma_supported(dev, mask);
}

static u64 vio_dma_get_required_mask(struct device *dev)
{
        return dma_iommu_ops.get_required_mask(dev);
}

struct dma_map_ops vio_dma_mapping_ops = {
        .alloc_coherent    = vio_dma_iommu_alloc_coherent,
        .free_coherent     = vio_dma_iommu_free_coherent,
        .map_sg            = vio_dma_iommu_map_sg,
        .unmap_sg          = vio_dma_iommu_unmap_sg,
        .map_page          = vio_dma_iommu_map_page,
        .unmap_page        = vio_dma_iommu_unmap_page,
        .dma_supported     = vio_dma_iommu_dma_supported,
        .get_required_mask = vio_dma_get_required_mask,
};
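
/*
 * With these ops installed, a driver uses the generic DMA API unchanged
 * and the CMO accounting happens underneath.  Sketch (buffer and length
 * are hypothetical):
 *
 *	dma_addr_t addr = dma_map_single(&viodev->dev, buf, len,
 *	                                 DMA_TO_DEVICE);
 *	if (dma_mapping_error(&viodev->dev, addr))
 *		return -EIO;	 (vio_cmo_alloc or the IOMMU map failed)
 */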

/**
 * vio_cmo_set_dev_desired - Set desired entitlement for a device
 *
 * @viodev: struct vio_dev for device to alter
 * @desired: new desired entitlement level in bytes
 *
 * For use by devices to request a change to their entitlement at runtime or
 * through sysfs.  The desired entitlement level is changed and a balancing
 * of system resources is scheduled to run in the future.
 */
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
{
        unsigned long flags;
        struct vio_cmo_dev_entry *dev_ent;
        int found = 0;

        if (!firmware_has_feature(FW_FEATURE_CMO))
                return;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        if (desired < VIO_CMO_MIN_ENT)
                desired = VIO_CMO_MIN_ENT;

        /*
         * Changes will not be made for devices not in the device list.
         * If it is not in the device list, then no driver is loaded
         * for the device and it can not receive entitlement.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
                if (viodev == dev_ent->viodev) {
                        found = 1;
                        break;
                }
        if (!found) {
                spin_unlock_irqrestore(&vio_cmo.lock, flags);
                return;
        }

        /* Increase/decrease in desired device entitlement */
        if (desired >= viodev->cmo.desired) {
                /* Just bump the bus and device values prior to a balance */
                vio_cmo.desired += desired - viodev->cmo.desired;
                viodev->cmo.desired = desired;
        } else {
                /* Decrease bus and device values for desired entitlement */
                vio_cmo.desired -= viodev->cmo.desired - desired;
                viodev->cmo.desired = desired;
                /*
                 * If less entitlement is desired than current entitlement,
                 * move any reserve memory in the change region to the
                 * excess pool.
                 */
                if (viodev->cmo.entitled > desired) {
                        vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
                        vio_cmo.excess.size += viodev->cmo.entitled - desired;
                        /*
                         * If entitlement moving from the reserve pool to the
                         * excess pool is currently unused, add to the excess
                         * free counter.
                         */
                        if (viodev->cmo.allocated < viodev->cmo.entitled)
                                vio_cmo.excess.free += viodev->cmo.entitled -
                                        max(viodev->cmo.allocated, desired);
                        viodev->cmo.entitled = desired;
                }
        }
        schedule_delayed_work(&vio_cmo.balance_q, 0);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

/**
 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
 *
 * @viodev: Pointer to struct vio_dev for device
 *
 * Determine the device's IO memory entitlement needs, attempting
 * to satisfy the system minimum entitlement at first and scheduling
 * a balance operation to take care of the rest at a later time.
 *
 * Returns: 0 on success, -EINVAL when device doesn't support CMO, and
 *          -ENOMEM when entitlement is not available for device or
 *          device entry.
 */
static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
        struct vio_cmo_dev_entry *dev_ent;
        struct device *dev = &viodev->dev;
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        unsigned long flags;
        size_t size;

        /*
         * Check to see that device has a DMA window and configure
         * entitlement for the device.
         */
        if (of_get_property(viodev->dev.of_node,
                            "ibm,my-dma-window", NULL)) {
                /* Check that the driver is CMO enabled and get desired DMA */
                if (!viodrv->get_desired_dma) {
                        dev_err(dev, "%s: device driver does not support CMO\n",
                                __func__);
                        return -EINVAL;
                }

                viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
                if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
                        viodev->cmo.desired = VIO_CMO_MIN_ENT;
                size = VIO_CMO_MIN_ENT;

                dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
                                  GFP_KERNEL);
                if (!dev_ent)
                        return -ENOMEM;

                dev_ent->viodev = viodev;
                spin_lock_irqsave(&vio_cmo.lock, flags);
                list_add(&dev_ent->list, &vio_cmo.device_list);
        } else {
                viodev->cmo.desired = 0;
                size = 0;
                spin_lock_irqsave(&vio_cmo.lock, flags);
        }

        /*
         * If the needs for vio_cmo.min have not changed since they
         * were last set, the number of devices in the OF tree has
         * been constant and the IO memory for this is already in
         * the reserve pool.
         */
        if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
                            VIO_CMO_MIN_ENT)) {
                /* Update desired entitlement if the device requires it */
                if (size)
                        vio_cmo.desired += (viodev->cmo.desired -
                                            VIO_CMO_MIN_ENT);
        } else {
                size_t tmp;

                tmp = vio_cmo.spare + vio_cmo.excess.free;
                if (tmp < size) {
                        dev_err(dev, "%s: insufficient free "
                                "entitlement to add device. "
                                "Need %lu, have %lu\n", __func__,
                                size, tmp);
                        spin_unlock_irqrestore(&vio_cmo.lock, flags);
                        return -ENOMEM;
                }

                /* Use excess pool first to fulfill request */
                tmp = min(size, vio_cmo.excess.free);
                vio_cmo.excess.free -= tmp;
                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;

                /* Use spare if excess pool was insufficient */
                vio_cmo.spare -= size - tmp;

                /* Update bus accounting */
                vio_cmo.min += size;
                vio_cmo.desired += viodev->cmo.desired;
        }
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return 0;
}

/**
 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
 *
 * @viodev: Pointer to struct vio_dev for device
 *
 * Remove the device from the cmo device list.  The minimum entitlement
 * will be reserved for the device as long as it is in the system.  The
 * rest of the entitlement the device had been allocated will be returned
 * to the system.
 */
static void vio_cmo_bus_remove(struct vio_dev *viodev)
{
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t tmp;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        if (viodev->cmo.allocated) {
                dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
                        "allocated after remove operation.\n",
                        __func__, viodev->cmo.allocated);
                BUG();
        }

        /*
         * Remove the device from the device list being maintained for
         * CMO enabled devices.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
                if (viodev == dev_ent->viodev) {
                        list_del(&dev_ent->list);
                        kfree(dev_ent);
                        break;
                }

        /*
         * Devices may not require any entitlement and they do not need
         * to be processed.  Otherwise, return the device's entitlement
         * back to the pools.
         */
        if (viodev->cmo.entitled) {
                /*
                 * This device has not yet left the OF tree, its
                 * minimum entitlement remains in vio_cmo.min and
                 * vio_cmo.desired
                 */
                vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);

                /*
                 * Save min allocation for device in reserve as long
                 * as it exists in OF tree as determined by later
                 * balance operation
                 */
                viodev->cmo.entitled -= VIO_CMO_MIN_ENT;

                /* Replenish spare from freed reserve pool */
                if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
                        tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
                                                         vio_cmo.spare));
                        vio_cmo.spare += tmp;
                        viodev->cmo.entitled -= tmp;
                }

                /* Remaining reserve goes to excess pool */
                vio_cmo.excess.size += viodev->cmo.entitled;
                vio_cmo.excess.free += viodev->cmo.entitled;
                vio_cmo.reserve.size -= viodev->cmo.entitled;

                /*
                 * Until the device is removed it will keep a
                 * minimum entitlement; this will guarantee that
                 * a module unload/load will result in success.
                 */
                viodev->cmo.entitled = VIO_CMO_MIN_ENT;
                viodev->cmo.desired = VIO_CMO_MIN_ENT;
                atomic_set(&viodev->cmo.allocs_failed, 0);
        }

        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
        set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
}

/**
 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
 *
 * Set up the reserve and excess entitlement pools based on available
 * system entitlement and the number of devices in the OF tree that
 * require entitlement in the reserve pool.
 */
static void vio_cmo_bus_init(void)
{
        struct hvcall_mpp_data mpp_data;
        int err;

        memset(&vio_cmo, 0, sizeof(struct vio_cmo));
        spin_lock_init(&vio_cmo.lock);
        INIT_LIST_HEAD(&vio_cmo.device_list);
        INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);

        /* Get current system entitlement */
        err = h_get_mpp(&mpp_data);

        /*
         * On failure, continue with entitlement set to 0, will panic()
         * later when spare is reserved.
         */
        if (err != H_SUCCESS) {
                printk(KERN_ERR "%s: unable to determine system IO "
                       "entitlement. (%d)\n", __func__, err);
                vio_cmo.entitled = 0;
        } else {
                vio_cmo.entitled = mpp_data.entitled_mem;
        }

        /* Set reservation and check against entitlement */
        vio_cmo.spare = VIO_CMO_MIN_ENT;
        vio_cmo.reserve.size = vio_cmo.spare;
        vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
                                 VIO_CMO_MIN_ENT);
        if (vio_cmo.reserve.size > vio_cmo.entitled) {
                printk(KERN_ERR "%s: insufficient system entitlement\n",
                       __func__);
                panic("%s: Insufficient system entitlement", __func__);
        }

        /* Set the remaining accounting variables */
        vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
        vio_cmo.excess.free = vio_cmo.excess.size;
        vio_cmo.min = vio_cmo.reserve.size;
        vio_cmo.desired = vio_cmo.reserve.size;
}

/* sysfs device functions and data structures for CMO */

#define viodev_cmo_rd_attr(name)                                        \
static ssize_t viodev_cmo_##name##_show(struct device *dev,             \
                                        struct device_attribute *attr,  \
                                        char *buf)                      \
{                                                                       \
        return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);        \
}
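
/*
 * For example, viodev_cmo_rd_attr(desired) below expands to:
 *
 *	static ssize_t viodev_cmo_desired_show(struct device *dev,
 *	                                       struct device_attribute *attr,
 *	                                       char *buf)
 *	{
 *		return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.desired);
 *	}
 */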

static ssize_t viodev_cmo_allocs_failed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
}

static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        atomic_set(&viodev->cmo.allocs_failed, 0);
        return count;
}

static ssize_t viodev_cmo_desired_set(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        size_t new_desired;
        int ret;

        ret = strict_strtoul(buf, 10, &new_desired);
        if (ret)
                return ret;

        vio_cmo_set_dev_desired(viodev, new_desired);
        return count;
}

viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);

static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf);
static struct device_attribute vio_cmo_dev_attrs[] = {
        __ATTR_RO(name),
        __ATTR_RO(devspec),
        __ATTR_RO(modalias),
        __ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
               viodev_cmo_desired_show, viodev_cmo_desired_set),
        __ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL),
        __ATTR(cmo_allocated, S_IRUGO, viodev_cmo_allocated_show, NULL),
        __ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
               viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset),
        __ATTR_NULL
};
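
/*
 * These attributes surface per-device CMO state in sysfs; e.g. (paths
 * and unit address illustrative):
 *
 *	cat /sys/bus/vio/devices/30000002/cmo_entitled
 *	echo 1048576 > /sys/bus/vio/devices/30000002/cmo_desired
 *
 * The latter write reaches vio_cmo_set_dev_desired() via
 * viodev_cmo_desired_set() above.
 */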

/* sysfs bus functions and data structures for CMO */

#define viobus_cmo_rd_attr(name)                                \
static ssize_t                                                  \
viobus_cmo_##name##_show(struct bus_type *bt, char *buf)        \
{                                                               \
        return sprintf(buf, "%lu\n", vio_cmo.name);             \
}

#define viobus_cmo_pool_rd_attr(name, var)                              \
static ssize_t                                                          \
viobus_cmo_##name##_pool_show_##var(struct bus_type *bt, char *buf)     \
{                                                                       \
        return sprintf(buf, "%lu\n", vio_cmo.name.var);                 \
}

static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf,
                                     size_t count)
{
        unsigned long flags;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        vio_cmo.high = vio_cmo.curr;
        spin_unlock_irqrestore(&vio_cmo.lock, flags);

        return count;
}

viobus_cmo_rd_attr(entitled);
viobus_cmo_pool_rd_attr(reserve, size);
viobus_cmo_pool_rd_attr(excess, size);
viobus_cmo_pool_rd_attr(excess, free);
viobus_cmo_rd_attr(spare);
viobus_cmo_rd_attr(min);
viobus_cmo_rd_attr(desired);
viobus_cmo_rd_attr(curr);
viobus_cmo_rd_attr(high);

static struct bus_attribute vio_cmo_bus_attrs[] = {
        __ATTR(cmo_entitled, S_IRUGO, viobus_cmo_entitled_show, NULL),
        __ATTR(cmo_reserve_size, S_IRUGO, viobus_cmo_reserve_pool_show_size, NULL),
        __ATTR(cmo_excess_size, S_IRUGO, viobus_cmo_excess_pool_show_size, NULL),
        __ATTR(cmo_excess_free, S_IRUGO, viobus_cmo_excess_pool_show_free, NULL),
        __ATTR(cmo_spare, S_IRUGO, viobus_cmo_spare_show, NULL),
        __ATTR(cmo_min, S_IRUGO, viobus_cmo_min_show, NULL),
        __ATTR(cmo_desired, S_IRUGO, viobus_cmo_desired_show, NULL),
        __ATTR(cmo_curr, S_IRUGO, viobus_cmo_curr_show, NULL),
        __ATTR(cmo_high, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
               viobus_cmo_high_show, viobus_cmo_high_reset),
        __ATTR_NULL
};

static void vio_cmo_sysfs_init(void)
{
        vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
        vio_bus_type.bus_attrs = vio_cmo_bus_attrs;
}
#else /* CONFIG_PPC_SMLPAR */
/* Dummy functions for iSeries platform */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
static void vio_cmo_sysfs_init(void) {}
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);

static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
        const unsigned char *dma_window;
        struct iommu_table *tbl;
        unsigned long offset, size;

        if (firmware_has_feature(FW_FEATURE_ISERIES))
                return vio_build_iommu_table_iseries(dev);

        dma_window = of_get_property(dev->dev.of_node,
                                     "ibm,my-dma-window", NULL);
        if (!dma_window)
                return NULL;

        tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
        if (tbl == NULL)
                return NULL;

        of_parse_dma_window(dev->dev.of_node, dma_window,
                            &tbl->it_index, &offset, &size);

        /* TCE table size - measured in tce entries */
        tbl->it_size = size >> IOMMU_PAGE_SHIFT;
        /* offset for VIO should always be 0 */
        tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
        tbl->it_busno = 0;
        tbl->it_type = TCE_VB;
        tbl->it_blocksize = 16;

        return iommu_init_table(tbl, -1);
}

/**
 * vio_match_device: - Tell if a VIO device has a matching
 *                     VIO device id structure.
 * @ids: array of VIO device id structures to search in
 * @dev: the VIO device structure to match against
 *
 * Used by a driver to check whether a VIO device present in the
 * system is in its list of supported devices.  Returns the matching
 * vio_device_id structure or NULL if there is no match.
 */
static const struct vio_device_id *vio_match_device(
                const struct vio_device_id *ids, const struct vio_dev *dev)
{
        while (ids->type[0] != '\0') {
                if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
                    of_device_is_compatible(dev->dev.of_node,
                                            ids->compat))
                        return ids;
                ids++;
        }
        return NULL;
}
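
/*
 * A driver's id table, as matched above, pairs a device_type string
 * with a "compatible" string and ends with an empty sentinel entry.
 * Illustrative sketch (names hypothetical, modeled on the virtual
 * ethernet driver):
 *
 *	static struct vio_device_id example_device_table[] = {
 *		{ "network", "IBM,l-lan" },
 *		{ "", "" },
 *	};
 */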

/*
 * Convert from struct device to struct vio_dev and pass to driver.
 * dev->driver has already been set by generic code because vio_bus_match
 * succeeded.
 */
static int vio_bus_probe(struct device *dev)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        const struct vio_device_id *id;
        int error = -ENODEV;

        if (!viodrv->probe)
                return error;

        id = vio_match_device(viodrv->id_table, viodev);
        if (id) {
                memset(&viodev->cmo, 0, sizeof(viodev->cmo));
                if (firmware_has_feature(FW_FEATURE_CMO)) {
                        error = vio_cmo_bus_probe(viodev);
                        if (error)
                                return error;
                }
                error = viodrv->probe(viodev, id);
                if (error && firmware_has_feature(FW_FEATURE_CMO))
                        vio_cmo_bus_remove(viodev);
        }

        return error;
}

/* convert from struct device to struct vio_dev and pass to driver. */
static int vio_bus_remove(struct device *dev)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        struct device *devptr;
        int ret = 1;

        /*
         * Hold a reference to the device after the remove function is called
         * to allow for CMO accounting cleanup for the device.
         */
        devptr = get_device(dev);

        if (viodrv->remove)
                ret = viodrv->remove(viodev);

        if (!ret && firmware_has_feature(FW_FEATURE_CMO))
                vio_cmo_bus_remove(viodev);

        put_device(devptr);
        return ret;
}

/**
 * vio_register_driver: - Register a new vio driver
 * @viodrv: The vio_driver structure to be registered.
 */
int vio_register_driver(struct vio_driver *viodrv)
{
        printk(KERN_DEBUG "%s: driver %s registering\n", __func__,
               viodrv->driver.name);

        /* fill in 'struct driver' fields */
        viodrv->driver.bus = &vio_bus_type;

        return driver_register(&viodrv->driver);
}
EXPORT_SYMBOL(vio_register_driver);
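
/*
 * Registration sketch for a hypothetical driver (illustrative only;
 * real drivers also set .get_desired_dma when CMO support is needed):
 *
 *	static struct vio_driver example_driver = {
 *		.id_table = example_device_table,
 *		.probe    = example_probe,
 *		.remove   = example_remove,
 *		.driver   = {
 *			.name  = "example",
 *			.owner = THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return vio_register_driver(&example_driver);
 *	}
 */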

/**
 * vio_unregister_driver - Remove registration of vio driver.
 * @viodrv: The vio_driver struct to be removed from registration
 */
void vio_unregister_driver(struct vio_driver *viodrv)
{
        driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);

/* vio_dev refcount hit 0 */
static void __devinit vio_dev_release(struct device *dev)
{
        struct iommu_table *tbl = get_iommu_table_base(dev);

        /* iSeries uses a common table for all vio devices */
        if (!firmware_has_feature(FW_FEATURE_ISERIES) && tbl)
                iommu_free_table(tbl, dev->of_node ?
                                 dev->of_node->full_name : dev_name(dev));
        of_node_put(dev->of_node);
        kfree(to_vio_dev(dev));
}

/**
 * vio_register_device_node: - Register a new vio device.
 * @of_node: The OF node for this device.
 *
 * Creates and initializes a vio_dev structure from the data in
 * of_node and adds it to the list of virtual devices.
 * Returns a pointer to the created vio_dev or NULL if node has
 * NULL device_type or compatible fields.
 */
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
        struct vio_dev *viodev;
        const unsigned int *unit_address;

        /* we need the 'device_type' property, in order to match with drivers */
        if (of_node->type == NULL) {
                printk(KERN_WARNING "%s: node %s missing 'device_type'\n",
                       __func__,
                       of_node->name ? of_node->name : "<unknown>");
                return NULL;
        }

        unit_address = of_get_property(of_node, "reg", NULL);
        if (unit_address == NULL) {
                printk(KERN_WARNING "%s: node %s missing 'reg'\n",
                       __func__,
                       of_node->name ? of_node->name : "<unknown>");
                return NULL;
        }

        /* allocate a vio_dev for this node */
        viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
        if (viodev == NULL)
                return NULL;

        viodev->irq = irq_of_parse_and_map(of_node, 0);

        dev_set_name(&viodev->dev, "%x", *unit_address);
        viodev->name = of_node->name;
        viodev->type = of_node->type;
        viodev->unit_address = *unit_address;
        if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                unit_address = of_get_property(of_node,
                                               "linux,unit_address", NULL);
                if (unit_address != NULL)
                        viodev->unit_address = *unit_address;
        }
        viodev->dev.of_node = of_node_get(of_node);

        if (firmware_has_feature(FW_FEATURE_CMO))
                vio_cmo_set_dma_ops(viodev);
        else
                set_dma_ops(&viodev->dev, &dma_iommu_ops);
        set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev));
        set_dev_node(&viodev->dev, of_node_to_nid(of_node));

        /* init generic 'struct device' fields: */
        viodev->dev.parent = &vio_bus_device.dev;
        viodev->dev.bus = &vio_bus_type;
        viodev->dev.release = vio_dev_release;
        /* needed to ensure proper operation of coherent allocations
         * later, in case driver doesn't set it explicitly */
        dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
        dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));

        /* register with generic device framework */
        if (device_register(&viodev->dev)) {
                printk(KERN_ERR "%s: failed to register device %s\n",
                       __func__, dev_name(&viodev->dev));
                put_device(&viodev->dev);
                return NULL;
        }

        return viodev;
}
EXPORT_SYMBOL(vio_register_device_node);

/**
 * vio_bus_init: - Initialize the virtual IO bus
 */
static int __init vio_bus_init(void)
{
        int err;
        struct device_node *node_vroot;

        if (firmware_has_feature(FW_FEATURE_CMO))
                vio_cmo_sysfs_init();

        err = bus_register(&vio_bus_type);
        if (err) {
                printk(KERN_ERR "failed to register VIO bus\n");
                return err;
        }

        /*
         * The fake parent of all vio devices, just to give us
         * a nice directory
         */
        err = device_register(&vio_bus_device.dev);
        if (err) {
                printk(KERN_WARNING "%s: device_register returned %i\n",
                       __func__, err);
                return err;
        }

        if (firmware_has_feature(FW_FEATURE_CMO))
                vio_cmo_bus_init();

        node_vroot = of_find_node_by_name(NULL, "vdevice");
        if (node_vroot) {
                struct device_node *of_node;

                /*
                 * Create struct vio_devices for each virtual device in
                 * the device tree.  Drivers will associate with them later.
                 */
                for (of_node = node_vroot->child; of_node != NULL;
                     of_node = of_node->sibling)
                        vio_register_device_node(of_node);
                of_node_put(node_vroot);
        }

        return 0;
}
__initcall(vio_bus_init);
e10fa773 1331static ssize_t name_show(struct device *dev,
5c0b4b87 1332 struct device_attribute *attr, char *buf)
1da177e4
LT
1333{
1334 return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
1335}
e10fa773
SR
1336
1337static ssize_t devspec_show(struct device *dev,
1338 struct device_attribute *attr, char *buf)
1339{
58f9b0b0 1340 struct device_node *of_node = dev->of_node;
e10fa773
SR
1341
1342 return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
1343}
1344
578b7cd1
BH
1345static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
1346 char *buf)
1347{
1348 const struct vio_dev *vio_dev = to_vio_dev(dev);
1349 struct device_node *dn;
1350 const char *cp;
1351
cf9b59e9 1352 dn = dev->of_node;
578b7cd1
BH
1353 if (!dn)
1354 return -ENODEV;
1355 cp = of_get_property(dn, "compatible", NULL);
1356 if (!cp)
1357 return -ENODEV;
1358
1359 return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
1360}
1361
e10fa773
SR
1362static struct device_attribute vio_dev_attrs[] = {
1363 __ATTR_RO(name),
1364 __ATTR_RO(devspec),
578b7cd1 1365 __ATTR_RO(modalias),
e10fa773
SR
1366 __ATTR_NULL
1367};

void __devinit vio_unregister_device(struct vio_dev *viodev)
{
        device_unregister(&viodev->dev);
}
EXPORT_SYMBOL(vio_unregister_device);

static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
        const struct vio_dev *vio_dev = to_vio_dev(dev);
        struct vio_driver *vio_drv = to_vio_driver(drv);
        const struct vio_device_id *ids = vio_drv->id_table;

        return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
}

static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
{
        const struct vio_dev *vio_dev = to_vio_dev(dev);
        struct device_node *dn;
        const char *cp;

        dn = dev->of_node;
        if (!dn)
                return -ENODEV;
        cp = of_get_property(dn, "compatible", NULL);
        if (!cp)
                return -ENODEV;

        add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
        return 0;
}

static struct bus_type vio_bus_type = {
        .name = "vio",
        .dev_attrs = vio_dev_attrs,
        .uevent = vio_hotplug,
        .match = vio_bus_match,
        .probe = vio_bus_probe,
        .remove = vio_bus_remove,
        .pm = GENERIC_SUBSYS_PM_OPS,
};

/**
 * vio_get_attribute: - get attribute for virtual device
 * @vdev: The vio device to get property.
 * @which: The property/attribute to be extracted.
 * @length: Pointer to length of returned data size (unused if NULL).
 *
 * Calls prom.c's of_get_property() to return the value of the
 * attribute specified by @which.
 */
const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
{
        return of_get_property(vdev->dev.of_node, which, length);
}
EXPORT_SYMBOL(vio_get_attribute);
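
/*
 * Usage sketch (hypothetical caller): a network driver fetching its
 * MAC address property from the device node:
 *
 *	const unsigned char *mac;
 *
 *	mac = vio_get_attribute(vdev, "local-mac-address", NULL);
 *	if (mac == NULL)
 *		return -EINVAL;
 */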

#ifdef CONFIG_PPC_PSERIES
/* vio_find_name() - internal because only vio.c knows how we formatted the
 * kobject name
 */
static struct vio_dev *vio_find_name(const char *name)
{
        struct device *found;

        found = bus_find_device_by_name(&vio_bus_type, NULL, name);
        if (!found)
                return NULL;

        return to_vio_dev(found);
}

/**
 * vio_find_node - find an already-registered vio_dev
 * @vnode: device_node of the virtual device we're looking for
 */
struct vio_dev *vio_find_node(struct device_node *vnode)
{
        const uint32_t *unit_address;
        char kobj_name[20];

        /* construct the kobject name from the device node */
        unit_address = of_get_property(vnode, "reg", NULL);
        if (!unit_address)
                return NULL;
        snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address);

        return vio_find_name(kobj_name);
}
EXPORT_SYMBOL(vio_find_node);

int vio_enable_interrupts(struct vio_dev *dev)
{
        int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
        if (rc != H_SUCCESS)
                printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
        return rc;
}
EXPORT_SYMBOL(vio_enable_interrupts);

int vio_disable_interrupts(struct vio_dev *dev)
{
        int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
        if (rc != H_SUCCESS)
                printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
        return rc;
}
EXPORT_SYMBOL(vio_disable_interrupts);
#endif /* CONFIG_PPC_PSERIES */