/*
 * IBM PowerPC Virtual I/O Infrastructure Support.
 *
 * Copyright (c) 2003,2008 IBM Corp.
 *  Dave Engebretsen engebret@us.ibm.com
 *  Santiago Leon santil@us.ibm.com
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *  Stephen Rothwell
 *  Robert Jennings <rcjenn@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kobject.h>

#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/abs_addr.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/iseries/vio.h>
#include <asm/iseries/hv_types.h>
#include <asm/iseries/hv_lp_config.h>
#include <asm/iseries/hv_call_xm.h>
#include <asm/iseries/iommu.h>

static struct bus_type vio_bus_type;

static struct vio_dev vio_bus_device = { /* fake "parent" device */
        .name = "vio",
        .type = "",
        .dev.init_name = "vio",
        .dev.bus = &vio_bus_type,
};

#ifdef CONFIG_PPC_SMLPAR
/**
 * vio_cmo_pool - A pool of IO memory for CMO use
 *
 * @size: The size of the pool in bytes
 * @free: The amount of free memory in the pool
 */
struct vio_cmo_pool {
        size_t size;
        size_t free;
};

/* How many ms to delay queued balance work */
#define VIO_CMO_BALANCE_DELAY 100

/* Portion out IO memory to CMO devices by this chunk size */
#define VIO_CMO_BALANCE_CHUNK 131072

/**
 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
 *
 * @viodev: struct vio_dev pointer
 * @list: pointer to other devices on bus that are being tracked
 */
struct vio_cmo_dev_entry {
        struct vio_dev *viodev;
        struct list_head list;
};

/**
 * vio_cmo - VIO bus accounting structure for CMO entitlement
 *
 * @lock: spinlock for entire structure
 * @balance_q: work queue for balancing system entitlement
 * @device_list: list of CMO-enabled devices requiring entitlement
 * @entitled: total system entitlement in bytes
 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
 * @excess: pool of excess entitlement not needed for device reserves or spare
 * @spare: IO memory for device hotplug functionality
 * @min: minimum necessary for system operation
 * @desired: desired memory for system operation
 * @curr: bytes currently allocated
 * @high: high water mark for IO data usage
 */
struct vio_cmo {
        spinlock_t lock;
        struct delayed_work balance_q;
        struct list_head device_list;
        size_t entitled;
        struct vio_cmo_pool reserve;
        struct vio_cmo_pool excess;
        size_t spare;
        size_t min;
        size_t desired;
        size_t curr;
        size_t high;
} vio_cmo;

/**
 * vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
 */
static int vio_cmo_num_OF_devs(void)
{
        struct device_node *node_vroot;
        int count = 0;

        /*
         * Count the number of vdevice entries with an
         * ibm,my-dma-window OF property
         */
        node_vroot = of_find_node_by_name(NULL, "vdevice");
        if (node_vroot) {
                struct device_node *of_node;
                struct property *prop;

                for_each_child_of_node(node_vroot, of_node) {
                        prop = of_find_property(of_node, "ibm,my-dma-window",
                                                NULL);
                        if (prop)
                                count++;
                }
        }
        of_node_put(node_vroot);
        return count;
}

/**
 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
 *
 * @viodev: VIO device requesting IO memory
 * @size: size of allocation requested
 *
 * Allocations come from memory reserved for the devices and any excess
 * IO memory available to all devices.  The spare pool used to service
 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
 * made available.
 *
 * Return codes:
 *  0 for successful allocation and -ENOMEM for a failure
 */
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
        unsigned long flags;
        size_t reserve_free = 0;
        size_t excess_free = 0;
        int ret = -ENOMEM;

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Determine the amount of free entitlement available in reserve */
        if (viodev->cmo.entitled > viodev->cmo.allocated)
                reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;

        /* If spare is not fulfilled, the excess pool cannot be used. */
        if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
                excess_free = vio_cmo.excess.free;

        /* The request can be satisfied */
        if ((reserve_free + excess_free) >= size) {
                vio_cmo.curr += size;
                if (vio_cmo.curr > vio_cmo.high)
                        vio_cmo.high = vio_cmo.curr;
                viodev->cmo.allocated += size;
                size -= min(reserve_free, size);
                vio_cmo.excess.free -= size;
                ret = 0;
        }

        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return ret;
}

/**
 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
 * @viodev: VIO device freeing IO memory
 * @size: size of deallocation
 *
 * IO memory is freed by the device back to the correct memory pools.
 * The spare pool is replenished first from either memory pool, then
 * the reserve pool is used to reduce device entitlement, the excess
 * pool is used to increase the reserve pool toward the desired entitlement
 * target, and then the remaining memory is returned to the pools.
 */
static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
        unsigned long flags;
        size_t spare_needed = 0;
        size_t excess_freed = 0;
        size_t reserve_freed = size;
        size_t tmp;
        int balance = 0;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        vio_cmo.curr -= size;

        /* Amount of memory freed from the excess pool */
        if (viodev->cmo.allocated > viodev->cmo.entitled) {
                excess_freed = min(reserve_freed, (viodev->cmo.allocated -
                                                   viodev->cmo.entitled));
                reserve_freed -= excess_freed;
        }

        /* Remove allocation from device */
        viodev->cmo.allocated -= (reserve_freed + excess_freed);

        /* Spare is a subset of the reserve pool, replenish it first. */
        spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;

        /*
         * Replenish the spare in the reserve pool from the excess pool.
         * This moves entitlement into the reserve pool.
         */
        if (spare_needed && excess_freed) {
                tmp = min(excess_freed, spare_needed);
                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;
                vio_cmo.spare += tmp;
                excess_freed -= tmp;
                spare_needed -= tmp;
                balance = 1;
        }

        /*
         * Replenish the spare in the reserve pool from the reserve pool.
         * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
         * if needed, and gives it to the spare pool.  The amount of used
         * memory in this pool does not change.
         */
        if (spare_needed && reserve_freed) {
                tmp = min(spare_needed, min(reserve_freed,
                                            (viodev->cmo.entitled -
                                             VIO_CMO_MIN_ENT)));

                vio_cmo.spare += tmp;
                viodev->cmo.entitled -= tmp;
                reserve_freed -= tmp;
                spare_needed -= tmp;
                balance = 1;
        }

        /*
         * Increase the reserve pool until the desired allocation is met.
         * Move an allocation freed from the excess pool into the reserve
         * pool and schedule a balance operation.
         */
        if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
                tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));

                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;
                excess_freed -= tmp;
                balance = 1;
        }

        /* Return memory from the excess pool to that pool */
        if (excess_freed)
                vio_cmo.excess.free += excess_freed;

        if (balance)
                schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

/**
 * vio_cmo_entitlement_update - Manage system entitlement changes
 *
 * @new_entitlement: new system entitlement to attempt to accommodate
 *
 * Increases in entitlement will be used to fulfill the spare entitlement
 * and the rest is given to the excess pool.  Decreases, if they are
 * possible, come from the excess pool and from unused device entitlement.
 *
 * Returns: 0 on success, -ENOMEM when the change cannot be made
 */
int vio_cmo_entitlement_update(size_t new_entitlement)
{
        struct vio_dev *viodev;
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t avail, delta, tmp;

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Entitlement increases */
        if (new_entitlement > vio_cmo.entitled) {
                delta = new_entitlement - vio_cmo.entitled;

                /* Fulfill spare allocation */
                if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
                        tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
                        vio_cmo.spare += tmp;
                        vio_cmo.reserve.size += tmp;
                        delta -= tmp;
                }

                /* Remaining new allocation goes to the excess pool */
                vio_cmo.entitled += delta;
                vio_cmo.excess.size += delta;
                vio_cmo.excess.free += delta;

                goto out;
        }

        /* Entitlement decreases */
        delta = vio_cmo.entitled - new_entitlement;
        avail = vio_cmo.excess.free;

        /*
         * Need to check how much unused entitlement each device can
         * sacrifice to fulfill entitlement change.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                if (avail >= delta)
                        break;

                viodev = dev_ent->viodev;
                if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
                    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
                        avail += viodev->cmo.entitled -
                                 max_t(size_t, viodev->cmo.allocated,
                                       VIO_CMO_MIN_ENT);
        }

        if (delta <= avail) {
                vio_cmo.entitled -= delta;

                /* Take entitlement from the excess pool first */
                tmp = min(vio_cmo.excess.free, delta);
                vio_cmo.excess.size -= tmp;
                vio_cmo.excess.free -= tmp;
                delta -= tmp;

                /*
                 * Remove all but VIO_CMO_MIN_ENT bytes from devices
                 * until entitlement change is served
                 */
                list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                        if (!delta)
                                break;

                        viodev = dev_ent->viodev;
                        tmp = 0;
                        if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
                            (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
                                tmp = viodev->cmo.entitled -
                                      max_t(size_t, viodev->cmo.allocated,
                                            VIO_CMO_MIN_ENT);
                        viodev->cmo.entitled -= min(tmp, delta);
                        delta -= min(tmp, delta);
                }
        } else {
                spin_unlock_irqrestore(&vio_cmo.lock, flags);
                return -ENOMEM;
        }

out:
        schedule_delayed_work(&vio_cmo.balance_q, 0);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return 0;
}

/**
 * vio_cmo_balance - Balance entitlement among devices
 *
 * @work: work queue structure for this operation
 *
 * Any system entitlement above the minimum needed for devices, or
 * already allocated to devices, can be distributed to the devices.
 * The list of devices is iterated through to recalculate the desired
 * entitlement level and to determine how much entitlement above the
 * minimum entitlement is allocated to devices.
 *
 * Small chunks of the available entitlement are given to devices until
 * their requirements are fulfilled or there is no entitlement left to give.
 * Upon completion sizes of the reserve and excess pools are calculated.
 *
 * The system minimum entitlement level is also recalculated here.
 * Entitlement will be reserved for devices even after vio_bus_remove to
 * accommodate reloading the driver.  The OF tree is walked to count the
 * number of devices present and this will remove entitlement for devices
 * that have actually left the system after having vio_bus_remove called.
 */
static void vio_cmo_balance(struct work_struct *work)
{
        struct vio_cmo *cmo;
        struct vio_dev *viodev;
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t avail = 0, level, chunk, need;
        int devcount = 0, fulfilled;

        cmo = container_of(work, struct vio_cmo, balance_q.work);

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Calculate minimum entitlement and fulfill spare */
        cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
        BUG_ON(cmo->min > cmo->entitled);
        cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
        cmo->min += cmo->spare;
        cmo->desired = cmo->min;

        /*
         * Determine how much entitlement is available and reset device
         * entitlements
         */
        avail = cmo->entitled - cmo->spare;
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                viodev = dev_ent->viodev;
                devcount++;
                viodev->cmo.entitled = VIO_CMO_MIN_ENT;
                cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
                avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
        }

        /*
         * Having provided each device with the minimum entitlement, loop
         * over the devices portioning out the remaining entitlement
         * until there is nothing left.
         */
        level = VIO_CMO_MIN_ENT;
        while (avail) {
                fulfilled = 0;
                list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                        viodev = dev_ent->viodev;

                        if (viodev->cmo.desired <= level) {
                                fulfilled++;
                                continue;
                        }

                        /*
                         * Give the device up to VIO_CMO_BALANCE_CHUNK
                         * bytes of entitlement, but do not exceed the
                         * desired level of entitlement for the device.
                         */
                        chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
                        chunk = min(chunk, (viodev->cmo.desired -
                                            viodev->cmo.entitled));
                        viodev->cmo.entitled += chunk;

                        /*
                         * If the memory for this entitlement increase was
                         * already allocated to the device it does not come
                         * from the available pool being portioned out.
                         */
                        need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
                               max(viodev->cmo.allocated, level);
                        avail -= need;
                }
                if (fulfilled == devcount)
                        break;
                level += VIO_CMO_BALANCE_CHUNK;
        }

        /* Calculate new reserve and excess pool sizes */
        cmo->reserve.size = cmo->min;
        cmo->excess.free = 0;
        cmo->excess.size = 0;
        need = 0;
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                viodev = dev_ent->viodev;
                /* Calculated reserve size above the minimum entitlement */
                if (viodev->cmo.entitled)
                        cmo->reserve.size += (viodev->cmo.entitled -
                                              VIO_CMO_MIN_ENT);
                /* Calculated used excess entitlement */
                if (viodev->cmo.allocated > viodev->cmo.entitled)
                        need += viodev->cmo.allocated - viodev->cmo.entitled;
        }
        cmo->excess.size = cmo->entitled - cmo->reserve.size;
        cmo->excess.free = cmo->excess.size - need;

        cancel_delayed_work(to_delayed_work(work));
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
                                          dma_addr_t *dma_handle, gfp_t flag)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        void *ret;

        if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
                atomic_inc(&viodev->cmo.allocs_failed);
                return NULL;
        }

        ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag);
        if (unlikely(ret == NULL)) {
                vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
                atomic_inc(&viodev->cmo.allocs_failed);
        }

        return ret;
}

static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
                                        void *vaddr, dma_addr_t dma_handle)
{
        struct vio_dev *viodev = to_vio_dev(dev);

        dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle);

        vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}

static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        dma_addr_t ret = DMA_ERROR_CODE;

        if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
                atomic_inc(&viodev->cmo.allocs_failed);
                return ret;
        }

        ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
        if (unlikely(dma_mapping_error(dev, ret))) {
                vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
                atomic_inc(&viodev->cmo.allocs_failed);
        }

        return ret;
}

static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
                                     size_t size,
                                     enum dma_data_direction direction,
                                     struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);

        dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);

        vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
}

static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                                int nelems, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct scatterlist *sgl;
        int ret, count = 0;
        size_t alloc_size = 0;

        for (sgl = sglist; count < nelems; count++, sgl++)
                alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE);

        if (vio_cmo_alloc(viodev, alloc_size)) {
                atomic_inc(&viodev->cmo.allocs_failed);
                return 0;
        }

        ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);

        if (unlikely(!ret)) {
                vio_cmo_dealloc(viodev, alloc_size);
                atomic_inc(&viodev->cmo.allocs_failed);
                return ret;
        }

        for (sgl = sglist, count = 0; count < ret; count++, sgl++)
                alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
        if (alloc_size)
                vio_cmo_dealloc(viodev, alloc_size);

        return ret;
}

static void vio_dma_iommu_unmap_sg(struct device *dev,
                                   struct scatterlist *sglist, int nelems,
                                   enum dma_data_direction direction,
                                   struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct scatterlist *sgl;
        size_t alloc_size = 0;
        int count = 0;

        for (sgl = sglist; count < nelems; count++, sgl++)
                alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE);

        dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);

        vio_cmo_dealloc(viodev, alloc_size);
}

struct dma_mapping_ops vio_dma_mapping_ops = {
        .alloc_coherent = vio_dma_iommu_alloc_coherent,
        .free_coherent  = vio_dma_iommu_free_coherent,
        .map_sg         = vio_dma_iommu_map_sg,
        .unmap_sg       = vio_dma_iommu_unmap_sg,
        .map_page       = vio_dma_iommu_map_page,
        .unmap_page     = vio_dma_iommu_unmap_page,
};

/**
 * vio_cmo_set_dev_desired - Set desired entitlement for a device
 *
 * @viodev: struct vio_dev for device to alter
 * @new_desired: new desired entitlement level in bytes
 *
 * For use by devices to request a change to their entitlement at runtime or
 * through sysfs.  The desired entitlement level is changed and a balancing
 * of system resources is scheduled to run in the future.
 */
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
{
        unsigned long flags;
        struct vio_cmo_dev_entry *dev_ent;
        int found = 0;

        if (!firmware_has_feature(FW_FEATURE_CMO))
                return;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        if (desired < VIO_CMO_MIN_ENT)
                desired = VIO_CMO_MIN_ENT;

        /*
         * Changes will not be made for devices not in the device list.
         * If it is not in the device list, then no driver is loaded
         * for the device and it cannot receive entitlement.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
                if (viodev == dev_ent->viodev) {
                        found = 1;
                        break;
                }
        if (!found) {
                /* Don't return with the lock held and interrupts off. */
                spin_unlock_irqrestore(&vio_cmo.lock, flags);
                return;
        }

        /* Increase/decrease in desired device entitlement */
        if (desired >= viodev->cmo.desired) {
                /* Just bump the bus and device values prior to a balance */
                vio_cmo.desired += desired - viodev->cmo.desired;
                viodev->cmo.desired = desired;
        } else {
                /* Decrease bus and device values for desired entitlement */
                vio_cmo.desired -= viodev->cmo.desired - desired;
                viodev->cmo.desired = desired;
                /*
                 * If less entitlement is desired than current entitlement,
                 * move any reserve memory in the change region to the excess
                 * pool.
                 */
                if (viodev->cmo.entitled > desired) {
                        vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
                        vio_cmo.excess.size += viodev->cmo.entitled - desired;
                        /*
                         * If entitlement moving from the reserve pool to the
                         * excess pool is currently unused, add to the excess
                         * free counter.
                         */
                        if (viodev->cmo.allocated < viodev->cmo.entitled)
                                vio_cmo.excess.free += viodev->cmo.entitled -
                                        max(viodev->cmo.allocated, desired);
                        viodev->cmo.entitled = desired;
                }
        }
        schedule_delayed_work(&vio_cmo.balance_q, 0);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

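/*
 * Usage sketch (not part of this file): a CMO-aware driver can raise its
 * desired entitlement at runtime, and the same knob is exposed through
 * the cmo_desired sysfs attribute defined further down.  The unit
 * address in the path below is hypothetical; device names are the "reg"
 * value printed with "%x" (see vio_register_device_node()).
 *
 *      vio_cmo_set_dev_desired(viodev, 1 << 20);       // request ~1MB
 *
 * or from userspace:
 *
 *      echo 1048576 > /sys/bus/vio/devices/30000002/cmo_desired
 */
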
/**
 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
 *
 * @viodev - Pointer to struct vio_dev for device
 *
 * Determine the device's IO memory entitlement needs, attempting
 * to satisfy the system minimum entitlement at first and scheduling
 * a balance operation to take care of the rest at a later time.
 *
 * Returns: 0 on success, -EINVAL when device doesn't support CMO, and
 *          -ENOMEM when entitlement is not available for device or
 *          device entry.
 */
static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
        struct vio_cmo_dev_entry *dev_ent;
        struct device *dev = &viodev->dev;
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        unsigned long flags;
        size_t size;

        /*
         * Check to see that device has a DMA window and configure
         * entitlement for the device.
         */
        if (of_get_property(viodev->dev.archdata.of_node,
                            "ibm,my-dma-window", NULL)) {
                /* Check that the driver is CMO enabled and get desired DMA */
                if (!viodrv->get_desired_dma) {
                        dev_err(dev, "%s: device driver does not support CMO\n",
                                __func__);
                        return -EINVAL;
                }

                viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
                if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
                        viodev->cmo.desired = VIO_CMO_MIN_ENT;
                size = VIO_CMO_MIN_ENT;

                dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
                                  GFP_KERNEL);
                if (!dev_ent)
                        return -ENOMEM;

                dev_ent->viodev = viodev;
                spin_lock_irqsave(&vio_cmo.lock, flags);
                list_add(&dev_ent->list, &vio_cmo.device_list);
        } else {
                viodev->cmo.desired = 0;
                size = 0;
                spin_lock_irqsave(&vio_cmo.lock, flags);
        }

        /*
         * If the needs for vio_cmo.min have not changed since they
         * were last set, the number of devices in the OF tree has
         * been constant and the IO memory for this is already in
         * the reserve pool.
         */
        if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
                            VIO_CMO_MIN_ENT)) {
                /* Update desired entitlement if device requires it */
                if (size)
                        vio_cmo.desired += (viodev->cmo.desired -
                                            VIO_CMO_MIN_ENT);
        } else {
                size_t tmp;

                tmp = vio_cmo.spare + vio_cmo.excess.free;
                if (tmp < size) {
                        dev_err(dev, "%s: insufficient free "
                                "entitlement to add device. "
                                "Need %lu, have %lu\n", __func__,
                                size, tmp);
                        spin_unlock_irqrestore(&vio_cmo.lock, flags);
                        return -ENOMEM;
                }

                /* Use excess pool first to fulfill request */
                tmp = min(size, vio_cmo.excess.free);
                vio_cmo.excess.free -= tmp;
                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;

                /* Use spare if excess pool was insufficient */
                vio_cmo.spare -= size - tmp;

                /* Update bus accounting */
                vio_cmo.min += size;
                vio_cmo.desired += viodev->cmo.desired;
        }
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return 0;
}

/**
 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
 *
 * @viodev - Pointer to struct vio_dev for device
 *
 * Remove the device from the cmo device list.  The minimum entitlement
 * will be reserved for the device as long as it is in the system.  The
 * rest of the entitlement the device had been allocated will be returned
 * to the system.
 */
static void vio_cmo_bus_remove(struct vio_dev *viodev)
{
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t tmp;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        if (viodev->cmo.allocated) {
                dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
                        "allocated after remove operation.\n",
                        __func__, viodev->cmo.allocated);
                BUG();
        }

        /*
         * Remove the device from the device list being maintained for
         * CMO enabled devices.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
                if (viodev == dev_ent->viodev) {
                        list_del(&dev_ent->list);
                        kfree(dev_ent);
                        break;
                }

        /*
         * Devices may not require any entitlement and they do not need
         * to be processed.  Otherwise, return the device's entitlement
         * back to the pools.
         */
        if (viodev->cmo.entitled) {
                /*
                 * This device has not yet left the OF tree, its
                 * minimum entitlement remains in vio_cmo.min and
                 * vio_cmo.desired
                 */
                vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);

                /*
                 * Save min allocation for device in reserve as long
                 * as it exists in OF tree as determined by later
                 * balance operation
                 */
                viodev->cmo.entitled -= VIO_CMO_MIN_ENT;

                /* Replenish spare from freed reserve pool */
                if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
                        tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
                                                         vio_cmo.spare));
                        vio_cmo.spare += tmp;
                        viodev->cmo.entitled -= tmp;
                }

                /* Remaining reserve goes to excess pool */
                vio_cmo.excess.size += viodev->cmo.entitled;
                vio_cmo.excess.free += viodev->cmo.entitled;
                vio_cmo.reserve.size -= viodev->cmo.entitled;

                /*
                 * Until the device is removed it will keep a
                 * minimum entitlement; this will guarantee that
                 * a module unload/load cycle will succeed.
                 */
                viodev->cmo.entitled = VIO_CMO_MIN_ENT;
                viodev->cmo.desired = VIO_CMO_MIN_ENT;
                atomic_set(&viodev->cmo.allocs_failed, 0);
        }

        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
        vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
        viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
}

/**
 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
 *
 * Set up the reserve and excess entitlement pools based on available
 * system entitlement and the number of devices in the OF tree that
 * require entitlement in the reserve pool.
 */
static void vio_cmo_bus_init(void)
{
        struct hvcall_mpp_data mpp_data;
        int err;

        memset(&vio_cmo, 0, sizeof(struct vio_cmo));
        spin_lock_init(&vio_cmo.lock);
        INIT_LIST_HEAD(&vio_cmo.device_list);
        INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);

        /* Get current system entitlement */
        err = h_get_mpp(&mpp_data);

        /*
         * On failure, continue with entitlement set to 0, will panic()
         * later when spare is reserved.
         */
        if (err != H_SUCCESS) {
                printk(KERN_ERR "%s: unable to determine system IO "
                       "entitlement. (%d)\n", __func__, err);
                vio_cmo.entitled = 0;
        } else {
                vio_cmo.entitled = mpp_data.entitled_mem;
        }

        /* Set reservation and check against entitlement */
        vio_cmo.spare = VIO_CMO_MIN_ENT;
        vio_cmo.reserve.size = vio_cmo.spare;
        vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
                                 VIO_CMO_MIN_ENT);
        if (vio_cmo.reserve.size > vio_cmo.entitled) {
                printk(KERN_ERR "%s: insufficient system entitlement\n",
                       __func__);
                panic("%s: Insufficient system entitlement", __func__);
        }

        /* Set the remaining accounting variables */
        vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
        vio_cmo.excess.free = vio_cmo.excess.size;
        vio_cmo.min = vio_cmo.reserve.size;
        vio_cmo.desired = vio_cmo.reserve.size;
}

/* sysfs device functions and data structures for CMO */

#define viodev_cmo_rd_attr(name)                                        \
static ssize_t viodev_cmo_##name##_show(struct device *dev,             \
                                        struct device_attribute *attr,  \
                                        char *buf)                      \
{                                                                       \
        return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);        \
}

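/*
 * For reference, viodev_cmo_rd_attr(desired) below expands to roughly:
 *
 *      static ssize_t viodev_cmo_desired_show(struct device *dev,
 *                                             struct device_attribute *attr,
 *                                             char *buf)
 *      {
 *              return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.desired);
 *      }
 */
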
static ssize_t viodev_cmo_allocs_failed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
}

static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        atomic_set(&viodev->cmo.allocs_failed, 0);
        return count;
}

static ssize_t viodev_cmo_desired_set(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        size_t new_desired;
        int ret;

        ret = strict_strtoul(buf, 10, &new_desired);
        if (ret)
                return ret;

        vio_cmo_set_dev_desired(viodev, new_desired);
        return count;
}

viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);

static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static struct device_attribute vio_cmo_dev_attrs[] = {
        __ATTR_RO(name),
        __ATTR_RO(devspec),
        __ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
               viodev_cmo_desired_show, viodev_cmo_desired_set),
        __ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL),
        __ATTR(cmo_allocated, S_IRUGO, viodev_cmo_allocated_show, NULL),
        __ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
               viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset),
        __ATTR_NULL
};

/* sysfs bus functions and data structures for CMO */

#define viobus_cmo_rd_attr(name)                                        \
static ssize_t                                                          \
viobus_cmo_##name##_show(struct bus_type *bt, char *buf)                \
{                                                                       \
        return sprintf(buf, "%lu\n", vio_cmo.name);                     \
}

#define viobus_cmo_pool_rd_attr(name, var)                              \
static ssize_t                                                          \
viobus_cmo_##name##_pool_show_##var(struct bus_type *bt, char *buf)     \
{                                                                       \
        return sprintf(buf, "%lu\n", vio_cmo.name.var);                 \
}

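/*
 * E.g. viobus_cmo_pool_rd_attr(excess, free) below generates a
 * viobus_cmo_excess_pool_show_free() helper that prints
 * vio_cmo.excess.free.
 */
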
static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf,
                                     size_t count)
{
        unsigned long flags;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        vio_cmo.high = vio_cmo.curr;
        spin_unlock_irqrestore(&vio_cmo.lock, flags);

        return count;
}

viobus_cmo_rd_attr(entitled);
viobus_cmo_pool_rd_attr(reserve, size);
viobus_cmo_pool_rd_attr(excess, size);
viobus_cmo_pool_rd_attr(excess, free);
viobus_cmo_rd_attr(spare);
viobus_cmo_rd_attr(min);
viobus_cmo_rd_attr(desired);
viobus_cmo_rd_attr(curr);
viobus_cmo_rd_attr(high);

static struct bus_attribute vio_cmo_bus_attrs[] = {
        __ATTR(cmo_entitled, S_IRUGO, viobus_cmo_entitled_show, NULL),
        __ATTR(cmo_reserve_size, S_IRUGO, viobus_cmo_reserve_pool_show_size, NULL),
        __ATTR(cmo_excess_size, S_IRUGO, viobus_cmo_excess_pool_show_size, NULL),
        __ATTR(cmo_excess_free, S_IRUGO, viobus_cmo_excess_pool_show_free, NULL),
        __ATTR(cmo_spare, S_IRUGO, viobus_cmo_spare_show, NULL),
        __ATTR(cmo_min, S_IRUGO, viobus_cmo_min_show, NULL),
        __ATTR(cmo_desired, S_IRUGO, viobus_cmo_desired_show, NULL),
        __ATTR(cmo_curr, S_IRUGO, viobus_cmo_curr_show, NULL),
        __ATTR(cmo_high, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
               viobus_cmo_high_show, viobus_cmo_high_reset),
        __ATTR_NULL
};

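/*
 * These attributes appear directly under /sys/bus/vio/.  A sketch of how
 * an administrator might sample and reset the high-water mark via the
 * store hook above:
 *
 *      cat /sys/bus/vio/cmo_high
 *      echo 0 > /sys/bus/vio/cmo_high
 */
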
static void vio_cmo_sysfs_init(void)
{
        vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
        vio_bus_type.bus_attrs = vio_cmo_bus_attrs;
}
#else /* CONFIG_PPC_SMLPAR */
/* Dummy functions for iSeries platform */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
static void vio_cmo_sysfs_init(void) {}
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);

static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
        const unsigned char *dma_window;
        struct iommu_table *tbl;
        unsigned long offset, size;

        if (firmware_has_feature(FW_FEATURE_ISERIES))
                return vio_build_iommu_table_iseries(dev);

        dma_window = of_get_property(dev->dev.archdata.of_node,
                                     "ibm,my-dma-window", NULL);
        if (!dma_window)
                return NULL;

        tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
        if (tbl == NULL)
                return NULL;

        of_parse_dma_window(dev->dev.archdata.of_node, dma_window,
                            &tbl->it_index, &offset, &size);

        /* TCE table size - measured in tce entries */
        tbl->it_size = size >> IOMMU_PAGE_SHIFT;
        /* offset for VIO should always be 0 */
        tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
        tbl->it_busno = 0;
        tbl->it_type = TCE_VB;

        return iommu_init_table(tbl, -1);
}

/**
 * vio_match_device: - Tell if a VIO device has a matching
 *                      VIO device id structure.
 * @ids:        array of VIO device id structures to search in
 * @dev:        the VIO device structure to match against
 *
 * Used by a driver to check whether a VIO device present in the
 * system is in its list of supported devices.  Returns the matching
 * vio_device_id structure or NULL if there is no match.
 */
static const struct vio_device_id *vio_match_device(
                const struct vio_device_id *ids, const struct vio_dev *dev)
{
        while (ids->type[0] != '\0') {
                if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
                    of_device_is_compatible(dev->dev.archdata.of_node,
                                            ids->compat))
                        return ids;
                ids++;
        }
        return NULL;
}

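/*
 * A sketch of the id table a driver matches against; the table must be
 * terminated by an entry with an empty type.  The "network"/"IBM,l-lan"
 * pair mirrors the pseries virtual ethernet nodes; the table name is
 * hypothetical.
 *
 *      static struct vio_device_id hvdummy_ids[] = {
 *              { "network", "IBM,l-lan" },
 *              { "", "" }
 *      };
 */
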
/*
 * Convert from struct device to struct vio_dev and pass to driver.
 * dev->driver has already been set by generic code because vio_bus_match
 * succeeded.
 */
static int vio_bus_probe(struct device *dev)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        const struct vio_device_id *id;
        int error = -ENODEV;

        if (!viodrv->probe)
                return error;

        id = vio_match_device(viodrv->id_table, viodev);
        if (id) {
                memset(&viodev->cmo, 0, sizeof(viodev->cmo));
                if (firmware_has_feature(FW_FEATURE_CMO)) {
                        error = vio_cmo_bus_probe(viodev);
                        if (error)
                                return error;
                }
                error = viodrv->probe(viodev, id);
                if (error && firmware_has_feature(FW_FEATURE_CMO))
                        vio_cmo_bus_remove(viodev);
        }

        return error;
}

/* convert from struct device to struct vio_dev and pass to driver. */
static int vio_bus_remove(struct device *dev)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        struct device *devptr;
        int ret = 1;

        /*
         * Hold a reference to the device after the remove function is called
         * to allow for CMO accounting cleanup for the device.
         */
        devptr = get_device(dev);

        if (viodrv->remove)
                ret = viodrv->remove(viodev);

        if (!ret && firmware_has_feature(FW_FEATURE_CMO))
                vio_cmo_bus_remove(viodev);

        put_device(devptr);
        return ret;
}

/**
 * vio_register_driver: - Register a new vio driver
 * @viodrv:     The vio_driver structure to be registered.
 */
int vio_register_driver(struct vio_driver *viodrv)
{
        printk(KERN_DEBUG "%s: driver %s registering\n", __func__,
                viodrv->driver.name);

        /* fill in 'struct driver' fields */
        viodrv->driver.bus = &vio_bus_type;

        return driver_register(&viodrv->driver);
}
EXPORT_SYMBOL(vio_register_driver);

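/*
 * Registration sketch, assuming the hypothetical hvdummy_ids table above
 * and probe/remove callbacks supplied by a driver module:
 *
 *      static struct vio_driver hvdummy_driver = {
 *              .id_table = hvdummy_ids,
 *              .probe    = hvdummy_probe,
 *              .remove   = hvdummy_remove,
 *              .driver   = {
 *                      .name  = "hvdummy",
 *                      .owner = THIS_MODULE,
 *              }
 *      };
 *
 *      static int __init hvdummy_init(void)
 *      {
 *              return vio_register_driver(&hvdummy_driver);
 *      }
 */
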
/**
 * vio_unregister_driver - Remove registration of vio driver.
 * @viodrv:     The vio_driver struct to be removed from registration
 */
void vio_unregister_driver(struct vio_driver *viodrv)
{
        driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);

/* vio_dev refcount hit 0 */
static void __devinit vio_dev_release(struct device *dev)
{
        /* XXX should free TCE table */
        of_node_put(dev->archdata.of_node);
        kfree(to_vio_dev(dev));
}

/**
 * vio_register_device_node: - Register a new vio device.
 * @of_node:    The OF node for this device.
 *
 * Creates and initializes a vio_dev structure from the data in
 * of_node and adds it to the list of virtual devices.
 * Returns a pointer to the created vio_dev or NULL if node has
 * NULL device_type or compatible fields.
 */
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
        struct vio_dev *viodev;
        const unsigned int *unit_address;

        /* we need the 'device_type' property, in order to match with drivers */
        if (of_node->type == NULL) {
                printk(KERN_WARNING "%s: node %s missing 'device_type'\n",
                                __func__,
                                of_node->name ? of_node->name : "<unknown>");
                return NULL;
        }

        unit_address = of_get_property(of_node, "reg", NULL);
        if (unit_address == NULL) {
                printk(KERN_WARNING "%s: node %s missing 'reg'\n",
                                __func__,
                                of_node->name ? of_node->name : "<unknown>");
                return NULL;
        }

        /* allocate a vio_dev for this node */
        viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
        if (viodev == NULL)
                return NULL;

        viodev->irq = irq_of_parse_and_map(of_node, 0);

        dev_set_name(&viodev->dev, "%x", *unit_address);
        viodev->name = of_node->name;
        viodev->type = of_node->type;
        viodev->unit_address = *unit_address;
        if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                unit_address = of_get_property(of_node,
                                "linux,unit_address", NULL);
                if (unit_address != NULL)
                        viodev->unit_address = *unit_address;
        }
        viodev->dev.archdata.of_node = of_node_get(of_node);

        if (firmware_has_feature(FW_FEATURE_CMO))
                vio_cmo_set_dma_ops(viodev);
        else
                viodev->dev.archdata.dma_ops = &dma_iommu_ops;
        viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev);
        set_dev_node(&viodev->dev, of_node_to_nid(of_node));

        /* init generic 'struct device' fields: */
        viodev->dev.parent = &vio_bus_device.dev;
        viodev->dev.bus = &vio_bus_type;
        viodev->dev.release = vio_dev_release;

        /* register with generic device framework */
        if (device_register(&viodev->dev)) {
                printk(KERN_ERR "%s: failed to register device %s\n",
                                __func__, dev_name(&viodev->dev));
                /* XXX free TCE table */
                kfree(viodev);
                return NULL;
        }

        return viodev;
}
EXPORT_SYMBOL(vio_register_device_node);

/**
 * vio_bus_init: - Initialize the virtual IO bus
 */
static int __init vio_bus_init(void)
{
        int err;
        struct device_node *node_vroot;

        if (firmware_has_feature(FW_FEATURE_CMO))
                vio_cmo_sysfs_init();

        err = bus_register(&vio_bus_type);
        if (err) {
                printk(KERN_ERR "failed to register VIO bus\n");
                return err;
        }

        /*
         * The fake parent of all vio devices, just to give us
         * a nice directory
         */
        err = device_register(&vio_bus_device.dev);
        if (err) {
                printk(KERN_WARNING "%s: device_register returned %i\n",
                                __func__, err);
                return err;
        }

        if (firmware_has_feature(FW_FEATURE_CMO))
                vio_cmo_bus_init();

        node_vroot = of_find_node_by_name(NULL, "vdevice");
        if (node_vroot) {
                struct device_node *of_node;

                /*
                 * Create struct vio_devices for each virtual device in
                 * the device tree.  Drivers will associate with them later.
                 */
                for (of_node = node_vroot->child; of_node != NULL;
                                of_node = of_node->sibling)
                        vio_register_device_node(of_node);
                of_node_put(node_vroot);
        }

        return 0;
}
__initcall(vio_bus_init);

static ssize_t name_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
}

static ssize_t devspec_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct device_node *of_node = dev->archdata.of_node;

        return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
}

static struct device_attribute vio_dev_attrs[] = {
        __ATTR_RO(name),
        __ATTR_RO(devspec),
        __ATTR_NULL
};

void __devinit vio_unregister_device(struct vio_dev *viodev)
{
        device_unregister(&viodev->dev);
}
EXPORT_SYMBOL(vio_unregister_device);

static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
        const struct vio_dev *vio_dev = to_vio_dev(dev);
        struct vio_driver *vio_drv = to_vio_driver(drv);
        const struct vio_device_id *ids = vio_drv->id_table;

        return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
}

static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
{
        const struct vio_dev *vio_dev = to_vio_dev(dev);
        struct device_node *dn;
        const char *cp;

        dn = dev->archdata.of_node;
        if (!dn)
                return -ENODEV;
        cp = of_get_property(dn, "compatible", NULL);
        if (!cp)
                return -ENODEV;

        add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
        return 0;
}

static struct bus_type vio_bus_type = {
        .name = "vio",
        .dev_attrs = vio_dev_attrs,
        .uevent = vio_hotplug,
        .match = vio_bus_match,
        .probe = vio_bus_probe,
        .remove = vio_bus_remove,
};

/**
 * vio_get_attribute: - get attribute for virtual device
 * @vdev:       The vio device to get property.
 * @which:      The property/attribute to be extracted.
 * @length:     Pointer to length of returned data size (unused if NULL).
 *
 * Calls prom.c's of_get_property() to return the value of the
 * attribute specified by @which
 */
const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
{
        return of_get_property(vdev->dev.archdata.of_node, which, length);
}
EXPORT_SYMBOL(vio_get_attribute);

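/*
 * Usage sketch: a virtual ethernet driver reading its MAC address
 * property (property name as found on pseries l-lan nodes):
 *
 *      int len;
 *      const unsigned char *mac;
 *
 *      mac = vio_get_attribute(vdev, "local-mac-address", &len);
 *      if (!mac || len < 6)
 *              return -EINVAL;
 */
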
#ifdef CONFIG_PPC_PSERIES
/* vio_find_name() - internal because only vio.c knows how we formatted the
 * kobject name
 */
static struct vio_dev *vio_find_name(const char *name)
{
        struct device *found;

        found = bus_find_device_by_name(&vio_bus_type, NULL, name);
        if (!found)
                return NULL;

        return to_vio_dev(found);
}

/**
 * vio_find_node - find an already-registered vio_dev
 * @vnode: device_node of the virtual device we're looking for
 */
struct vio_dev *vio_find_node(struct device_node *vnode)
{
        const uint32_t *unit_address;
        char kobj_name[20];

        /* construct the kobject name from the device node */
        unit_address = of_get_property(vnode, "reg", NULL);
        if (!unit_address)
                return NULL;
        snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address);

        return vio_find_name(kobj_name);
}
EXPORT_SYMBOL(vio_find_node);

int vio_enable_interrupts(struct vio_dev *dev)
{
        int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
        if (rc != H_SUCCESS)
                printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
        return rc;
}
EXPORT_SYMBOL(vio_enable_interrupts);

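/*
 * Typical pattern (sketch): a device's interrupt handler masks its
 * interrupts with vio_disable_interrupts() below while it drains its
 * command queue, then re-enables them.  hvdummy_drain_queue() is a
 * hypothetical helper.
 *
 *      vio_disable_interrupts(vdev);
 *      hvdummy_drain_queue(vdev);
 *      vio_enable_interrupts(vdev);
 */
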
int vio_disable_interrupts(struct vio_dev *dev)
{
        int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
        if (rc != H_SUCCESS)
                printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
        return rc;
}
EXPORT_SYMBOL(vio_disable_interrupts);
#endif /* CONFIG_PPC_PSERIES */