/*
 * IBM PowerPC Virtual I/O Infrastructure Support.
 *
 * Copyright (c) 2003,2008 IBM Corp.
 *  Dave Engebretsen engebret@us.ibm.com
 *  Santiago Leon santil@us.ibm.com
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *  Stephen Rothwell
 *  Robert Jennings <rcjenn@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kobject.h>

#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/abs_addr.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/iseries/vio.h>
#include <asm/iseries/hv_types.h>
#include <asm/iseries/hv_lp_config.h>
#include <asm/iseries/hv_call_xm.h>
#include <asm/iseries/iommu.h>

static struct bus_type vio_bus_type;

static struct vio_dev vio_bus_device = { /* fake "parent" device */
	.name = "vio",
	.type = "",
	.dev.init_name = "vio",
	.dev.bus = &vio_bus_type,
};

#ifdef CONFIG_PPC_SMLPAR
/**
 * vio_cmo_pool - A pool of IO memory for CMO use
 *
 * @size: The size of the pool in bytes
 * @free: The amount of free memory in the pool
 */
struct vio_cmo_pool {
	size_t size;
	size_t free;
};

/* How many ms to delay queued balance work */
#define VIO_CMO_BALANCE_DELAY 100

/* Portion out IO memory to CMO devices by this chunk size */
#define VIO_CMO_BALANCE_CHUNK 131072

/**
 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
 *
 * @viodev: struct vio_dev pointer
 * @list: pointer to other devices on bus that are being tracked
 */
struct vio_cmo_dev_entry {
	struct vio_dev *viodev;
	struct list_head list;
};

/**
 * vio_cmo - VIO bus accounting structure for CMO entitlement
 *
 * @lock: spinlock for entire structure
 * @balance_q: work queue for balancing system entitlement
 * @device_list: list of CMO-enabled devices requiring entitlement
 * @entitled: total system entitlement in bytes
 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
 * @excess: pool of excess entitlement not needed for device reserves or spare
 * @spare: IO memory for device hotplug functionality
 * @min: minimum necessary for system operation
 * @desired: desired memory for system operation
 * @curr: bytes currently allocated
 * @high: high water mark for IO data usage
 */
struct vio_cmo {
	spinlock_t lock;
	struct delayed_work balance_q;
	struct list_head device_list;
	size_t entitled;
	struct vio_cmo_pool reserve;
	struct vio_cmo_pool excess;
	size_t spare;
	size_t min;
	size_t desired;
	size_t curr;
	size_t high;
} vio_cmo;

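/*
 * Illustrative sketch (not part of the original file): the accounting
 * relationships implied by the fields above.  Once vio_cmo_balance() has
 * settled the pools, the total entitlement is split exactly between the
 * reserve and excess pools, and the hotplug spare is carved out of the
 * reserve.  The helper name below is hypothetical; a caller would need
 * to hold vio_cmo.lock.
 */
static inline void vio_cmo_example_invariants(struct vio_cmo *cmo)
{
	/* reserve + excess account for all system entitlement */
	WARN_ON(cmo->reserve.size + cmo->excess.size != cmo->entitled);
	/* the spare is a subset of the reserve pool */
	WARN_ON(cmo->spare > cmo->reserve.size);
	/* free entitlement in a pool never exceeds the pool itself */
	WARN_ON(cmo->excess.free > cmo->excess.size);
}
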
/**
 * vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
 */
static int vio_cmo_num_OF_devs(void)
{
	struct device_node *node_vroot;
	int count = 0;

	/*
	 * Count the number of vdevice entries with an
	 * ibm,my-dma-window OF property
	 */
	node_vroot = of_find_node_by_name(NULL, "vdevice");
	if (node_vroot) {
		struct device_node *of_node;
		struct property *prop;

		for_each_child_of_node(node_vroot, of_node) {
			prop = of_find_property(of_node, "ibm,my-dma-window",
						NULL);
			if (prop)
				count++;
		}
	}
	of_node_put(node_vroot);
	return count;
}

/**
 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
 *
 * @viodev: VIO device requesting IO memory
 * @size: size of allocation requested
 *
 * Allocations come from memory reserved for the devices and any excess
 * IO memory available to all devices.  The spare pool used to service
 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
 * made available.
 *
 * Return codes:
 *  0 for successful allocation and -ENOMEM for a failure
 */
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t reserve_free = 0;
	size_t excess_free = 0;
	int ret = -ENOMEM;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Determine the amount of free entitlement available in reserve */
	if (viodev->cmo.entitled > viodev->cmo.allocated)
		reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;

	/* If spare is not fulfilled, the excess pool cannot be used. */
	if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
		excess_free = vio_cmo.excess.free;

	/* The request can be satisfied */
	if ((reserve_free + excess_free) >= size) {
		vio_cmo.curr += size;
		if (vio_cmo.curr > vio_cmo.high)
			vio_cmo.high = vio_cmo.curr;
		viodev->cmo.allocated += size;
		size -= min(reserve_free, size);
		vio_cmo.excess.free -= size;
		ret = 0;
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return ret;
}

/**
 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
 * @viodev: VIO device freeing IO memory
 * @size: size of deallocation
 *
 * IO memory is freed by the device back to the correct memory pools.
 * The spare pool is replenished first from either memory pool, then
 * the reserve pool is used to reduce device entitlement, the excess
 * pool is used to increase the reserve pool toward the desired entitlement
 * target, and then the remaining memory is returned to the pools.
 */
static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t spare_needed = 0;
	size_t excess_freed = 0;
	size_t reserve_freed = size;
	size_t tmp;
	int balance = 0;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.curr -= size;

	/* Amount of memory freed from the excess pool */
	if (viodev->cmo.allocated > viodev->cmo.entitled) {
		excess_freed = min(reserve_freed, (viodev->cmo.allocated -
						   viodev->cmo.entitled));
		reserve_freed -= excess_freed;
	}

	/* Remove allocation from device */
	viodev->cmo.allocated -= (reserve_freed + excess_freed);

	/* Spare is a subset of the reserve pool, replenish it first. */
	spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;

	/*
	 * Replenish the spare in the reserve pool from the excess pool.
	 * This moves entitlement into the reserve pool.
	 */
	if (spare_needed && excess_freed) {
		tmp = min(excess_freed, spare_needed);
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		vio_cmo.spare += tmp;
		excess_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Replenish the spare in the reserve pool from the reserve pool.
	 * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
	 * if needed, and gives it to the spare pool.  The amount of used
	 * memory in this pool does not change.
	 */
	if (spare_needed && reserve_freed) {
		tmp = min3(spare_needed, reserve_freed,
			   (viodev->cmo.entitled - VIO_CMO_MIN_ENT));

		vio_cmo.spare += tmp;
		viodev->cmo.entitled -= tmp;
		reserve_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Increase the reserve pool until the desired allocation is met.
	 * Move an allocation freed from the excess pool into the reserve
	 * pool and schedule a balance operation.
	 */
	if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
		tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));

		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		excess_freed -= tmp;
		balance = 1;
	}

	/* Return any remaining freed entitlement to the excess pool */
	if (excess_freed)
		vio_cmo.excess.free += excess_freed;

	if (balance)
		schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

/**
 * vio_cmo_entitlement_update - Manage system entitlement changes
 *
 * @new_entitlement: new system entitlement to attempt to accommodate
 *
 * Increases in entitlement will be used to fulfill the spare entitlement
 * and the rest is given to the excess pool.  Decreases, if they are
 * possible, come from the excess pool and from unused device entitlement.
 *
 * Returns: 0 on success, -ENOMEM when the change cannot be made
 */
int vio_cmo_entitlement_update(size_t new_entitlement)
{
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail, delta, tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Entitlement increases */
	if (new_entitlement > vio_cmo.entitled) {
		delta = new_entitlement - vio_cmo.entitled;

		/* Fulfill spare allocation */
		if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
			tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
			vio_cmo.spare += tmp;
			vio_cmo.reserve.size += tmp;
			delta -= tmp;
		}

		/* Remaining new allocation goes to the excess pool */
		vio_cmo.entitled += delta;
		vio_cmo.excess.size += delta;
		vio_cmo.excess.free += delta;

		goto out;
	}

	/* Entitlement decreases */
	delta = vio_cmo.entitled - new_entitlement;
	avail = vio_cmo.excess.free;

	/*
	 * Need to check how much unused entitlement each device can
	 * sacrifice to fulfill entitlement change.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		if (avail >= delta)
			break;

		viodev = dev_ent->viodev;
		if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
		    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
			avail += viodev->cmo.entitled -
				 max_t(size_t, viodev->cmo.allocated,
				       VIO_CMO_MIN_ENT);
	}

	if (delta <= avail) {
		vio_cmo.entitled -= delta;

		/* Take entitlement from the excess pool first */
		tmp = min(vio_cmo.excess.free, delta);
		vio_cmo.excess.size -= tmp;
		vio_cmo.excess.free -= tmp;
		delta -= tmp;

		/*
		 * Remove all but VIO_CMO_MIN_ENT bytes from devices
		 * until entitlement change is served
		 */
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			if (!delta)
				break;

			viodev = dev_ent->viodev;
			tmp = 0;
			if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
			    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
				tmp = viodev->cmo.entitled -
				      max_t(size_t, viodev->cmo.allocated,
					    VIO_CMO_MIN_ENT);
			viodev->cmo.entitled -= min(tmp, delta);
			delta -= min(tmp, delta);
		}
	} else {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return -ENOMEM;
	}

out:
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}

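/*
 * Illustrative sketch (not part of the original file): how platform code
 * that learns of a hypervisor entitlement change might drive the exported
 * vio_cmo_entitlement_update().  The caller name and the source of
 * new_entitlement are hypothetical.
 */
static void example_handle_entitlement_change(size_t new_entitlement)
{
	/* Shrinks fail with -ENOMEM if devices cannot give back enough. */
	if (vio_cmo_entitlement_update(new_entitlement))
		pr_err("vio: cannot shrink IO entitlement to %lu bytes\n",
		       new_entitlement);
}
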
/**
 * vio_cmo_balance - Balance entitlement among devices
 *
 * @work: work queue structure for this operation
 *
 * Any system entitlement above the minimum needed for devices, or
 * already allocated to devices, can be distributed to the devices.
 * The list of devices is iterated through to recalculate the desired
 * entitlement level and to determine how much entitlement above the
 * minimum entitlement is allocated to devices.
 *
 * Small chunks of the available entitlement are given to devices until
 * their requirements are fulfilled or there is no entitlement left to give.
 * Upon completion sizes of the reserve and excess pools are calculated.
 *
 * The system minimum entitlement level is also recalculated here.
 * Entitlement will be reserved for devices even after vio_bus_remove to
 * accommodate reloading the driver.  The OF tree is walked to count the
 * number of devices present and this will remove entitlement for devices
 * that have actually left the system after having vio_bus_remove called.
 */
static void vio_cmo_balance(struct work_struct *work)
{
	struct vio_cmo *cmo;
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail = 0, level, chunk, need;
	int devcount = 0, fulfilled;

	cmo = container_of(work, struct vio_cmo, balance_q.work);

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Calculate minimum entitlement and fulfill spare */
	cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
	BUG_ON(cmo->min > cmo->entitled);
	cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
	cmo->min += cmo->spare;
	cmo->desired = cmo->min;

	/*
	 * Determine how much entitlement is available and reset device
	 * entitlements
	 */
	avail = cmo->entitled - cmo->spare;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		devcount++;
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
		avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
	}

	/*
	 * Having provided each device with the minimum entitlement, loop
	 * over the devices portioning out the remaining entitlement
	 * until there is nothing left.
	 */
	level = VIO_CMO_MIN_ENT;
	while (avail) {
		fulfilled = 0;
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			viodev = dev_ent->viodev;

			if (viodev->cmo.desired <= level) {
				fulfilled++;
				continue;
			}

			/*
			 * Give the device up to VIO_CMO_BALANCE_CHUNK
			 * bytes of entitlement, but do not exceed the
			 * desired level of entitlement for the device.
			 */
			chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
			chunk = min(chunk, (viodev->cmo.desired -
					    viodev->cmo.entitled));
			viodev->cmo.entitled += chunk;

			/*
			 * If the memory for this entitlement increase was
			 * already allocated to the device it does not come
			 * from the available pool being portioned out.
			 */
			need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
			       max(viodev->cmo.allocated, level);
			avail -= need;
		}
		if (fulfilled == devcount)
			break;
		level += VIO_CMO_BALANCE_CHUNK;
	}

	/* Calculate new reserve and excess pool sizes */
	cmo->reserve.size = cmo->min;
	cmo->excess.free = 0;
	cmo->excess.size = 0;
	need = 0;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		/* Calculated reserve size above the minimum entitlement */
		if (viodev->cmo.entitled)
			cmo->reserve.size += (viodev->cmo.entitled -
					      VIO_CMO_MIN_ENT);
		/* Calculated used excess entitlement */
		if (viodev->cmo.allocated > viodev->cmo.entitled)
			need += viodev->cmo.allocated - viodev->cmo.entitled;
	}
	cmo->excess.size = cmo->entitled - cmo->reserve.size;
	cmo->excess.free = cmo->excess.size - need;

	cancel_delayed_work(to_delayed_work(work));
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t flag,
					  struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	void *ret;

	if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return NULL;
	}

	ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs);
	if (unlikely(ret == NULL)) {
		vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
		atomic_inc(&viodev->cmo.allocs_failed);
	}

	return ret;
}

static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
					void *vaddr, dma_addr_t dma_handle,
					struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);

	dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs);

	vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}

static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	dma_addr_t ret = DMA_ERROR_CODE;

	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return ret;
	}

	ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
	if (unlikely(dma_mapping_error(dev, ret))) {
		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
		atomic_inc(&viodev->cmo.allocs_failed);
	}

	return ret;
}

static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				     size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);

	dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);

	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
}

static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
				int nelems, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct scatterlist *sgl;
	int ret, count = 0;
	size_t alloc_size = 0;

	for (sgl = sglist; count < nelems; count++, sgl++)
		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE);

	if (vio_cmo_alloc(viodev, alloc_size)) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return 0;
	}

	ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);

	if (unlikely(!ret)) {
		vio_cmo_dealloc(viodev, alloc_size);
		atomic_inc(&viodev->cmo.allocs_failed);
		return ret;
	}

	for (sgl = sglist, count = 0; count < ret; count++, sgl++)
		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
	if (alloc_size)
		vio_cmo_dealloc(viodev, alloc_size);

	return ret;
}

static void vio_dma_iommu_unmap_sg(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct scatterlist *sgl;
	size_t alloc_size = 0;
	int count = 0;

	for (sgl = sglist; count < nelems; count++, sgl++)
		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE);

	dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);

	vio_cmo_dealloc(viodev, alloc_size);
}

static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	return dma_iommu_ops.dma_supported(dev, mask);
}

static u64 vio_dma_get_required_mask(struct device *dev)
{
	return dma_iommu_ops.get_required_mask(dev);
}

struct dma_map_ops vio_dma_mapping_ops = {
	.alloc             = vio_dma_iommu_alloc_coherent,
	.free              = vio_dma_iommu_free_coherent,
	.map_sg            = vio_dma_iommu_map_sg,
	.unmap_sg          = vio_dma_iommu_unmap_sg,
	.map_page          = vio_dma_iommu_map_page,
	.unmap_page        = vio_dma_iommu_unmap_page,
	.dma_supported     = vio_dma_iommu_dma_supported,
	.get_required_mask = vio_dma_get_required_mask,
};

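/*
 * Illustrative sketch (not part of the original file): every ordinary DMA
 * API call a driver makes on a CMO-enabled vio_dev is routed through the
 * wrappers above, so entitlement is accounted transparently.  The helper
 * name, buffer and length below are hypothetical.
 */
static dma_addr_t example_map_buffer(struct vio_dev *viodev, void *buf,
				     size_t len)
{
	/* Ends up in vio_dma_iommu_map_page(): vio_cmo_alloc() runs first,
	 * then the underlying dma_iommu_ops mapping is attempted. */
	return dma_map_single(&viodev->dev, buf, len, DMA_TO_DEVICE);
}
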
/**
 * vio_cmo_set_dev_desired - Set desired entitlement for a device
 *
 * @viodev: struct vio_dev for device to alter
 * @desired: new desired entitlement level in bytes
 *
 * For use by devices to request a change to their entitlement at runtime or
 * through sysfs.  The desired entitlement level is changed and a balancing
 * of system resources is scheduled to run in the future.
 */
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
{
	unsigned long flags;
	struct vio_cmo_dev_entry *dev_ent;
	int found = 0;

	if (!firmware_has_feature(FW_FEATURE_CMO))
		return;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (desired < VIO_CMO_MIN_ENT)
		desired = VIO_CMO_MIN_ENT;

	/*
	 * Changes will not be made for devices not in the device list.
	 * If it is not in the device list, then no driver is loaded
	 * for the device and it cannot receive entitlement.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			found = 1;
			break;
		}
	if (!found) {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return;
	}

	/* Increase/decrease in desired device entitlement */
	if (desired >= viodev->cmo.desired) {
		/* Just bump the bus and device values prior to a balance */
		vio_cmo.desired += desired - viodev->cmo.desired;
		viodev->cmo.desired = desired;
	} else {
		/* Decrease bus and device values for desired entitlement */
		vio_cmo.desired -= viodev->cmo.desired - desired;
		viodev->cmo.desired = desired;
		/*
		 * If less entitlement is desired than current entitlement,
		 * move any reserve memory in the change region to the
		 * excess pool.
		 */
		if (viodev->cmo.entitled > desired) {
			vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
			vio_cmo.excess.size += viodev->cmo.entitled - desired;
			/*
			 * If entitlement moving from the reserve pool to the
			 * excess pool is currently unused, add to the excess
			 * free counter.
			 */
			if (viodev->cmo.allocated < viodev->cmo.entitled)
				vio_cmo.excess.free += viodev->cmo.entitled -
						       max(viodev->cmo.allocated, desired);
			viodev->cmo.entitled = desired;
		}
	}
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

/**
 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
 *
 * @viodev: Pointer to struct vio_dev for device
 *
 * Determine the device's IO memory entitlement needs, attempting
 * to satisfy the system minimum entitlement at first and scheduling
 * a balance operation to take care of the rest at a later time.
 *
 * Returns: 0 on success, -EINVAL when the device doesn't support CMO, and
 *          -ENOMEM when entitlement is not available for the device or
 *          device entry.
 */
static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	struct device *dev = &viodev->dev;
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	unsigned long flags;
	size_t size;

	/*
	 * Check to see that device has a DMA window and configure
	 * entitlement for the device.
	 */
	if (of_get_property(viodev->dev.of_node,
			    "ibm,my-dma-window", NULL)) {
		/* Check that the driver is CMO enabled and get desired DMA */
		if (!viodrv->get_desired_dma) {
			dev_err(dev, "%s: device driver does not support CMO\n",
				__func__);
			return -EINVAL;
		}

		viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
		if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
			viodev->cmo.desired = VIO_CMO_MIN_ENT;
		size = VIO_CMO_MIN_ENT;

		dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
				  GFP_KERNEL);
		if (!dev_ent)
			return -ENOMEM;

		dev_ent->viodev = viodev;
		spin_lock_irqsave(&vio_cmo.lock, flags);
		list_add(&dev_ent->list, &vio_cmo.device_list);
	} else {
		viodev->cmo.desired = 0;
		size = 0;
		spin_lock_irqsave(&vio_cmo.lock, flags);
	}

	/*
	 * If the needs for vio_cmo.min have not changed since they
	 * were last set, the number of devices in the OF tree has
	 * been constant and the IO memory for this is already in
	 * the reserve pool.
	 */
	if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
			    VIO_CMO_MIN_ENT)) {
		/* Update desired entitlement if device requires it */
		if (size)
			vio_cmo.desired += (viodev->cmo.desired -
					    VIO_CMO_MIN_ENT);
	} else {
		size_t tmp;

		tmp = vio_cmo.spare + vio_cmo.excess.free;
		if (tmp < size) {
			dev_err(dev, "%s: insufficient free "
				"entitlement to add device. "
				"Need %lu, have %lu\n", __func__,
				size, (vio_cmo.spare + tmp));
			spin_unlock_irqrestore(&vio_cmo.lock, flags);
			return -ENOMEM;
		}

		/* Use excess pool first to fulfill request */
		tmp = min(size, vio_cmo.excess.free);
		vio_cmo.excess.free -= tmp;
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;

		/* Use spare if excess pool was insufficient */
		vio_cmo.spare -= size - tmp;

		/* Update bus accounting */
		vio_cmo.min += size;
		vio_cmo.desired += viodev->cmo.desired;
	}
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}

/**
 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
 *
 * @viodev: Pointer to struct vio_dev for device
 *
 * Remove the device from the cmo device list.  The minimum entitlement
 * will be reserved for the device as long as it is in the system.  The
 * rest of the entitlement the device had been allocated will be returned
 * to the system.
 */
static void vio_cmo_bus_remove(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (viodev->cmo.allocated) {
		dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
			"allocated after remove operation.\n",
			__func__, viodev->cmo.allocated);
		BUG();
	}

	/*
	 * Remove the device from the device list being maintained for
	 * CMO enabled devices.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			list_del(&dev_ent->list);
			kfree(dev_ent);
			break;
		}

	/*
	 * Devices may not require any entitlement and they do not need
	 * to be processed.  Otherwise, return the device's entitlement
	 * back to the pools.
	 */
	if (viodev->cmo.entitled) {
		/*
		 * This device has not yet left the OF tree, its
		 * minimum entitlement remains in vio_cmo.min and
		 * vio_cmo.desired
		 */
		vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);

		/*
		 * Save min allocation for device in reserve as long
		 * as it exists in OF tree as determined by later
		 * balance operation
		 */
		viodev->cmo.entitled -= VIO_CMO_MIN_ENT;

		/* Replenish spare from freed reserve pool */
		if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
			tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
							 vio_cmo.spare));
			vio_cmo.spare += tmp;
			viodev->cmo.entitled -= tmp;
		}

		/* Remaining reserve goes to excess pool */
		vio_cmo.excess.size += viodev->cmo.entitled;
		vio_cmo.excess.free += viodev->cmo.entitled;
		vio_cmo.reserve.size -= viodev->cmo.entitled;

		/*
		 * Until the device is removed it will keep a
		 * minimum entitlement; this will guarantee that
		 * a module unload/load cycle will succeed.
		 */
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		viodev->cmo.desired = VIO_CMO_MIN_ENT;
		atomic_set(&viodev->cmo.allocs_failed, 0);
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
	set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
}

/**
 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
 *
 * Set up the reserve and excess entitlement pools based on available
 * system entitlement and the number of devices in the OF tree that
 * require entitlement in the reserve pool.
 */
static void vio_cmo_bus_init(void)
{
	struct hvcall_mpp_data mpp_data;
	int err;

	memset(&vio_cmo, 0, sizeof(struct vio_cmo));
	spin_lock_init(&vio_cmo.lock);
	INIT_LIST_HEAD(&vio_cmo.device_list);
	INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);

	/* Get current system entitlement */
	err = h_get_mpp(&mpp_data);

	/*
	 * On failure, continue with entitlement set to 0, will panic()
	 * later when spare is reserved.
	 */
	if (err != H_SUCCESS) {
		printk(KERN_ERR "%s: unable to determine system IO "
		       "entitlement. (%d)\n", __func__, err);
		vio_cmo.entitled = 0;
	} else {
		vio_cmo.entitled = mpp_data.entitled_mem;
	}

	/* Set reservation and check against entitlement */
	vio_cmo.spare = VIO_CMO_MIN_ENT;
	vio_cmo.reserve.size = vio_cmo.spare;
	vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
				 VIO_CMO_MIN_ENT);
	if (vio_cmo.reserve.size > vio_cmo.entitled) {
		printk(KERN_ERR "%s: insufficient system entitlement\n",
		       __func__);
		panic("%s: Insufficient system entitlement", __func__);
	}

	/* Set the remaining accounting variables */
	vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
	vio_cmo.excess.free = vio_cmo.excess.size;
	vio_cmo.min = vio_cmo.reserve.size;
	vio_cmo.desired = vio_cmo.reserve.size;
}

/* sysfs device functions and data structures for CMO */

#define viodev_cmo_rd_attr(name)					\
static ssize_t viodev_cmo_##name##_show(struct device *dev,		\
					struct device_attribute *attr,	\
					char *buf)			\
{									\
	return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);	\
}

static ssize_t viodev_cmo_allocs_failed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
}

static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	atomic_set(&viodev->cmo.allocs_failed, 0);
	return count;
}

static ssize_t viodev_cmo_desired_set(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	size_t new_desired;
	int ret;

	ret = strict_strtoul(buf, 10, &new_desired);
	if (ret)
		return ret;

	vio_cmo_set_dev_desired(viodev, new_desired);
	return count;
}

viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);

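/*
 * Illustrative sketch (not part of the original file): what a single
 * viodev_cmo_rd_attr() invocation above expands to.  For example,
 * viodev_cmo_rd_attr(desired) generates a show routine equivalent to
 * the hypothetical function below.
 */
static ssize_t viodev_cmo_desired_show_expanded(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	/* same body as the generated viodev_cmo_desired_show() */
	return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.desired);
}
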
static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf);
static struct device_attribute vio_cmo_dev_attrs[] = {
	__ATTR_RO(name),
	__ATTR_RO(devspec),
	__ATTR_RO(modalias),
	__ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
	       viodev_cmo_desired_show, viodev_cmo_desired_set),
	__ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL),
	__ATTR(cmo_allocated, S_IRUGO, viodev_cmo_allocated_show, NULL),
	__ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
	       viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset),
	__ATTR_NULL
};

/* sysfs bus functions and data structures for CMO */

#define viobus_cmo_rd_attr(name)					\
static ssize_t								\
viobus_cmo_##name##_show(struct bus_type *bt, char *buf)		\
{									\
	return sprintf(buf, "%lu\n", vio_cmo.name);			\
}

#define viobus_cmo_pool_rd_attr(name, var)				\
static ssize_t								\
viobus_cmo_##name##_pool_show_##var(struct bus_type *bt, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", vio_cmo.name.var);			\
}

static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf,
				     size_t count)
{
	unsigned long flags;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.high = vio_cmo.curr;
	spin_unlock_irqrestore(&vio_cmo.lock, flags);

	return count;
}

viobus_cmo_rd_attr(entitled);
viobus_cmo_pool_rd_attr(reserve, size);
viobus_cmo_pool_rd_attr(excess, size);
viobus_cmo_pool_rd_attr(excess, free);
viobus_cmo_rd_attr(spare);
viobus_cmo_rd_attr(min);
viobus_cmo_rd_attr(desired);
viobus_cmo_rd_attr(curr);
viobus_cmo_rd_attr(high);

static struct bus_attribute vio_cmo_bus_attrs[] = {
	__ATTR(cmo_entitled, S_IRUGO, viobus_cmo_entitled_show, NULL),
	__ATTR(cmo_reserve_size, S_IRUGO, viobus_cmo_reserve_pool_show_size, NULL),
	__ATTR(cmo_excess_size, S_IRUGO, viobus_cmo_excess_pool_show_size, NULL),
	__ATTR(cmo_excess_free, S_IRUGO, viobus_cmo_excess_pool_show_free, NULL),
	__ATTR(cmo_spare, S_IRUGO, viobus_cmo_spare_show, NULL),
	__ATTR(cmo_min, S_IRUGO, viobus_cmo_min_show, NULL),
	__ATTR(cmo_desired, S_IRUGO, viobus_cmo_desired_show, NULL),
	__ATTR(cmo_curr, S_IRUGO, viobus_cmo_curr_show, NULL),
	__ATTR(cmo_high, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
	       viobus_cmo_high_show, viobus_cmo_high_reset),
	__ATTR_NULL
};

static void vio_cmo_sysfs_init(void)
{
	vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
	vio_bus_type.bus_attrs = vio_cmo_bus_attrs;
}
#else /* CONFIG_PPC_SMLPAR */
/* Dummy functions for iSeries platform */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
static void vio_cmo_sysfs_init(void) { }
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);

static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
	const unsigned char *dma_window;
	struct iommu_table *tbl;
	unsigned long offset, size;

	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return vio_build_iommu_table_iseries(dev);

	dma_window = of_get_property(dev->dev.of_node,
				     "ibm,my-dma-window", NULL);
	if (!dma_window)
		return NULL;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (tbl == NULL)
		return NULL;

	of_parse_dma_window(dev->dev.of_node, dma_window,
			    &tbl->it_index, &offset, &size);

	/* TCE table size - measured in tce entries */
	tbl->it_size = size >> IOMMU_PAGE_SHIFT;
	/* offset for VIO should always be 0 */
	tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
	tbl->it_busno = 0;
	tbl->it_type = TCE_VB;
	tbl->it_blocksize = 16;

	return iommu_init_table(tbl, -1);
}

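/*
 * Illustrative arithmetic (not part of the original file): assuming 4 KiB
 * IOMMU pages (IOMMU_PAGE_SHIFT == 12), a hypothetical 256 MiB DMA window
 * yields 65536 TCE entries in the table built above.
 */
static void example_tce_count(void)
{
	unsigned long window = 256UL << 20;	/* hypothetical 256 MiB window */
	unsigned long tces = window >> IOMMU_PAGE_SHIFT; /* 65536 entries */

	(void)tces;
}
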
/**
 * vio_match_device: - Tell if a VIO device has a matching
 *			VIO device id structure.
 * @ids: array of VIO device id structures to search in
 * @dev: the VIO device structure to match against
 *
 * Used by a driver to check whether a VIO device present in the
 * system is in its list of supported devices.  Returns the matching
 * vio_device_id structure or NULL if there is no match.
 */
static const struct vio_device_id *vio_match_device(
		const struct vio_device_id *ids, const struct vio_dev *dev)
{
	while (ids->type[0] != '\0') {
		if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
		    of_device_is_compatible(dev->dev.of_node,
					    ids->compat))
			return ids;
		ids++;
	}
	return NULL;
}

/*
 * Convert from struct device to struct vio_dev and pass to driver.
 * dev->driver has already been set by generic code because vio_bus_match
 * succeeded.
 */
static int vio_bus_probe(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	const struct vio_device_id *id;
	int error = -ENODEV;

	if (!viodrv->probe)
		return error;

	id = vio_match_device(viodrv->id_table, viodev);
	if (id) {
		memset(&viodev->cmo, 0, sizeof(viodev->cmo));
		if (firmware_has_feature(FW_FEATURE_CMO)) {
			error = vio_cmo_bus_probe(viodev);
			if (error)
				return error;
		}
		error = viodrv->probe(viodev, id);
		if (error && firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_bus_remove(viodev);
	}

	return error;
}

/* convert from struct device to struct vio_dev and pass to driver. */
static int vio_bus_remove(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	struct device *devptr;
	int ret = 1;

	/*
	 * Hold a reference to the device after the remove function is called
	 * to allow for CMO accounting cleanup for the device.
	 */
	devptr = get_device(dev);

	if (viodrv->remove)
		ret = viodrv->remove(viodev);

	if (!ret && firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_bus_remove(viodev);

	put_device(devptr);
	return ret;
}

/**
 * vio_register_driver: - Register a new vio driver
 * @viodrv: The vio_driver structure to be registered.
 */
int vio_register_driver(struct vio_driver *viodrv)
{
	printk(KERN_DEBUG "%s: driver %s registering\n", __func__,
	       viodrv->driver.name);

	/* fill in 'struct driver' fields */
	viodrv->driver.bus = &vio_bus_type;

	return driver_register(&viodrv->driver);
}
EXPORT_SYMBOL(vio_register_driver);

/**
 * vio_unregister_driver - Remove registration of vio driver.
 * @viodrv: The vio_driver struct to be removed from registration
 */
void vio_unregister_driver(struct vio_driver *viodrv)
{
	driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);

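/*
 * Illustrative sketch (not part of the original file): the shape of a
 * minimal vio driver using the registration API above.  The names, the
 * device id strings and the entitlement figure are hypothetical; real
 * examples live in drivers such as ibmveth.
 */
static int example_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	dev_info(&vdev->dev, "probed unit 0x%x\n", vdev->unit_address);
	return 0;
}

static int example_remove(struct vio_dev *vdev)
{
	return 0;
}

/* Consulted by vio_cmo_bus_probe() on CMO-enabled systems. */
static unsigned long example_get_desired_dma(struct vio_dev *vdev)
{
	return 64 * 1024;	/* hypothetical IO entitlement, in bytes */
}

static struct vio_device_id example_ids[] = {
	{ "network", "IBM,l-lan" },	/* matched by vio_match_device() */
	{ "", "" },
};

static struct vio_driver example_driver = {
	.id_table        = example_ids,
	.probe           = example_probe,
	.remove          = example_remove,
	.get_desired_dma = example_get_desired_dma,
	.driver = {
		.name  = "example_vio_drv",
		.owner = THIS_MODULE,
	},
};

/* A module init function would call: vio_register_driver(&example_driver); */
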
/* vio_dev refcount hit 0 */
static void __devinit vio_dev_release(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	/* iSeries uses a common table for all vio devices */
	if (!firmware_has_feature(FW_FEATURE_ISERIES) && tbl)
		iommu_free_table(tbl, dev->of_node ?
				 dev->of_node->full_name : dev_name(dev));
	of_node_put(dev->of_node);
	kfree(to_vio_dev(dev));
}

/**
 * vio_register_device_node: - Register a new vio device.
 * @of_node:	The OF node for this device.
 *
 * Creates and initializes a vio_dev structure from the data in
 * of_node and adds it to the list of virtual devices.
 * Returns a pointer to the created vio_dev or NULL if node has
 * NULL device_type or compatible fields.
 */
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
	struct vio_dev *viodev;
	const unsigned int *unit_address;

	/* we need the 'device_type' property, in order to match with drivers */
	if (of_node->type == NULL) {
		printk(KERN_WARNING "%s: node %s missing 'device_type'\n",
		       __func__,
		       of_node->name ? of_node->name : "<unknown>");
		return NULL;
	}

	unit_address = of_get_property(of_node, "reg", NULL);
	if (unit_address == NULL) {
		printk(KERN_WARNING "%s: node %s missing 'reg'\n",
		       __func__,
		       of_node->name ? of_node->name : "<unknown>");
		return NULL;
	}

	/* allocate a vio_dev for this node */
	viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
	if (viodev == NULL)
		return NULL;

	viodev->irq = irq_of_parse_and_map(of_node, 0);

	dev_set_name(&viodev->dev, "%x", *unit_address);
	viodev->name = of_node->name;
	viodev->type = of_node->type;
	viodev->unit_address = *unit_address;
	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
		unit_address = of_get_property(of_node,
					       "linux,unit_address", NULL);
		if (unit_address != NULL)
			viodev->unit_address = *unit_address;
	}
	viodev->dev.of_node = of_node_get(of_node);

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_set_dma_ops(viodev);
	else
		set_dma_ops(&viodev->dev, &dma_iommu_ops);
	set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev));
	set_dev_node(&viodev->dev, of_node_to_nid(of_node));

	/* init generic 'struct device' fields: */
	viodev->dev.parent = &vio_bus_device.dev;
	viodev->dev.bus = &vio_bus_type;
	viodev->dev.release = vio_dev_release;
	/* needed to ensure proper operation of coherent allocations
	 * later, in case the driver doesn't set it explicitly */
	dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
	dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));

	/* register with generic device framework */
	if (device_register(&viodev->dev)) {
		printk(KERN_ERR "%s: failed to register device %s\n",
		       __func__, dev_name(&viodev->dev));
		put_device(&viodev->dev);
		return NULL;
	}

	return viodev;
}
EXPORT_SYMBOL(vio_register_device_node);

/**
 * vio_bus_init: - Initialize the virtual IO bus
 */
static int __init vio_bus_init(void)
{
	int err;
	struct device_node *node_vroot;

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_sysfs_init();

	err = bus_register(&vio_bus_type);
	if (err) {
		printk(KERN_ERR "failed to register VIO bus\n");
		return err;
	}

	/*
	 * The fake parent of all vio devices, just to give us
	 * a nice directory
	 */
	err = device_register(&vio_bus_device.dev);
	if (err) {
		printk(KERN_WARNING "%s: device_register returned %i\n",
		       __func__, err);
		return err;
	}

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_bus_init();

	node_vroot = of_find_node_by_name(NULL, "vdevice");
	if (node_vroot) {
		struct device_node *of_node;

		/*
		 * Create struct vio_devices for each virtual device in
		 * the device tree.  Drivers will associate with them later.
		 */
		for (of_node = node_vroot->child; of_node != NULL;
		     of_node = of_node->sibling)
			vio_register_device_node(of_node);
		of_node_put(node_vroot);
	}

	return 0;
}
__initcall(vio_bus_init);

static ssize_t name_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
}

static ssize_t devspec_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct device_node *of_node = dev->of_node;

	return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct device_node *dn;
	const char *cp;

	dn = dev->of_node;
	if (!dn)
		return -ENODEV;
	cp = of_get_property(dn, "compatible", NULL);
	if (!cp)
		return -ENODEV;

	return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
}

static struct device_attribute vio_dev_attrs[] = {
	__ATTR_RO(name),
	__ATTR_RO(devspec),
	__ATTR_RO(modalias),
	__ATTR_NULL
};

void __devinit vio_unregister_device(struct vio_dev *viodev)
{
	device_unregister(&viodev->dev);
}
EXPORT_SYMBOL(vio_unregister_device);

static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct vio_driver *vio_drv = to_vio_driver(drv);
	const struct vio_device_id *ids = vio_drv->id_table;

	return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
}

static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct device_node *dn;
	const char *cp;

	dn = dev->of_node;
	if (!dn)
		return -ENODEV;
	cp = of_get_property(dn, "compatible", NULL);
	if (!cp)
		return -ENODEV;

	add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
	return 0;
}

1403 | ||
6fccab26 | 1404 | static struct bus_type vio_bus_type = { |
1da177e4 | 1405 | .name = "vio", |
e10fa773 | 1406 | .dev_attrs = vio_dev_attrs, |
312c004d | 1407 | .uevent = vio_hotplug, |
1da177e4 | 1408 | .match = vio_bus_match, |
2f53a80f RK |
1409 | .probe = vio_bus_probe, |
1410 | .remove = vio_bus_remove, | |
1da177e4 | 1411 | }; |
e10fa773 SR |
1412 | |
/**
 * vio_get_attribute: - get attribute for virtual device
 * @vdev:	The vio device to get property.
 * @which:	The property/attribute to be extracted.
 * @length:	Pointer to length of returned data size (unused if NULL).
 *
 * Calls prom.c's of_get_property() to return the value of the
 * attribute specified by @which
 */
const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
{
	return of_get_property(vdev->dev.of_node, which, length);
}
EXPORT_SYMBOL(vio_get_attribute);

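/*
 * Illustrative usage (not part of the original file): a driver reading an
 * OF property off its vio_dev; the property chosen here is just an example.
 */
static void example_read_attribute(struct vio_dev *vdev)
{
	int len;
	const unsigned int *reg;

	reg = vio_get_attribute(vdev, "reg", &len);
	if (reg)
		dev_dbg(&vdev->dev, "reg = 0x%x (%d bytes)\n", *reg, len);
}
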
#ifdef CONFIG_PPC_PSERIES
/* vio_find_name() - internal because only vio.c knows how we formatted the
 * kobject name
 */
static struct vio_dev *vio_find_name(const char *name)
{
	struct device *found;

	found = bus_find_device_by_name(&vio_bus_type, NULL, name);
	if (!found)
		return NULL;

	return to_vio_dev(found);
}

/**
 * vio_find_node - find an already-registered vio_dev
 * @vnode: device_node of the virtual device we're looking for
 */
struct vio_dev *vio_find_node(struct device_node *vnode)
{
	const uint32_t *unit_address;
	char kobj_name[20];

	/* construct the kobject name from the device node */
	unit_address = of_get_property(vnode, "reg", NULL);
	if (!unit_address)
		return NULL;
	snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address);

	return vio_find_name(kobj_name);
}
EXPORT_SYMBOL(vio_find_node);

1461 | ||
1462 | int vio_enable_interrupts(struct vio_dev *dev) | |
1463 | { | |
1464 | int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE); | |
1465 | if (rc != H_SUCCESS) | |
1466 | printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc); | |
1467 | return rc; | |
1468 | } | |
1469 | EXPORT_SYMBOL(vio_enable_interrupts); | |
1470 | ||
1471 | int vio_disable_interrupts(struct vio_dev *dev) | |
1472 | { | |
1473 | int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE); | |
1474 | if (rc != H_SUCCESS) | |
1475 | printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc); | |
1476 | return rc; | |
1477 | } | |
1478 | EXPORT_SYMBOL(vio_disable_interrupts); | |
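
/*
 * Illustrative sketch (not part of the original file): the usual
 * interrupt-mitigation pattern built on the two helpers above, as a
 * NAPI-style driver might use it; the poll-queue details are hypothetical.
 */
static void example_poll_cycle(struct vio_dev *vdev)
{
	/* mask the virtual interrupt while polling the event queue */
	vio_disable_interrupts(vdev);

	/* ... drain pending events here ... */

	/* re-arm the interrupt once the queue is empty */
	vio_enable_interrupts(vdev);
}
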
#endif /* CONFIG_PPC_PSERIES */