/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>
#include <linux/of_device.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

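/* IOMMU mappings are set up in bulk: each CPU collects the physical page
 * addresses for a run of consecutive IOTSB entries in a per-CPU page list,
 * and the whole run is handed to the hypervisor via pci_sun4v_iommu_map()
 * when the list fills up (PGLIST_NENTS pages) or the batch is ended.
 */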
struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

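/* If the next mapping does not continue where the current batch ends, flush
 * what has been collected so far and restart the batch at the new IOTSB
 * entry.  Used by the scatterlist path, which starts a batch with
 * entry == ~0UL to mean "no batch open yet".
 */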
static inline void iommu_batch_new_entry(unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p);
	p->entry = entry;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}

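/* Coherent allocations grab normal pages on the device's NUMA node and then
 * point a freshly allocated range of IOTSB entries at them with both read
 * and write permission; the returned DMA address is the IOTSB map base plus
 * the entry offset.
 */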
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	unsigned long flags, order, first_page, npages, n;
	struct iommu *iommu;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, *dma_addrp, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

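/* Streaming single-buffer map: allocate enough IOTSB entries to cover the
 * buffer, then program them through the batching helpers.  Write permission
 * is only granted when the direction is not DMA_TO_DEVICE.
 */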
static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, bus_addr, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}

static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, bus_addr, npages);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

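/* Scatterlist mapping follows the usual merge logic: consecutive entries are
 * coalesced into one DMA segment as long as the allocated DMA addresses are
 * contiguous, the merged length stays within dma_get_max_seg_size() and the
 * segment does not cross the device's DMA boundary (is_span_boundary()).
 */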
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_nr_pages(paddr, slen);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry);

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end();

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_nr_pages(s->dma_address, s->dma_length);
			iommu_range_free(iommu, vaddr, npages);
			/* XXX demap? XXX */
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	unsigned long flags;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;

		if (!len)
			break;
		npages = iommu_nr_pages(dma_handle, len);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		while (npages) {
			unsigned long num;

			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
						    npages);
			entry += num;
			npages -= num;
		}

		sg = sg_next(sg);
	}

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4v_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	/* Nothing to do... */
}

static void dma_4v_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	/* Nothing to do... */
}

static const struct dma_ops sun4v_dma_ops = {
	.alloc_coherent			= dma_4v_alloc_coherent,
	.free_coherent			= dma_4v_free_coherent,
	.map_single			= dma_4v_map_single,
	.unmap_single			= dma_4v_unmap_single,
	.map_sg				= dma_4v_map_sg,
	.unmap_sg			= dma_4v_unmap_sg,
	.sync_single_for_cpu		= dma_4v_sync_single_for_cpu,
	.sync_sg_for_cpu		= dma_4v_sync_sg_for_cpu,
};

static void __init pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
				      struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

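/* Scan the IOTSB for mappings that the firmware (OBP) has already
 * established.  Entries that point at pages the kernel may use are torn
 * down; the rest are marked busy in the arena bitmap so they are never
 * handed out again.
 */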
static unsigned long __init probe_existing_entries(struct pci_pbm_info *pbm,
						   struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}

static int __init pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz, tsbsize;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
	tsbsize = num_tsb_entries * sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

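/* MSI support.  On sun4v, MSI/MSI-X interrupts are delivered into per-PBM
 * event queues that are configured through hypervisor calls; the generic
 * sparc64 MSI layer drives them via the sparc64_msiq_ops defined below.
 */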
#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

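/* Event queue consumption as seen by the generic layer: get_head reads the
 * hardware head offset, dequeue_msi pulls one entry at that offset
 * (returning 1 when an MSI was found, 0 when the slot is still empty), and
 * set_head writes the advanced offset back once the entries are handled.
 */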
static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!virt_irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;
	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;

	return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	=	pci_sun4v_get_head,
	.dequeue_msi	=	pci_sun4v_dequeue_msi,
	.set_head	=	pci_sun4v_set_head,
	.msi_setup	=	pci_sun4v_msi_setup,
	.msi_teardown	=	pci_sun4v_msi_teardown,
	.msiq_alloc	=	pci_sun4v_msiq_alloc,
	.msiq_free	=	pci_sun4v_msiq_free,
	.msiq_build_irq	=	pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static int __init pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
				     struct of_device *op, u32 devhandle)
{
	struct device_node *dp = op->node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

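/* Probe entry point.  The first probed instance negotiates the PCI
 * hypervisor API group (vpci_major/vpci_minor) and installs sun4v_dma_ops
 * as the global dma_ops; every instance then allocates the per-CPU batch
 * page lists (once), a pci_pbm_info and an iommu, and initializes them via
 * pci_sun4v_pbm_init().
 */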
static int __devinit pci_sun4v_probe(struct of_device *op,
				     const struct of_device_id *match)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	u32 devhandle;
	int i, err;

	dp = op->node;

	if (!hvapi_negotiated++) {
		err = sun4v_hvapi_register(HV_GRP_PCI,
					   vpci_major,
					   &vpci_minor);

		if (err) {
			printk(KERN_ERR PFX "Could not register hvapi, "
			       "err=%d\n", err);
			return err;
		}
		printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static struct of_device_id __initdata pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct of_platform_driver pci_sun4v_driver = {
	.name		= DRIVER_NAME,
	.match_table	= pci_sun4v_match,
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return of_register_driver(&pci_sun4v_driver, &of_bus_type);
}

subsys_initcall(pci_sun4v_init);