/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

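/* Requested version of the HV_GRP_PCI hypervisor API group; negotiated
 * via sun4v_hvapi_register() the first time sun4v_pci_init() runs.
 */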
static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};
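
/* The batch machinery below amortizes the cost of the
 * pci_sun4v_iommu_map() hypervisor call: callers collect up to
 * PGLIST_NENTS physical page addresses in a per-cpu page list, and a
 * single hypervisor call then installs the whole run of IOTSB entries.
 * The expected calling sequence, with interrupts disabled, is:
 *
 *	iommu_batch_start(dev, prot, entry);
 *	for each page:
 *		iommu_batch_add(phys_page);
 *	iommu_batch_end();
 */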
static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}
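
/* Allocate a run of 'npages' contiguous IOTSB entries from the arena
 * bitmap.  The search begins at the rotating hint and, if it reaches
 * the end of the map, wraps around to the start for one more pass so
 * entries freed behind the hint get reused.  Returns the index of the
 * first entry, or -1 if no free run exists.  Caller holds iommu->lock.
 */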
static long arena_alloc(struct iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}

static void arena_free(struct iommu_arena *arena, unsigned long base,
		       unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}
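
/* DMA addresses handed to drivers are computed as
 *
 *	dma_addr = iommu->page_table_map_base + (entry << IO_PAGE_SHIFT)
 *
 * which lets the unmap and free paths recover the IOTSB entry index
 * from a DMA address by reversing the same arithmetic.
 */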
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct iommu *iommu;
	unsigned long flags, order, first_page, npages, n;
	void *ret;
	long entry;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	first_page = __get_free_pages(gfp, order);
	if (unlikely(first_page == 0UL))
		return NULL;

	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto arena_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

arena_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}

static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	unsigned long flags, npages, i, prot;
	struct scatterlist *sg;
	struct iommu *iommu;
	long entry, err;
	u32 dma_base;

	/* Fast path single entry scatterlists.  */
	if (nelems == 1) {
		sglist->dma_address =
			dma_4v_map_single(dev, sg_virt(sglist),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	npages = calc_npages(sglist, nelems);

	spin_lock_irqsave(&iommu->lock, flags);
	entry = arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for_each_sg(sglist, sg, nelems, i) {
		unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
		unsigned long slen = sg->length;
		unsigned long this_npages;

		this_npages = iommu_num_pages(paddr, slen);

		sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK);
		sg->dma_length = slen;

		paddr &= IO_PAGE_MASK;
		while (this_npages--) {
			err = iommu_batch_add(paddr);
			if (unlikely(err < 0L)) {
				local_irq_restore(flags);
				goto iommu_map_failed;
			}

			paddr += IO_PAGE_SIZE;
			dma_base += IO_PAGE_SIZE;
		}
	}

	err = iommu_batch_end();

	local_irq_restore(flags);

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	return nelems;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;

iommu_map_failed:
	spin_lock_irqsave(&iommu->lock, flags);
	arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	unsigned long flags, npages;
	struct pci_pbm_info *pbm;
	u32 devhandle, bus_addr;
	struct iommu *iommu;
	long entry;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		/* Nothing was mapped, so nothing to tear down.  */
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	npages = calc_npages(sglist, nelems);

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4v_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	/* Nothing to do... */
}

static void dma_4v_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	/* Nothing to do... */
}
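
/* The sync operations above are empty because DMA on these systems is
 * cache coherent from the CPU's point of view; only map and unmap
 * require hypervisor work.
 */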
const struct dma_ops sun4v_dma_ops = {
	.alloc_coherent		= dma_4v_alloc_coherent,
	.free_coherent		= dma_4v_free_coherent,
	.map_single		= dma_4v_map_single,
	.unmap_single		= dma_4v_unmap_single,
	.map_sg			= dma_4v_map_sg,
	.unmap_sg		= dma_4v_unmap_sg,
	.sync_single_for_cpu	= dma_4v_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4v_sync_sg_for_cpu,
};

static void __init pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->prom_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm);

	/* XXX register error interrupt handlers XXX */
}
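
/* Scan the IOTSB with the iommu_getmap hypervisor call and import any
 * translations the firmware left installed.  Entries whose real
 * address falls inside physically available memory are stale and are
 * demapped; the rest (for example, an active firmware console mapping)
 * are marked busy in the arena so the allocator never hands them out.
 * Returns the number of entries imported.
 */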
static unsigned long __init probe_existing_entries(struct pci_pbm_info *pbm,
						   struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}

static void __init pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct iommu *iommu = pbm->iommu;
	struct property *prop;
	unsigned long num_tsb_entries, sz, tsbsize;
	u32 vdma[2], dma_mask, dma_offset;

	prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
	if (prop) {
		u32 *val = prop->value;

		vdma[0] = val[0];
		vdma[1] = val[1];
	} else {
		/* No property, use default values.  */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		prom_printf("PCI-SUN4V: strange virtual-dma[%08x:%08x].\n",
			    vdma[0], vdma[1]);
		prom_halt();
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
	tsbsize = num_tsb_entries * sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state.  */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map.  */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kzalloc(arena.map) failed.\n");
		prom_halt();
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};
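
/* Read the current head of an MSI event queue from the hypervisor and
 * bounds check it against the queue size.  Heads are byte offsets into
 * the queue, so the limit is msiq_ent_count scaled by the entry size.
 */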
static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}
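
/* Consume the queue entry at byte offset *head.  Returns 1 and
 * advances *head (wrapping at the end of the queue) when an MSI32 or
 * MSI64 entry was dequeued, 0 when the entry at *head is empty, or a
 * negative errno on a malformed entry or hypervisor failure.
 */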
static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset.  */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}
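
/* All of this PBM's MSI event queues are carved out of a single
 * physically contiguous __get_free_pages() allocation; queue N lives
 * at msi_queues + N * q_size.  Each queue's real address and entry
 * count are programmed into the hypervisor, then read back to verify
 * that the configuration took effect.
 */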
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}
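
/* Build a virtual IRQ for the devino that signals this event queue,
 * then mark the queue idle and valid so the hypervisor will begin
 * delivering MSI events into it.
 */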
static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!virt_irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;
	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;

	return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	= pci_sun4v_get_head,
	.dequeue_msi	= pci_sun4v_dequeue_msi,
	.set_head	= pci_sun4v_set_head,
	.msi_setup	= pci_sun4v_msi_setup,
	.msi_teardown	= pci_sun4v_msi_teardown,
	.msiq_alloc	= pci_sun4v_msiq_alloc,
	.msiq_free	= pci_sun4v_msiq_free,
	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */
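
/* A pci_controller_info hosts a pair of PBMs whose device handles
 * differ only in bit 0x40; sun4v_pci_init() relies on this to match a
 * newly probed OBP node with an already initialized sibling via
 * (devhandle ^ 0x40).
 */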
static void __init pci_sun4v_pbm_init(struct pci_controller_info *p,
				      struct device_node *dp, u32 devhandle)
{
	struct pci_pbm_info *pbm;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	pbm->scan_bus = pci_sun4v_scan_bus;
	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->parent = p;
	pbm->prom_node = dp;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);
	pci_sun4v_iommu_init(pbm);
	pci_sun4v_msi_init(pbm);
}

void __init sun4v_pci_init(struct device_node *dp, char *model_name)
{
	static int hvapi_negotiated = 0;
	struct pci_controller_info *p;
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct property *prop;
	struct linux_prom64_registers *regs;
	u32 devhandle;
	int i;

	if (!hvapi_negotiated++) {
		int err = sun4v_hvapi_register(HV_GRP_PCI,
					       vpci_major,
					       &vpci_minor);

		if (err) {
			prom_printf("SUN4V_PCI: Could not register hvapi, "
				    "err=%d\n", err);
			prom_halt();
		}
		printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	prop = of_find_property(dp, "reg", NULL);
	if (!prop) {
		prom_printf("SUN4V_PCI: Could not find config registers\n");
		prom_halt();
	}
	regs = prop->value;

	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(pbm->parent, dp, devhandle);
			return;
		}
	}

	for_each_possible_cpu(i) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(iommu_batch, i).pglist = (u64 *) page;
	}

	p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_A.iommu = iommu;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_B.iommu = iommu;

	pci_sun4v_pbm_init(p, dp, devhandle);
	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}