Merge branch 'akpm' (patches from Andrew)
[deliverable/linux.git] / arch / alpha / kernel / sys_marvel.c
1 /*
2 * linux/arch/alpha/kernel/sys_marvel.c
3 *
4 * Marvel / IO7 support
5 */
6
7 #include <linux/kernel.h>
8 #include <linux/types.h>
9 #include <linux/mm.h>
10 #include <linux/sched.h>
11 #include <linux/pci.h>
12 #include <linux/init.h>
13 #include <linux/bitops.h>
14
15 #include <asm/ptrace.h>
16 #include <asm/dma.h>
17 #include <asm/irq.h>
18 #include <asm/mmu_context.h>
19 #include <asm/io.h>
20 #include <asm/pgtable.h>
21 #include <asm/core_marvel.h>
22 #include <asm/hwrpb.h>
23 #include <asm/tlbflush.h>
24 #include <asm/vga.h>
25
26 #include "proto.h"
27 #include "err_impl.h"
28 #include "irq_impl.h"
29 #include "pci_impl.h"
30 #include "machvec_impl.h"
31
32 #if NR_IRQS < MARVEL_NR_IRQS
33 # error NR_IRQS < MARVEL_NR_IRQS !!!
34 #endif
35
36 \f
37 /*
38 * Interrupt handling.
39 */
40 static void
41 io7_device_interrupt(unsigned long vector)
42 {
43 unsigned int pid;
44 unsigned int irq;
45
46 /*
47 * Vector is 0x800 + (interrupt)
48 *
49 * where (interrupt) is:
50 *
51 * ...16|15 14|13 4|3 0
52 * -----+-----+--------+---
53 * PE | 0 | irq | 0
54 *
55 * where (irq) is
56 *
57 * 0x0800 - 0x0ff0 - 0x0800 + (LSI id << 4)
58 * 0x1000 - 0x2ff0 - 0x1000 + (MSI_DAT<8:0> << 4)
59 */
60 pid = vector >> 16;
61 irq = ((vector & 0xffff) - 0x800) >> 4;
62
63 irq += 16; /* offset for legacy */
64 irq &= MARVEL_IRQ_VEC_IRQ_MASK; /* not too many bits */
65 irq |= pid << MARVEL_IRQ_VEC_PE_SHIFT; /* merge the pid */
66
67 handle_irq(irq);
68 }
69
70 static volatile unsigned long *
71 io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
72 {
73 volatile unsigned long *ctl;
74 unsigned int pid;
75 struct io7 *io7;
76
77 pid = irq >> MARVEL_IRQ_VEC_PE_SHIFT;
78
79 if (!(io7 = marvel_find_io7(pid))) {
80 printk(KERN_ERR
81 "%s for nonexistent io7 -- vec %x, pid %d\n",
82 __func__, irq, pid);
83 return NULL;
84 }
85
86 irq &= MARVEL_IRQ_VEC_IRQ_MASK; /* isolate the vector */
87 irq -= 16; /* subtract legacy bias */
88
89 if (irq >= 0x180) {
90 printk(KERN_ERR
91 "%s for invalid irq -- pid %d adjusted irq %x\n",
92 __func__, pid, irq);
93 return NULL;
94 }
95
96 ctl = &io7->csrs->PO7_LSI_CTL[irq & 0xff].csr; /* assume LSI */
97 if (irq >= 0x80) /* MSI */
98 ctl = &io7->csrs->PO7_MSI_CTL[((irq - 0x80) >> 5) & 0x0f].csr;
99
100 if (pio7) *pio7 = io7;
101 return ctl;
102 }
103
104 static void
105 io7_enable_irq(struct irq_data *d)
106 {
107 volatile unsigned long *ctl;
108 unsigned int irq = d->irq;
109 struct io7 *io7;
110
111 ctl = io7_get_irq_ctl(irq, &io7);
112 if (!ctl || !io7) {
113 printk(KERN_ERR "%s: get_ctl failed for irq %x\n",
114 __func__, irq);
115 return;
116 }
117
118 spin_lock(&io7->irq_lock);
119 *ctl |= 1UL << 24;
120 mb();
121 *ctl;
122 spin_unlock(&io7->irq_lock);
123 }
124
125 static void
126 io7_disable_irq(struct irq_data *d)
127 {
128 volatile unsigned long *ctl;
129 unsigned int irq = d->irq;
130 struct io7 *io7;
131
132 ctl = io7_get_irq_ctl(irq, &io7);
133 if (!ctl || !io7) {
134 printk(KERN_ERR "%s: get_ctl failed for irq %x\n",
135 __func__, irq);
136 return;
137 }
138
139 spin_lock(&io7->irq_lock);
140 *ctl &= ~(1UL << 24);
141 mb();
142 *ctl;
143 spin_unlock(&io7->irq_lock);
144 }
145
/* Placeholder for irq_chip hooks that require no action. */
static void
marvel_irq_noop(struct irq_data *d)
{
}
151
/* The 16 legacy irqs: no per-irq CSR to manipulate here, so mask and
   unmask are no-ops. */
static struct irq_chip marvel_legacy_irq_type = {
	.name = "LEGACY",
	.irq_mask = marvel_irq_noop,
	.irq_unmask = marvel_irq_noop,
};
157
/* IO7 level-sensitive interrupts: (un)masked via the enable bit in the
   per-irq LSI_CTL CSR.  There is no separate ack step, so mask_ack
   simply masks. */
static struct irq_chip io7_lsi_irq_type = {
	.name = "LSI",
	.irq_unmask = io7_enable_irq,
	.irq_mask = io7_disable_irq,
	.irq_mask_ack = io7_disable_irq,
};
164
/* IO7 message-signalled interrupts: (un)masked via the shared MSI_CTL
   CSR; ack requires no hardware action. */
static struct irq_chip io7_msi_irq_type = {
	.name = "MSI",
	.irq_unmask = io7_enable_irq,
	.irq_mask = io7_disable_irq,
	.irq_ack = marvel_irq_noop,
};
171
/*
 * Point an error-interrupt CSR (HLT/HPI/CRD/STV/HEI) at a new target
 * PID.  These CSRs hold the 9-bit target PID at bit 24.
 */
static void
io7_redirect_irq(struct io7 *io7,
		 volatile unsigned long *csr,
		 unsigned int where)
{
	unsigned long ctl = *csr;

	ctl &= ~(0x1ffUL << 24);		/* clear the target pid   */
	ctl |= (unsigned long)where << 24;	/* set the new target pid */

	*csr = ctl;
	mb();
	*csr;					/* read back to post */
}
187
188 static void
189 io7_redirect_one_lsi(struct io7 *io7, unsigned int which, unsigned int where)
190 {
191 unsigned long val;
192
193 /*
194 * LSI_CTL has target PID @ 14
195 */
196 val = io7->csrs->PO7_LSI_CTL[which].csr;
197 val &= ~(0x1ffUL << 14); /* clear the target pid */
198 val |= ((unsigned long)where << 14); /* set the new target pid */
199
200 io7->csrs->PO7_LSI_CTL[which].csr = val;
201 mb();
202 io7->csrs->PO7_LSI_CTL[which].csr;
203 }
204
205 static void
206 io7_redirect_one_msi(struct io7 *io7, unsigned int which, unsigned int where)
207 {
208 unsigned long val;
209
210 /*
211 * MSI_CTL has target PID @ 14
212 */
213 val = io7->csrs->PO7_MSI_CTL[which].csr;
214 val &= ~(0x1ffUL << 14); /* clear the target pid */
215 val |= ((unsigned long)where << 14); /* set the new target pid */
216
217 io7->csrs->PO7_MSI_CTL[which].csr = val;
218 mb();
219 io7->csrs->PO7_MSI_CTL[which].csr;
220 }
221
222 static void __init
223 init_one_io7_lsi(struct io7 *io7, unsigned int which, unsigned int where)
224 {
225 /*
226 * LSI_CTL has target PID @ 14
227 */
228 io7->csrs->PO7_LSI_CTL[which].csr = ((unsigned long)where << 14);
229 mb();
230 io7->csrs->PO7_LSI_CTL[which].csr;
231 }
232
233 static void __init
234 init_one_io7_msi(struct io7 *io7, unsigned int which, unsigned int where)
235 {
236 /*
237 * MSI_CTL has target PID @ 14
238 */
239 io7->csrs->PO7_MSI_CTL[which].csr = ((unsigned long)where << 14);
240 mb();
241 io7->csrs->PO7_MSI_CTL[which].csr;
242 }
243
244 static void __init
245 init_io7_irqs(struct io7 *io7,
246 struct irq_chip *lsi_ops,
247 struct irq_chip *msi_ops)
248 {
249 long base = (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT) + 16;
250 long i;
251
252 printk("Initializing interrupts for IO7 at PE %u - base %lx\n",
253 io7->pe, base);
254
255 /*
256 * Where should interrupts from this IO7 go?
257 *
258 * They really should be sent to the local CPU to avoid having to
259 * traverse the mesh, but if it's not an SMP kernel, they have to
260 * go to the boot CPU. Send them all to the boot CPU for now,
261 * as each secondary starts, it can redirect it's local device
262 * interrupts.
263 */
264 printk(" Interrupts reported to CPU at PE %u\n", boot_cpuid);
265
266 spin_lock(&io7->irq_lock);
267
268 /* set up the error irqs */
269 io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, boot_cpuid);
270 io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, boot_cpuid);
271 io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, boot_cpuid);
272 io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, boot_cpuid);
273 io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, boot_cpuid);
274
275 /* Set up the lsi irqs. */
276 for (i = 0; i < 128; ++i) {
277 irq_set_chip_and_handler(base + i, lsi_ops, handle_level_irq);
278 irq_set_status_flags(i, IRQ_LEVEL);
279 }
280
281 /* Disable the implemented irqs in hardware. */
282 for (i = 0; i < 0x60; ++i)
283 init_one_io7_lsi(io7, i, boot_cpuid);
284
285 init_one_io7_lsi(io7, 0x74, boot_cpuid);
286 init_one_io7_lsi(io7, 0x75, boot_cpuid);
287
288
289 /* Set up the msi irqs. */
290 for (i = 128; i < (128 + 512); ++i) {
291 irq_set_chip_and_handler(base + i, msi_ops, handle_level_irq);
292 irq_set_status_flags(i, IRQ_LEVEL);
293 }
294
295 for (i = 0; i < 16; ++i)
296 init_one_io7_msi(io7, i, boot_cpuid);
297
298 spin_unlock(&io7->irq_lock);
299 }
300
301 static void __init
302 marvel_init_irq(void)
303 {
304 int i;
305 struct io7 *io7 = NULL;
306
307 /* Reserve the legacy irqs. */
308 for (i = 0; i < 16; ++i) {
309 irq_set_chip_and_handler(i, &marvel_legacy_irq_type,
310 handle_level_irq);
311 }
312
313 /* Init the io7 irqs. */
314 for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; )
315 init_io7_irqs(io7, &io7_lsi_irq_type, &io7_msi_irq_type);
316 }
317
/*
 * Map a PCI device interrupt to a kernel irq number.
 *
 * Default is the console-programmed PCI_INTERRUPT_LINE value (an LSI).
 * If the device has its MSI capability enabled, the irq is instead
 * derived from the MSI data value (msg_data<8:0>), offset by 0x80 to
 * land in the IO7's MSI vector range above the 0x80 LSIs.  Either way
 * the result is biased past the 16 legacy irqs and merged with the
 * owning IO7's PE number, matching io7_device_interrupt()'s decoding.
 */
static int
marvel_map_irq(const struct pci_dev *cdev, u8 slot, u8 pin)
{
	struct pci_dev *dev = (struct pci_dev *)cdev;
	struct pci_controller *hose = dev->sysdata;
	struct io7_port *io7_port = hose->sysdata;
	struct io7 *io7 = io7_port->io7;
	int msi_loc, msi_data_off;
	u16 msg_ctl;
	u16 msg_dat;
	u8 intline;
	int irq;

	/* Start from the console-assigned interrupt line. */
	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
	irq = intline;

	/* Read the MSI control word, if the device has the capability. */
	msi_loc = dev->msi_cap;
	msg_ctl = 0;
	if (msi_loc)
		pci_read_config_word(dev, msi_loc + PCI_MSI_FLAGS, &msg_ctl);

	if (msg_ctl & PCI_MSI_FLAGS_ENABLE) {
		/* MSI data offset depends on 32- vs 64-bit capability. */
		msi_data_off = PCI_MSI_DATA_32;
		if (msg_ctl & PCI_MSI_FLAGS_64BIT)
			msi_data_off = PCI_MSI_DATA_64;
		pci_read_config_word(dev, msi_loc + msi_data_off, &msg_dat);

		irq = msg_dat & 0x1ff;	/* we use msg_data<8:0> */
		irq += 0x80;		/* offset past the 0x80 LSIs into
					   the MSI vector range */

#if 1
		printk("PCI:%d:%d:%d (hose %d) is using MSI\n",
		       dev->bus->number,
		       PCI_SLOT(dev->devfn),
		       PCI_FUNC(dev->devfn),
		       hose->index);
		printk(" %d message(s) from 0x%04x\n",
		       1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4),
		       msg_dat);
		printk(" reporting on %d IRQ(s) from %d (0x%x)\n",
		       1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4),
		       (irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT),
		       (irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT));
#endif

#if 0
		/* Debug aid: disable MSI and fall back to LSI routing. */
		pci_write_config_word(dev, msi_loc + PCI_MSI_FLAGS,
				      msg_ctl & ~PCI_MSI_FLAGS_ENABLE);
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
		irq = intline;

		printk(" forcing LSI interrupt on irq %d [0x%x]\n", irq, irq);
#endif
	}

	irq += 16;					/* offset for legacy */
	irq |= io7->pe << MARVEL_IRQ_VEC_PE_SHIFT;	/* merge the pid    */

	return irq;
}
378
379 static void __init
380 marvel_init_pci(void)
381 {
382 struct io7 *io7;
383
384 marvel_register_error_handlers();
385
386 /* Indicate that we trust the console to configure things properly */
387 pci_set_flags(PCI_PROBE_ONLY);
388 common_init_pci();
389 locate_and_init_vga(NULL);
390
391 /* Clear any io7 errors. */
392 for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; )
393 io7_clear_errors(io7);
394 }
395
396 static void __init
397 marvel_init_rtc(void)
398 {
399 init_rtc_irq();
400 }
401
402 static void
403 marvel_smp_callin(void)
404 {
405 int cpuid = hard_smp_processor_id();
406 struct io7 *io7 = marvel_find_io7(cpuid);
407 unsigned int i;
408
409 if (!io7)
410 return;
411
412 /*
413 * There is a local IO7 - redirect all of its interrupts here.
414 */
415 printk("Redirecting IO7 interrupts to local CPU at PE %u\n", cpuid);
416
417 /* Redirect the error IRQS here. */
418 io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, cpuid);
419 io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, cpuid);
420 io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, cpuid);
421 io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, cpuid);
422 io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, cpuid);
423
424 /* Redirect the implemented LSIs here. */
425 for (i = 0; i < 0x60; ++i)
426 io7_redirect_one_lsi(io7, i, cpuid);
427
428 io7_redirect_one_lsi(io7, 0x74, cpuid);
429 io7_redirect_one_lsi(io7, 0x75, cpuid);
430
431 /* Redirect the MSIs here. */
432 for (i = 0; i < 16; ++i)
433 io7_redirect_one_msi(io7, i, cpuid);
434 }
435 \f
436 /*
437 * System Vectors
438 */
/* Machine vector for Marvel/EV7 platforms, wiring this file's irq, pci,
   rtc, and SMP-callin hooks into the generic alpha machine layer. */
struct alpha_machine_vector marvel_ev7_mv __initmv = {
	.vector_name = "MARVEL/EV7",
	DO_EV7_MMU,
	.rtc_port = 0x70,
	.rtc_boot_cpu_only = 1,
	DO_MARVEL_IO,
	.machine_check = marvel_machine_check,
	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address = DEFAULT_IO_BASE,
	.min_mem_address = DEFAULT_MEM_BASE,
	.pci_dac_offset = IO7_DAC_OFFSET,

	.nr_irqs = MARVEL_NR_IRQS,
	/* Decodes IO7 vectors (see io7_device_interrupt above). */
	.device_interrupt = io7_device_interrupt,

	.agp_info = marvel_agp_info,

	.smp_callin = marvel_smp_callin,
	.init_arch = marvel_init_arch,
	.init_irq = marvel_init_irq,
	.init_rtc = marvel_init_rtc,
	.init_pci = marvel_init_pci,
	.kill_arch = marvel_kill_arch,
	.pci_map_irq = marvel_map_irq,
	.pci_swizzle = common_swizzle,

	.pa_to_nid = marvel_pa_to_nid,
	.cpuid_to_nid = marvel_cpuid_to_nid,
	.node_mem_start = marvel_node_mem_start,
	.node_mem_size = marvel_node_mem_size,
};
ALIAS_MV(marvel_ev7)
This page took 0.047212 seconds and 5 git commands to generate.