tile/PCI: use for_each_pci_dev to simplify the code
arch/tile/kernel/pci.c
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/export.h>

#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/byteorder.h>
#include <asm/hv_driver.h>
#include <hv/drv_pcie_rc_intf.h>

/*
 * Initialization flow and process
 * -------------------------------
 *
 * This file contains the routines to search for PCI buses,
 * enumerate the buses, and configure any attached devices.
 *
 * There are two entry points here:
 * 1) tile_pci_init
 *    This sets up the pci_controller structs, and opens the
 *    FDs to the hypervisor.  This is called from setup_arch() early
 *    in the boot process.
 * 2) pcibios_init
 *    This probes the PCI bus(es) for any attached hardware.  It's
 *    called by subsys_initcall.  All of the real work is done by the
 *    generic Linux PCI layer.
 */

/*
 * This flag tells whether the platform is a TILEmpower system, which
 * needs special configuration for the PLX switch chip.
 */
int __write_once tile_plx_gen1;

static struct pci_controller controllers[TILE_NUM_PCIE];
static int num_controllers;
static int pci_scan_flags[TILE_NUM_PCIE];

static struct pci_ops tile_cfg_ops;

/*
 * We don't need to worry about the alignment of resources.
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
                                       resource_size_t size, resource_size_t align)
{
        return res->start;
}
EXPORT_SYMBOL(pcibios_align_resource);

/*
 * Open an FD to the hypervisor PCI device.
 *
 * controller_id is the controller number; config_type is 0 or 1 for
 * config0 or config1 operations.
 */
static int __devinit tile_pcie_open(int controller_id, int config_type)
{
        char filename[32];
        int fd;

        sprintf(filename, "pcie/%d/config%d", controller_id, config_type);

        fd = hv_dev_open((HV_VirtAddr)filename, 0);

        return fd;
}

/*
 * Get the IRQ numbers from the HV and set up the handlers for them.
 */
static int __devinit tile_init_irqs(int controller_id,
                                    struct pci_controller *controller)
{
        char filename[32];
        int fd;
        int ret;
        int x;
        struct pcie_rc_config rc_config;

        sprintf(filename, "pcie/%d/ctl", controller_id);
        fd = hv_dev_open((HV_VirtAddr)filename, 0);
        if (fd < 0) {
                pr_err("PCI: hv_dev_open(%s) failed\n", filename);
                return -1;
        }
        ret = hv_dev_pread(fd, 0, (HV_VirtAddr)(&rc_config),
                           sizeof(rc_config), PCIE_RC_CONFIG_MASK_OFF);
        hv_dev_close(fd);
        if (ret != sizeof(rc_config)) {
                pr_err("PCI: wanted %zd bytes, got %d\n",
                       sizeof(rc_config), ret);
                return -1;
        }
        /* Record irq_base so that we can map INTx to IRQ # later. */
        controller->irq_base = rc_config.intr;

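        /*
         * Activate the four legacy INTx (INTA-INTD) interrupts
         * delivered through this root complex.
         */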
        for (x = 0; x < 4; x++)
                tile_irq_activate(rc_config.intr + x,
                                  TILE_IRQ_HW_CLEAR);

        if (rc_config.plx_gen1)
                controller->plx_gen1 = 1;

        return 0;
}

/*
 * First initialization entry point, called from setup_arch().
 *
 * Find valid controllers and fill in pci_controller structs for each
 * of them.
 *
 * Returns the number of controllers discovered.
 */
int __init tile_pci_init(void)
{
        int i;

        pr_info("PCI: Searching for controllers...\n");

        /* Re-init number of PCIe controllers to support hot-plug feature. */
        num_controllers = 0;

        /* Do any configuration we need before using the PCIe. */

        for (i = 0; i < TILE_NUM_PCIE; i++) {
                /*
                 * Only look at controllers that pcibios_init() has not
                 * already scanned, so that PCIe hot-plug works.
                 */
                if (pci_scan_flags[i] == 0) {
                        int hv_cfg_fd0 = -1;
                        int hv_cfg_fd1 = -1;
                        int hv_mem_fd = -1;
                        char name[32];
                        struct pci_controller *controller;

                        /*
                         * Open the fd to the HV.  If it fails then this
                         * device doesn't exist.
                         */
                        hv_cfg_fd0 = tile_pcie_open(i, 0);
                        if (hv_cfg_fd0 < 0)
                                continue;
                        hv_cfg_fd1 = tile_pcie_open(i, 1);
                        if (hv_cfg_fd1 < 0) {
                                pr_err("PCI: Couldn't open config fd to HV "
                                       "for controller %d\n", i);
                                goto err_cont;
                        }

                        sprintf(name, "pcie/%d/mem", i);
                        hv_mem_fd = hv_dev_open((HV_VirtAddr)name, 0);
                        if (hv_mem_fd < 0) {
                                pr_err("PCI: Could not open mem fd to HV!\n");
                                goto err_cont;
                        }

                        pr_info("PCI: Found PCI controller #%d\n", i);

                        controller = &controllers[i];

                        controller->index = i;
                        controller->hv_cfg_fd[0] = hv_cfg_fd0;
                        controller->hv_cfg_fd[1] = hv_cfg_fd1;
                        controller->hv_mem_fd = hv_mem_fd;
                        controller->first_busno = 0;
                        controller->last_busno = 0xff;
                        controller->ops = &tile_cfg_ops;

                        num_controllers++;
                        continue;

err_cont:
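                        /* Close whatever HV fds were opened for this controller. */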
                        if (hv_cfg_fd0 >= 0)
                                hv_dev_close(hv_cfg_fd0);
                        if (hv_cfg_fd1 >= 0)
                                hv_dev_close(hv_cfg_fd1);
                        if (hv_mem_fd >= 0)
                                hv_dev_close(hv_mem_fd);
                        continue;
                }
        }

        /*
         * Before using the PCIe, see if we need to do any platform-specific
         * configuration, such as the PLX switch Gen 1 issue on TILEmpower.
         */
        for (i = 0; i < num_controllers; i++) {
                struct pci_controller *controller = &controllers[i];

                if (controller->plx_gen1)
                        tile_plx_gen1 = 1;
        }

        return num_controllers;
}

/*
 * (pin - 1) converts from the PCI standard's [1:4] convention to
 * a normal [0:3] range.
 */
static int tile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
        struct pci_controller *controller =
                (struct pci_controller *)dev->sysdata;
        return (pin - 1) + controller->irq_base;
}


static void __devinit fixup_read_and_payload_sizes(void)
{
        struct pci_dev *dev = NULL;
        int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */
        int max_read_size = 0x2; /* Limit to 512 byte reads. */
        u16 new_values;

        /* Scan for the smallest maximum payload size. */
        for_each_pci_dev(dev) {
                u32 devcap;
                int max_payload;

                if (!pci_is_pcie(dev))
                        continue;

                pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &devcap);
                max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD;
                if (max_payload < smallest_max_payload)
                        smallest_max_payload = max_payload;
        }

        /* Now set the max payload size and max read request size for all devices. */
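        /*
         * Both fields are encoded as 128 << n bytes and live in the PCIe
         * Device Control register: payload size in bits 7:5, read request
         * size in bits 14:12, hence the shifts below.
         */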
        new_values = (max_read_size << 12) | (smallest_max_payload << 5);
        for_each_pci_dev(dev)
                pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
                        PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ,
                        new_values);
}


/*
 * Second PCI initialization entry point, called by subsys_initcall.
 *
 * The controllers have been set up by the time we get here, by a call to
 * tile_pci_init.
 */
int __init pcibios_init(void)
{
        int i;

        pr_info("PCI: Probing PCI hardware\n");

        /*
         * Delay a bit in case devices aren't ready.  Some devices are
         * known to require at least 20ms here, but we use a more
         * conservative value.
         */
        mdelay(250);

        /* Scan all of the recorded PCI controllers. */
        for (i = 0; i < TILE_NUM_PCIE; i++) {
                /*
                 * Only do the real pcibios init ops for controllers that
                 * tile_pci_init() set up successfully and that we have not
                 * yet scanned, so that PCIe hot-plug works.
                 */
                if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) {
                        struct pci_controller *controller = &controllers[i];
                        struct pci_bus *bus;
                        LIST_HEAD(resources);

                        if (tile_init_irqs(i, controller)) {
                                pr_err("PCI: Could not initialize IRQs\n");
                                continue;
                        }

                        pr_info("PCI: initializing controller #%d\n", i);

                        /*
                         * This comes from the generic Linux PCI layer.
                         *
                         * pci_scan_root_bus(), in drivers/pci/probe.c, reads
                         * the PCI tree for this bus into the Linux data
                         * structures.
                         */
                        pci_add_resource(&resources, &ioport_resource);
                        pci_add_resource(&resources, &iomem_resource);
                        bus = pci_scan_root_bus(NULL, 0, controller->ops,
                                                controller, &resources);
                        controller->root_bus = bus;
                        controller->last_busno = bus->busn_res.end;
                }
        }

        /* Do machine dependent PCI interrupt routing */
        pci_fixup_irqs(pci_common_swizzle, tile_map_irq);

        /*
         * This comes from the generic Linux PCI layer.
         *
         * It allocates all of the resources (I/O memory, etc)
         * associated with the devices read in above.
         */
        pci_assign_unassigned_resources();

        /* Configure the max_read_size and max_payload_size values. */
        fixup_read_and_payload_sizes();

        /* Record the I/O resources in the PCI controller structure. */
        for (i = 0; i < TILE_NUM_PCIE; i++) {
                /*
                 * Only record resources for controllers that tile_pci_init()
                 * set up successfully and that have not yet been marked as
                 * scanned, so that PCIe hot-plug works.
                 */
                if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) {
                        struct pci_bus *root_bus = controllers[i].root_bus;
                        struct pci_bus *next_bus;
                        struct pci_dev *dev;

                        list_for_each_entry(dev, &root_bus->devices, bus_list) {
                                /*
                                 * Find the PCI host controller, i.e. the
                                 * first bridge.
                                 */
                                if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
                                    (PCI_SLOT(dev->devfn) == 0)) {
                                        next_bus = dev->subordinate;
                                        controllers[i].mem_resources[0] =
                                                *next_bus->resource[0];
                                        controllers[i].mem_resources[1] =
                                                *next_bus->resource[1];
                                        controllers[i].mem_resources[2] =
                                                *next_bus->resource[2];

                                        /* Set up flags. */
                                        pci_scan_flags[i] = 1;

                                        break;
                                }
                        }
                }
        }

        return 0;
}
subsys_initcall(pcibios_init);

/*
 * No bus fixups needed.
 */
void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
        /* Nothing needs to be done. */
}

void pcibios_set_master(struct pci_dev *dev)
{
        /* No special bus mastering setup handling. */
}

/*
 * Enable memory and/or I/O decoding, as appropriate, for the
 * device described by the 'dev' struct.
 *
 * This is called from the generic PCI layer, and can be called
 * for bridges or endpoints.
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
        u16 cmd, old_cmd;
        u8 header_type;
        int i;
        struct resource *r;

        pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);

        pci_read_config_word(dev, PCI_COMMAND, &cmd);
        old_cmd = cmd;
        if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
                /*
                 * For bridges, we enable both memory and I/O decoding
                 * in all cases.
                 */
                cmd |= PCI_COMMAND_IO;
                cmd |= PCI_COMMAND_MEMORY;
        } else {
                /*
                 * For endpoints, we enable memory and/or I/O decoding
                 * only if they have a resource of that type.
                 */
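                /* Resources 0-5 are the device's six standard BARs. */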
                for (i = 0; i < 6; i++) {
                        r = &dev->resource[i];
                        if (r->flags & IORESOURCE_UNSET) {
                                pr_err("PCI: Device %s not available "
                                       "because of resource collisions\n",
                                       pci_name(dev));
                                return -EINVAL;
                        }
                        if (r->flags & IORESOURCE_IO)
                                cmd |= PCI_COMMAND_IO;
                        if (r->flags & IORESOURCE_MEM)
                                cmd |= PCI_COMMAND_MEMORY;
                }
        }

        /*
         * We only write the command if it changed.
         */
        if (cmd != old_cmd)
                pci_write_config_word(dev, PCI_COMMAND, cmd);
        return 0;
}

/****************************************************************
 *
 * Tile PCI config space read/write routines
 *
 ****************************************************************/

/*
 * These are the normal read and write ops.  They are reached via
 * wrappers such as pci_bus_read_config_byte() etc.
 *
 * devfn is the combined PCI slot & function.
 *
 * offset is in bytes, from the start of config space for the
 * specified bus & slot.
 */

static int __devinit tile_cfg_read(struct pci_bus *bus,
                                   unsigned int devfn,
                                   int offset,
                                   int size,
                                   u32 *val)
{
        struct pci_controller *controller = bus->sysdata;
        int busnum = bus->number & 0xff;
        int slot = (devfn >> 3) & 0x1f;
        int function = devfn & 0x7;
        u32 addr;
        int config_mode = 1;

        /*
         * There is no bridge between the Tile and bus 0, so we
         * use config0 to talk to bus 0.
         *
         * If we're talking to a bus other than zero then we
         * must have found a bridge.
         */
        if (busnum == 0) {
                /*
                 * We fake an empty slot for (busnum == 0) && (slot > 0),
                 * since there is only one slot on bus 0.
                 */
                if (slot) {
                        *val = 0xFFFFFFFF;
                        return 0;
                }
                config_mode = 0;
        }

        addr = busnum << 20;            /* Bus in 27:20 */
        addr |= slot << 15;             /* Slot (device) in 19:15 */
        addr |= function << 12;         /* Function is in 14:12 */
        addr |= (offset & 0xFFF);       /* Byte address in 11:0 */
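        /*
         * For example, bus 1, slot 0, function 0, offset 0x10
         * yields addr 0x00100010.
         */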

        return hv_dev_pread(controller->hv_cfg_fd[config_mode], 0,
                            (HV_VirtAddr)(val), size, addr);
}


/*
 * See tile_cfg_read() for relevant comments.
 * Note that "val" is the value to write, not a pointer to that value.
 */
static int __devinit tile_cfg_write(struct pci_bus *bus,
                                    unsigned int devfn,
                                    int offset,
                                    int size,
                                    u32 val)
{
        struct pci_controller *controller = bus->sysdata;
        int busnum = bus->number & 0xff;
        int slot = (devfn >> 3) & 0x1f;
        int function = devfn & 0x7;
        u32 addr;
        int config_mode = 1;
        HV_VirtAddr valp = (HV_VirtAddr)&val;

        /*
         * For bus 0 slot 0 we use config 0 accesses.
         */
        if (busnum == 0) {
                /*
                 * We fake an empty slot for (busnum == 0) && (slot > 0),
                 * since there is only one slot on bus 0.
                 */
                if (slot)
                        return 0;
                config_mode = 0;
        }

        addr = busnum << 20;            /* Bus in 27:20 */
        addr |= slot << 15;             /* Slot (device) in 19:15 */
        addr |= function << 12;         /* Function is in 14:12 */
        addr |= (offset & 0xFFF);       /* Byte address in 11:0 */

#ifdef __BIG_ENDIAN
        /* Point to the correct part of the 32-bit "val". */
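        /*
         * hv_dev_pwrite() sends "size" bytes starting at valp, and a
         * big-endian u32 keeps its least-significant bytes at the end
         * of the word, so skip the leading bytes.
         */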
        valp += 4 - size;
#endif

        return hv_dev_pwrite(controller->hv_cfg_fd[config_mode], 0,
                             valp, size, addr);
}


static struct pci_ops tile_cfg_ops = {
        .read = tile_cfg_read,
        .write = tile_cfg_write,
};

/*
 * In the following, each PCI controller's mem_resources[1]
 * represents its (non-prefetchable) PCI memory resource.
 * mem_resources[0] and mem_resources[2] refer to its PCI I/O and
 * prefetchable PCI memory resources, respectively.
 * For more details, see pci_setup_bridge() in setup-bus.c.
 * By comparing the target PCI memory address against the
 * end address of controller 0, we can determine the controller
 * that should accept the PCI memory access.
 */
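/*
 * These helpers back the readb()/writeb() style MMIO accessors that
 * asm/io.h provides for PCI space on tile.
 */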
#define TILE_READ(size, type) \
type _tile_read##size(unsigned long addr) \
{ \
        type val; \
        int idx = 0; \
        if (addr > controllers[0].mem_resources[1].end && \
            addr > controllers[0].mem_resources[2].end) \
                idx = 1; \
        if (hv_dev_pread(controllers[idx].hv_mem_fd, 0, \
                         (HV_VirtAddr)(&val), sizeof(type), addr)) \
                pr_err("PCI: read %zd bytes at 0x%lX failed\n", \
                       sizeof(type), addr); \
        return val; \
} \
EXPORT_SYMBOL(_tile_read##size)

TILE_READ(b, u8);
TILE_READ(w, u16);
TILE_READ(l, u32);
TILE_READ(q, u64);

#define TILE_WRITE(size, type) \
void _tile_write##size(type val, unsigned long addr) \
{ \
        int idx = 0; \
        if (addr > controllers[0].mem_resources[1].end && \
            addr > controllers[0].mem_resources[2].end) \
                idx = 1; \
        if (hv_dev_pwrite(controllers[idx].hv_mem_fd, 0, \
                          (HV_VirtAddr)(&val), sizeof(type), addr)) \
                pr_err("PCI: write %zd bytes at 0x%lX failed\n", \
                       sizeof(type), addr); \
} \
EXPORT_SYMBOL(_tile_write##size)

TILE_WRITE(b, u8);
TILE_WRITE(w, u16);
TILE_WRITE(l, u32);
TILE_WRITE(q, u64);