#ifndef __POWERNV_PCI_H
#define __POWERNV_PCI_H

struct pci_dn;

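/*
 * PHB backend type.  (Descriptive note added here, not from the original
 * header: IODA1 is the POWER7-era P7IOC design, IODA2 the POWER8 PHB3, and
 * NPU the NVLink bridge; the precise hardware model is tracked separately
 * in pnv_phb_model below.)
 */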
enum pnv_phb_type {
	PNV_PHB_IODA1 = 0,
	PNV_PHB_IODA2 = 1,
	PNV_PHB_NPU = 2,
};

/* Precise PHB model for error management */
enum pnv_phb_model {
	PNV_PHB_MODEL_UNKNOWN,
	PNV_PHB_MODEL_P7IOC,
	PNV_PHB_MODEL_PHB3,
	PNV_PHB_MODEL_NPU,
};

#define PNV_PCI_DIAG_BUF_SIZE	8192
#define PNV_IODA_PE_DEV		(1 << 0)	/* PE has single PCI device */
#define PNV_IODA_PE_BUS		(1 << 1)	/* PE has primary PCI bus */
#define PNV_IODA_PE_BUS_ALL	(1 << 2)	/* PE has subordinate buses */
#define PNV_IODA_PE_MASTER	(1 << 3)	/* Master PE in compound case */
#define PNV_IODA_PE_SLAVE	(1 << 4)	/* Slave PE in compound case */
#define PNV_IODA_PE_VF		(1 << 5)	/* PE for one VF */
#define PNV_IODA_PE_PEER	(1 << 6)	/* PE has peers */

/* Data associated with a PE, including IOMMU tracking etc. */
struct pnv_phb;
struct pnv_ioda_pe {
	unsigned long flags;
	struct pnv_phb *phb;

#define PNV_IODA_MAX_PEER_PES	8
	struct pnv_ioda_pe *peers[PNV_IODA_MAX_PEER_PES];

	/* A PE can be associated with a single device or an
	 * entire bus (& children). In the former case, pdev
	 * is populated, in the latter case, pbus is.
	 */
#ifdef CONFIG_PCI_IOV
	struct pci_dev *parent_dev;
#endif
	struct pci_dev *pdev;
	struct pci_bus *pbus;

	/* Effective RID (device RID for a device PE and base bus
	 * RID with devfn 0 for a bus PE)
	 */
	unsigned int rid;

	/* PE number */
	unsigned int pe_number;

	/* "Weight" assigned to the PE for the sake of DMA resource
	 * allocations
	 */
	unsigned int dma_weight;

	/* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */
	int tce32_seg;
	int tce32_segcount;
	struct iommu_table_group table_group;

	/* 64-bit TCE bypass region */
	bool tce_bypass_enabled;
	uint64_t tce_bypass_base;

	/* MSIs. The MVE index is identical for 32-bit and 64-bit MSI
	 * and -1 if not supported. (It's actually identical to the
	 * PE number.)
	 */
	int mve_number;

	/* PEs in compound case */
	struct pnv_ioda_pe *master;
	struct list_head slaves;

	/* Link in list of PE#s */
	struct list_head dma_link;
	struct list_head list;
};
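
/*
 * Illustrative sketch, not part of the original header: the hypothetical
 * helpers below show how the PNV_IODA_PE_* flags select which of pdev/pbus
 * describes a given PE.
 */
static inline bool pnv_ioda_pe_is_dev(const struct pnv_ioda_pe *pe)
{
	/* Device PEs carry the owning PCI device in pe->pdev */
	return !!(pe->flags & PNV_IODA_PE_DEV);
}

static inline bool pnv_ioda_pe_is_bus(const struct pnv_ioda_pe *pe)
{
	/* Bus PEs carry the primary bus (and possibly its children) in pe->pbus */
	return !!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL));
}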

#define PNV_PHB_FLAG_EEH	(1 << 0)

struct pnv_phb {
	struct pci_controller *hose;
	enum pnv_phb_type type;
	enum pnv_phb_model model;
	u64 hub_id;
	u64 opal_id;
	int flags;
	void __iomem *regs;
	int initialized;
	spinlock_t lock;

#ifdef CONFIG_DEBUG_FS
	int has_dbgfs;
	struct dentry *dbgfs;
#endif

#ifdef CONFIG_PCI_MSI
	unsigned int msi_base;
	unsigned int msi32_support;
	struct msi_bitmap msi_bmp;
#endif
	int (*msi_setup)(struct pnv_phb *phb, struct pci_dev *dev,
			 unsigned int hwirq, unsigned int virq,
			 unsigned int is_64, struct msi_msg *msg);
	void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
	void (*fixup_phb)(struct pci_controller *hose);
	u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
	int (*init_m64)(struct pnv_phb *phb);
	void (*reserve_m64_pe)(struct pci_bus *bus,
			       unsigned long *pe_bitmap, bool all);
	int (*pick_m64_pe)(struct pci_bus *bus, bool all);
	int (*get_pe_state)(struct pnv_phb *phb, int pe_no);
	void (*freeze_pe)(struct pnv_phb *phb, int pe_no);
	int (*unfreeze_pe)(struct pnv_phb *phb, int pe_no, int opt);

	struct {
		/* Global bridge info */
		unsigned int total_pe;
		unsigned int reserved_pe;

		/* 32-bit MMIO window */
		unsigned int m32_size;
		unsigned int m32_segsize;
		unsigned int m32_pci_base;

		/* 64-bit MMIO window */
		unsigned int m64_bar_idx;
		unsigned long m64_size;
		unsigned long m64_segsize;
		unsigned long m64_base;
		unsigned long m64_bar_alloc;

		/* IO ports */
		unsigned int io_size;
		unsigned int io_segsize;
		unsigned int io_pci_base;

		/* PE allocation bitmap */
		unsigned long *pe_alloc;
		/* PE allocation mutex */
		struct mutex pe_alloc_mutex;

		/* M32 & IO segment maps */
		unsigned int *m32_segmap;
		unsigned int *io_segmap;
		struct pnv_ioda_pe *pe_array;

		/* IRQ chip */
		int irq_chip_init;
		struct irq_chip irq_chip;

		/* List of used PEs, kept in the order they were
		 * created
		 */
		struct list_head pe_list;
		struct mutex pe_list_mutex;

		/* Reverse map of PEs, indexed by { bus, devfn }; will
		 * have to be extended if we are to support more than
		 * 256 PEs
		 */
		unsigned char pe_rmap[0x10000];

		/* 32-bit TCE tables allocation */
		unsigned long tce32_count;

		/* Total "weight" for the sake of DMA resource
		 * allocation
		 */
		unsigned int dma_weight;
		unsigned int dma_pe_count;

		/* List of used PEs, sorted at boot for resource
		 * allocation purposes
		 */
		struct list_head pe_dma_list;

		/* TCE cache invalidate registers (physical and
		 * remapped)
		 */
		phys_addr_t tce_inval_reg_phys;
		__be64 __iomem *tce_inval_reg;
	} ioda;

	/* PHB and hub status structure */
	union {
		unsigned char blob[PNV_PCI_DIAG_BUF_SIZE];
		struct OpalIoP7IOCPhbErrorData p7ioc;
		struct OpalIoPhb3ErrorData phb3;
		struct OpalIoP7IOCErrorData hub_diag;
	} diag;

};
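
/*
 * Illustrative sketch, not from the original header: the per-PHB callbacks
 * above are invoked through the pnv_phb pointer.  The hypothetical helper
 * below shows the MSI path handing one hardware IRQ to the backend's
 * msi_setup() hook (assumes <linux/errno.h> is already available, as it
 * normally is in the including .c files).
 */
static inline int pnv_phb_compose_msi_msg(struct pnv_phb *phb,
					  struct pci_dev *pdev,
					  unsigned int hwirq,
					  unsigned int virq,
					  unsigned int is_64,
					  struct msi_msg *msg)
{
	/* Backends without MSI support simply leave the hook unset */
	if (!phb->msi_setup)
		return -ENODEV;
	return phb->msi_setup(phb, pdev, hwirq, virq, is_64, msg);
}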

extern struct pci_ops pnv_pci_ops;
extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
		unsigned long uaddr, enum dma_data_direction direction,
		struct dma_attrs *attrs);
extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
		unsigned long *hpa, enum dma_data_direction *direction);
extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index);
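
/*
 * Usage sketch (illustrative only, not from the original header): the TCE
 * helpers above populate and clear entries of a DMA window, roughly
 *
 *	pnv_tce_build(tbl, index, npages, uaddr, DMA_TO_DEVICE, NULL);
 *	...
 *	pnv_tce_free(tbl, index, npages);
 *
 * In practice they are typically reached through the iommu_table ops
 * rather than called directly.
 */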

void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
				unsigned char *log_buff);
int pnv_pci_cfg_read(struct pci_dn *pdn,
		     int where, int size, u32 *val);
int pnv_pci_cfg_write(struct pci_dn *pdn,
		      int where, int size, u32 val);
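
/*
 * Usage sketch, not part of the original interface: a hypothetical wrapper
 * showing that config space is accessed through a pci_dn rather than a
 * pci_dev.  PCI_VENDOR_ID is the standard 16-bit register at offset 0 and
 * is assumed to come from <uapi/linux/pci_regs.h> via <linux/pci.h>.
 */
static inline int pnv_pci_cfg_read_vendor(struct pci_dn *pdn, u32 *vendor)
{
	return pnv_pci_cfg_read(pdn, PCI_VENDOR_ID, 2, vendor);
}
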
extern struct iommu_table *pnv_pci_table_alloc(int nid);

extern long pnv_pci_link_table_and_group(int node, int num,
		struct iommu_table *tbl,
		struct iommu_table_group *table_group);
extern void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
		struct iommu_table_group *table_group);
extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
				      void *tce_mem, u64 tce_size,
				      u64 dma_offset, unsigned page_shift);
extern void pnv_pci_init_ioda_hub(struct device_node *np);
extern void pnv_pci_init_ioda2_phb(struct device_node *np);
extern void pnv_pci_init_npu_phb(struct device_node *np);
extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
					__be64 *startp, __be64 *endp, bool rm);
extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);

extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
extern void pnv_pci_dma_bus_setup(struct pci_bus *bus);
extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);

/* NVLink functions */
extern void pnv_npu_tce_invalidate_entire(struct pnv_ioda_pe *npe);
extern void pnv_npu_tce_invalidate(struct pnv_ioda_pe *npe,
				   struct iommu_table *tbl,
				   unsigned long index,
				   unsigned long npages,
				   bool rm);
extern void pnv_npu_init_dma_pe(struct pnv_ioda_pe *npe);
extern void pnv_npu_setup_dma_pe(struct pnv_ioda_pe *npe);
extern int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe, bool enabled);
extern int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask);

#endif /* __POWERNV_PCI_H */