/* drivers/gpu/drm/nouveau/nouveau_sgdma.c */

#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

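/*
 * Per-backend state: the DMA addresses of the currently populated
 * pages, plus where in the GART page table they were bound so that
 * unbind() can find them again.
 */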
struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;
	unsigned nr_pages;

	unsigned pte_start;
	bool bound;
};

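/*
 * Map the backing pages for DMA.  Called by TTM before bind(); if a
 * mapping fails, clear() unmaps the pages mapped so far (the failed
 * entry itself is excluded because nr_pages is not yet incremented).
 */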
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(nvbe->dev, "num_pages = %lu\n", num_pages);

	if (nvbe->pages)
		return -EINVAL;

	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
	if (!nvbe->pages)
		return -ENOMEM;

	nvbe->nr_pages = 0;
	while (num_pages--) {
		nvbe->pages[nvbe->nr_pages] =
			pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev,
					  nvbe->pages[nvbe->nr_pages])) {
			be->func->clear(be);
			return -EFAULT;
		}

		nvbe->nr_pages++;
	}

	return 0;
}

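/*
 * Undo populate(): unbind from the GART if still bound, then unmap
 * every page and release the DMA address array.
 */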
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev;

	if (nvbe && nvbe->pages) {
		dev = nvbe->dev;
		NV_DEBUG(dev, "\n");

		if (nvbe->bound)
			be->func->unbind(be);

		while (nvbe->nr_pages--) {
			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
		kfree(nvbe->pages);
		nvbe->pages = NULL;
		nvbe->nr_pages = 0;
	}
}

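/*
 * Translate a GART byte offset into a word index into the ctxdma
 * object.  Pre-NV50 PTEs are one 32-bit word each and start after the
 * two-word ctxdma header (hence "+ 2"); NV50 page table entries are
 * two words each (hence "<< 1").
 */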
static inline unsigned
nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);

	if (dev_priv->card_type < NV_50)
		return pte + 2;

	return pte << 1;
}

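/*
 * Point the GART page table entries for this object at the previously
 * mapped backing pages.  Each system page covers PAGE_SIZE /
 * NV_CTXDMA_PAGE_SIZE consecutive 4KiB GART pages.  The low bits OR'd
 * into each entry (3 on pre-NV50, 0x21 on NV50) appear to mark the
 * entry present and read/write.
 */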
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);

	pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
	nvbe->pte_start = pte;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];
		uint32_t offset_l = lower_32_bits(dma_offset);
		uint32_t offset_h = upper_32_bits(dma_offset);

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50)
				nv_wo32(dev, gpuobj, pte++, offset_l | 3);
			else {
				nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
				nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.flush(nvbe->dev);

	if (dev_priv->card_type == NV_50) {
		nv50_vm_flush(dev, 5); /* PGRAPH */
		nv50_vm_flush(dev, 0); /* PFIFO */
	}

	nvbe->bound = true;
	return 0;
}

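/*
 * Re-point this object's GART entries at the dummy page so no entry
 * dangles, then flush instmem and, on NV50, the PGRAPH and PFIFO VMs,
 * mirroring the flushes done in bind().
 */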
static int
nouveau_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	pte = nvbe->pte_start;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50)
				nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
			else {
				nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
				nv_wo32(dev, gpuobj, pte++, 0x00000000);
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.flush(nvbe->dev);

	if (dev_priv->card_type == NV_50) {
		nv50_vm_flush(dev, 5);
		nv50_vm_flush(dev, 0);
	}

	nvbe->bound = false;
	return 0;
}

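/*
 * Final teardown of the backend object; clears any remaining
 * population before freeing.
 */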
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (nvbe) {
		NV_DEBUG(nvbe->dev, "\n");

		if (nvbe->pages)
			be->func->clear(be);
		kfree(nvbe);
	}
}

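/* TTM backend function table wiring the callbacks above into TTM. */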
static struct ttm_backend_func nouveau_sgdma_backend = {
	.populate = nouveau_sgdma_populate,
	.clear = nouveau_sgdma_clear,
	.bind = nouveau_sgdma_bind,
	.unbind = nouveau_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

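/*
 * Allocate a TTM backend instance for this device.  Returns NULL if
 * the GART ctxdma has not yet been set up by nouveau_sgdma_init().
 */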
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	if (!dev_priv->gart_info.sg_ctxdma)
		return NULL;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;

	nvbe->backend.func = &nouveau_sgdma_backend;

	return &nvbe->backend;
}

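/*
 * Create the GART context DMA object and initially point every entry
 * at a dummy page: a 64MiB aperture of 32-bit PTEs behind a two-word
 * ctxdma header on pre-NV50, or a single 512MiB VM page table of
 * 64-bit PTEs on NV50.
 */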
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	uint32_t aper_size, obj_size;
	int i, ret;

	if (dev_priv->card_type < NV_50) {
		aper_size = (64 * 1024 * 1024);
		obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
		obj_size += 8; /* ctxdma header */
	} else {
		/* 1 entire VM page table */
		aper_size = (512 * 1024 * 1024);
		obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
	}

	ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
				 NVOBJ_FLAG_ALLOW_NO_REFS |
				 NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
		return ret;
	}

	dev_priv->gart_info.sg_dummy_page =
		alloc_page(GFP_KERNEL|__GFP_DMA32);
	if (!dev_priv->gart_info.sg_dummy_page) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return -ENOMEM;
	}

	set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
	dev_priv->gart_info.sg_dummy_bus =
		pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

	if (dev_priv->card_type < NV_50) {
		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
		 * it's confirmed to work on c51.  Perhaps this means
		 * NV_DMA_TARGET_PCIE on those cards? */
		nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
			(1 << 12) /* PT present */ |
			(0 << 13) /* PT *not* linear */ |
			(NV_DMA_ACCESS_RW << 14) |
			(NV_DMA_TARGET_PCI << 16));
		nv_wo32(dev, gpuobj, 1, aper_size - 1);
		for (i = 2; i < 2 + (aper_size >> 12); i++) {
			nv_wo32(dev, gpuobj, i,
				dev_priv->gart_info.sg_dummy_bus | 3);
		}
	} else {
		for (i = 0; i < obj_size; i += 8) {
			nv_wo32(dev, gpuobj, (i+0)/4,
				dev_priv->gart_info.sg_dummy_bus | 0x21);
			nv_wo32(dev, gpuobj, (i+4)/4, 0);
		}
	}
	dev_priv->engine.instmem.flush(dev);

	dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
	dev_priv->gart_info.aper_base = 0;
	dev_priv->gart_info.aper_size = aper_size;
	dev_priv->gart_info.sg_ctxdma = gpuobj;
	return 0;
}

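/*
 * Release everything nouveau_sgdma_init() created: the dummy page,
 * its DMA mapping, and the ctxdma object.
 */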
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->gart_info.sg_dummy_page) {
		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		unlock_page(dev_priv->gart_info.sg_dummy_page);
		__free_page(dev_priv->gart_info.sg_dummy_page);
		dev_priv->gart_info.sg_dummy_page = NULL;
		dev_priv->gart_info.sg_dummy_bus = 0;
	}

	nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
}

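/*
 * Read back the bus address a GART offset currently points at, by
 * fetching the PTE from the ctxdma (skipping the two-word header) and
 * masking off the flag bits.  Only implemented for pre-NV50 layouts.
 */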
int
nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte;

	pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
	if (dev_priv->card_type < NV_50) {
		*page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
		return 0;
	}

	NV_ERROR(dev, "Unimplemented on NV50\n");
	return -EINVAL;
}