Commit | Line | Data |
---|---|---|
6ee73861 | 1 | /* |
c420b2dc | 2 | * Copyright (C) 2012 Ben Skeggs. |
6ee73861 BS |
3 | * All Rights Reserved. |
4 | * | |
5 | * Permission is hereby granted, free of charge, to any person obtaining | |
6 | * a copy of this software and associated documentation files (the | |
7 | * "Software"), to deal in the Software without restriction, including | |
8 | * without limitation the rights to use, copy, modify, merge, publish, | |
9 | * distribute, sublicense, and/or sell copies of the Software, and to | |
10 | * permit persons to whom the Software is furnished to do so, subject to | |
11 | * the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice (including the | |
14 | * next paragraph) shall be included in all copies or substantial | |
15 | * portions of the Software. | |
16 | * | |
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | |
20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | |
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | |
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | |
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | |
24 | * | |
25 | */ | |
26 | ||
27 | #include "drmP.h" | |
28 | #include "drm.h" | |
29 | #include "nouveau_drv.h" | |
c420b2dc | 30 | #include "nouveau_fifo.h" |
a8eaebc6 | 31 | #include "nouveau_ramht.h" |
a11c3198 | 32 | #include "nouveau_vm.h" |
6ee73861 | 33 | |
c420b2dc BS |
/* Per-device PFIFO engine state for NV50-family chips. */
struct nv50_fifo_priv {
	struct nouveau_fifo_priv base;		/* common FIFO engine base (holds channel count etc.) */
	struct nouveau_gpuobj *playlist[2];	/* double-buffered channel playlists; the inactive one
						 * is rebuilt and handed to HW by
						 * nv50_fifo_playlist_update() */
	int cur_playlist;			/* index (0/1) of the playlist currently owned by HW */
};
39 | ||
/* Per-channel PFIFO context; no NV50-specific fields beyond the common base. */
struct nv50_fifo_chan {
	struct nouveau_fifo_chan base;
};
43 | ||
/* Rebuild the runlist of active channels and hand it to PFIFO.
 *
 * Scans every channel slot's 0x002600 context register and collects the
 * ids whose enable bit (0x80000000) is set into the currently-inactive
 * playlist buffer, then flips buffers and points the hardware at the new
 * one.  Caller is expected to hold context_switch_lock where concurrent
 * updates are possible (see callers in this file).
 */
void
nv50_fifo_playlist_update(struct drm_device *dev)
{
	struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *cur;
	int i, p;

	/* write into the buffer HW is *not* currently using, then swap */
	cur = priv->playlist[priv->cur_playlist];
	priv->cur_playlist = !priv->cur_playlist;

	/* p counts entries actually written; i is the channel id */
	for (i = 0, p = 0; i < priv->base.channels; i++) {
		if (nv_rd32(dev, 0x002600 + (i * 4)) & 0x80000000)
			nv_wo32(cur, p++ * 4, i);
	}

	/* make the playlist writes visible to the GPU before kicking it */
	dev_priv->engine.instmem.flush(dev);

	/* NOTE(review): register meanings inferred from usage — 0x0032f4
	 * takes the playlist's instance address, 0x0032ec its entry count,
	 * and 0x002500 triggers the update; confirm against HW docs. */
	nv_wr32(dev, 0x0032f4, cur->vinst >> 12);
	nv_wr32(dev, 0x0032ec, p);
	nv_wr32(dev, 0x002500, 0x00000101);
}
66 | ||
c420b2dc BS |
/* Create the PFIFO context for a channel (engine context_new hook).
 *
 * Allocates the per-channel struct, maps the channel's USER control page,
 * fills in the RAMFC fields at the start of the channel's instance memory
 * (push buffer, IB ring address/size, RAMHT location), then enables the
 * channel in 0x002600 and inserts it into the playlist.
 *
 * Returns 0 on success or a -errno; on failure the partially constructed
 * context is torn down via context_del before returning.
 */
static int
nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
{
	struct nv50_fifo_priv *priv = nv_engine(chan->dev, engine);
	struct nv50_fifo_chan *fctx;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	/* GPU virtual offset of the indirect-buffer ring within the pushbuf */
	u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
	/* instance address of the channel's RAMIN block, in 4KiB units */
	u64 instance = chan->ramin->vinst >> 12;
	unsigned long flags;
	int ret = 0, i;

	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
	if (!fctx)
		return -ENOMEM;
	/* keep the VM alive while this engine holds a context in it */
	atomic_inc(&chan->vm->engref[engine]);

	/* map this channel's USER slot (BAR0) for CPU access */
	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
			     NV50_USER(chan->id), PAGE_SIZE);
	if (!chan->user) {
		ret = -ENOMEM;
		goto error;
	}

	/* zero the RAMFC area, then program the fields we care about.
	 * NOTE(review): offsets/magic values follow the NV50 RAMFC layout;
	 * see envytools/HW docs for the individual field meanings. */
	for (i = 0; i < 0x100; i += 4)
		nv_wo32(chan->ramin, i, 0x00000000);
	nv_wo32(chan->ramin, 0x3c, 0x403f6078);
	nv_wo32(chan->ramin, 0x40, 0x00000000);
	nv_wo32(chan->ramin, 0x44, 0x01003fff);
	nv_wo32(chan->ramin, 0x48, chan->pushbuf->cinst >> 4);
	/* IB ring: low address, then high address | log2(entries) << 16 */
	nv_wo32(chan->ramin, 0x50, lower_32_bits(ib_offset));
	nv_wo32(chan->ramin, 0x54, upper_32_bits(ib_offset) |
				   drm_order(chan->dma.ib_max + 1) << 16);
	nv_wo32(chan->ramin, 0x60, 0x7fffffff);
	nv_wo32(chan->ramin, 0x78, 0x00000000);
	nv_wo32(chan->ramin, 0x7c, 0x30000001);
	/* RAMHT: size (bits-9), full-search mode, instance address */
	nv_wo32(chan->ramin, 0x80, ((chan->ramht->bits - 9) << 27) |
				   (4 << 24) /* SEARCH_FULL */ |
				   (chan->ramht->gpuobj->cinst >> 4));

	/* flush instance-memory writes before the HW can see the channel */
	dev_priv->engine.instmem.flush(dev);

	/* enable the channel and publish it in the playlist atomically
	 * w.r.t. other context switches */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
	nv50_fifo_playlist_update(dev);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

error:
	if (ret)
		priv->base.base.context_del(chan, engine);
	return ret;
}
119 | ||
/* Force PFIFO to unload the given channel's context from all engines.
 *
 * Returns true if the unload completed, false if the hardware timed out
 * (an NV_INFO message is logged in that case).
 */
static bool
nv50_fifo_kickoff(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	bool done = true;
	u32 me;

	/* HW bug workaround:
	 *
	 * PFIFO will hang forever if the connected engines don't report
	 * that they've processed the context switch request.
	 *
	 * In order for the kickoff to work, we need to ensure all the
	 * connected engines are in a state where they can answer.
	 *
	 * Newer chipsets don't seem to suffer from this issue, and well,
	 * there's also a "ignore these engines" bitmask reg we can use
	 * if we hit the issue there..
	 */

	/* PME: make sure engine is enabled */
	me = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);

	/* do the kickoff: write the channel's instance address, then wait
	 * for the register to leave its "busy" (all-ones) state */
	nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
	if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
		NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
		done = false;
	}

	/* restore any engine states we changed, and exit */
	nv_wr32(dev, 0x00b860, me);
	return done;
}
154 | ||
c420b2dc BS |
/* Destroy a channel's PFIFO context (engine context_del hook).
 *
 * Safe to call on a partially constructed context (context_new's error
 * path does exactly that): the USER mapping is only unmapped if present,
 * and kfree(NULL) is a no-op.
 */
static void
nv50_fifo_context_del(struct nouveau_channel *chan, int engine)
{
	struct nv50_fifo_chan *fctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	/* remove channel from playlist, will context switch if active */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
	nv50_fifo_playlist_update(dev);

	/* tell any engines on this channel to unload their contexts */
	nv50_fifo_kickoff(chan);

	/* clear the channel's context slot entirely */
	nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* clean up */
	if (chan->user) {
		iounmap(chan->user);
		chan->user = NULL;
	}

	/* drop the VM reference taken in context_new */
	atomic_dec(&chan->vm->engref[engine]);
	chan->engctx[engine] = NULL;
	kfree(fctx);
}
184 | ||
c420b2dc BS |
/* Bring up the PFIFO engine (engine init hook).
 *
 * Resets the engine, reprograms every channel's context register from the
 * software channel table (so contexts survive suspend/resume), rebuilds
 * the playlist, and enables FIFO processing.  Always returns 0.
 */
static int
nv50_fifo_init(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 instance;
	int i;

	/* pulse the PFIFO enable bit in PMC to reset the engine */
	nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
	nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
	/* NOTE(review): magic init values carried over from HW bring-up;
	 * meanings not derivable from this file */
	nv_wr32(dev, 0x00250c, 0x6f3cfc34);
	nv_wr32(dev, 0x002044, 0x01003fff);

	/* ack any pending interrupts, enable all interrupt sources */
	nv_wr32(dev, 0x002100, 0xffffffff);
	nv_wr32(dev, 0x002140, 0xffffffff);

	/* restore 0x002600 for all 128 slots: enabled+instance for channels
	 * that hold a FIFO context, zero otherwise */
	for (i = 0; i < 128; i++) {
		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
		if (chan && chan->engctx[engine])
			instance = 0x80000000 | chan->ramin->vinst >> 12;
		else
			instance = 0x00000000;
		nv_wr32(dev, 0x002600 + (i * 4), instance);
	}

	nv50_fifo_playlist_update(dev);

	/* enable FIFO operation */
	nv_wr32(dev, 0x003200, 1);
	nv_wr32(dev, 0x003250, 1);
	nv_wr32(dev, 0x002500, 1);
	return 0;
}
216 | ||
c420b2dc BS |
/* Shut down the PFIFO engine (engine fini hook).
 *
 * Empties the playlist, forces every active channel's context to unload,
 * then masks all PFIFO interrupts.  Returns -EBUSY if any channel fails
 * to unload (leaving the engine partially quiesced), 0 on success.
 */
static int
nv50_fifo_fini(struct drm_device *dev, int engine, bool suspend)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fifo_priv *priv = nv_engine(dev, engine);
	int i;

	/* set playlist length to zero, fifo will unload context */
	nv_wr32(dev, 0x0032ec, 0);

	/* tell all connected engines to unload their contexts */
	for (i = 0; i < priv->base.channels; i++) {
		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
		if (chan && !nv50_fifo_kickoff(chan))
			return -EBUSY;
	}

	/* mask all PFIFO interrupts */
	nv_wr32(dev, 0x002140, 0);
	return 0;
}
237 | ||
56ac7475 | 238 | void |
c420b2dc | 239 | nv50_fifo_tlb_flush(struct drm_device *dev, int engine) |
56ac7475 | 240 | { |
a11c3198 | 241 | nv50_vm_flush_engine(dev, 5); |
56ac7475 | 242 | } |
c420b2dc BS |
243 | |
/* Tear down the PFIFO engine object (engine destroy hook).
 *
 * Also used by nv50_fifo_create()'s error path, so it must tolerate
 * partially constructed state (gpuobj refs may be NULL; gpuobj_ref(NULL)
 * handles that).
 */
void
nv50_fifo_destroy(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fifo_priv *priv = nv_engine(dev, engine);

	/* detach the ISR before freeing anything it might touch */
	nouveau_irq_unregister(dev, 8);

	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);

	dev_priv->eng[engine] = NULL;
	kfree(priv);
}
258 | ||
/* Allocate and register the NV50 PFIFO engine.
 *
 * Wires up the engine vtable, allocates both playlist buffers (one u32
 * entry per channel), and installs the FIFO interrupt handler (shared
 * with the nv04 ISR).  On any failure the engine destroy hook is invoked
 * to unwind, and the error is returned; 0 on success.
 */
int
nv50_fifo_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fifo_priv *priv;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.base.destroy = nv50_fifo_destroy;
	priv->base.base.init = nv50_fifo_init;
	priv->base.base.fini = nv50_fifo_fini;
	priv->base.base.context_new = nv50_fifo_context_new;
	priv->base.base.context_del = nv50_fifo_context_del;
	priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
	/* 127 usable channels (slot 127 is not exposed) — NOTE(review):
	 * nv50_fifo_init still programs all 128 context regs; confirm
	 * intentional */
	priv->base.channels = 127;
	dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;

	ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
	if (ret)
		goto error;

	nouveau_irq_register(dev, 8, nv04_fifo_isr);
error:
	/* success falls through with ret == 0; destroy() unwinds on error */
	if (ret)
		priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
	return ret;
}