Commit | Line | Data |
---|---|---|
06c0dd96 | 1 | /* |
bfcdfb0e | 2 | * Copyright (c) 2014, The Linux Foundation. All rights reserved. |
06c0dd96 RC |
3 | * Copyright (C) 2013 Red Hat |
4 | * Author: Rob Clark <robdclark@gmail.com> | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify it | |
7 | * under the terms of the GNU General Public License version 2 as published by | |
8 | * the Free Software Foundation. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
13 | * more details. | |
14 | * | |
15 | * You should have received a copy of the GNU General Public License along with | |
16 | * this program. If not, see <http://www.gnu.org/licenses/>. | |
17 | */ | |
18 | ||
19 | ||
20 | #include "mdp5_kms.h" | |
21 | #include "mdp5_smp.h" | |
22 | ||
23 | ||
24 | /* SMP - Shared Memory Pool | |
25 | * | |
26 | * These are shared between all the clients, where each plane in a | |
27 | * scanout buffer is a SMP client. Ie. scanout of 3 plane I420 on | |
28 | * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR. | |
29 | * | |
30 | * Based on the size of the attached scanout buffer, a certain # of | |
31 | * blocks must be allocated to that client out of the shared pool. | |
32 | * | |
bfcdfb0e SV |
33 | * In some hw, some blocks are statically allocated for certain pipes |
34 | * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0). | |
35 | * | |
36 | * For each block that can be dynamically allocated, it can be either | |
657c63f0 WX |
37 | * free: |
38 | * The block is free. | |
39 | * | |
40 | * pending: | |
41 | * The block is allocated to some client and not free. | |
42 | * | |
43 | * configured: | |
44 | * The block is allocated to some client, and assigned to that | |
7b59c7e4 | 45 | * client in MDP5_SMP_ALLOC registers. |
657c63f0 WX |
46 | * |
47 | * inuse: | |
48 | * The block is being actively used by a client. | |
49 | * | |
50 | * The updates happen in the following steps: | |
06c0dd96 RC |
51 | * |
52 | * 1) mdp5_smp_request(): | |
53 | * When plane scanout is setup, calculate required number of | |
657c63f0 WX |
54 | * blocks needed per client, and request. Blocks neither inuse nor |
55 | * configured nor pending by any other client are added to client's | |
56 | * pending set. | |
57 | * For shrinking, blocks in pending but not in configured can be freed | |
58 | * directly, but those already in configured will be freed later by | |
59 | * mdp5_smp_commit. | |
06c0dd96 RC |
60 | * |
61 | * 2) mdp5_smp_configure(): | |
7b59c7e4 | 62 | * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers |
06c0dd96 | 63 | * are configured for the union(pending, inuse) |
657c63f0 WX |
64 | * Current pending is copied to configured. |
65 | * It is assumed that mdp5_smp_request and mdp5_smp_configure not run | |
66 | * concurrently for the same pipe. | |
06c0dd96 RC |
67 | * |
68 | * 3) mdp5_smp_commit(): | |
657c63f0 | 69 | * After next vblank, copy configured -> inuse. Optionally update |
06c0dd96 RC |
70 | * MDP5_SMP_ALLOC registers if there are newly unused blocks |
71 | * | |
657c63f0 WX |
72 | * 4) mdp5_smp_release(): |
73 | * Must be called after the pipe is disabled and no longer uses any SMB | |
74 | * | |
06c0dd96 RC |
75 | * On the next vblank after changes have been committed to hw, the |
76 | * client's pending blocks become its in-use blocks (and no-longer | |
77 | * in-use blocks become available to other clients). | |
78 | * | |
79 | * btw, hurray for confusing overloaded acronyms! :-/ | |
80 | * | |
81 | * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1 | |
82 | * should happen at (or before)? atomic->check(). And we'd need | |
83 | * an API to discard previous requests if update is aborted or | |
84 | * (test-only). | |
85 | * | |
86 | * TODO would perhaps be nice to have debugfs to dump out kernel | |
87 | * inuse and pending state of all clients.. | |
88 | */ | |
89 | ||
bfcdfb0e SV |
/* Allocator state for the shared memory pool (SMP) */
struct mdp5_smp {
	struct drm_device *dev;

	uint8_t reserved[MAX_CLIENTS]; /* fixed MMBs allocation per client */

	int blk_cnt;	/* total number of MMBs in the pool */
	int blk_size;	/* size of one MMB, in bytes */

	spinlock_t state_lock;	/* protects state and client_state */
	mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */

	struct mdp5_client_smp_state client_state[MAX_CLIENTS];
};
06c0dd96 | 103 | |
657c63f0 WX |
104 | static void update_smp_state(struct mdp5_smp *smp, |
105 | u32 cid, mdp5_smp_state_t *assigned); | |
106 | ||
bfcdfb0e SV |
107 | static inline |
108 | struct mdp5_kms *get_kms(struct mdp5_smp *smp) | |
109 | { | |
110 | struct msm_drm_private *priv = smp->dev->dev_private; | |
111 | ||
112 | return to_mdp5_kms(to_mdp_kms(priv->kms)); | |
113 | } | |
114 | ||
6fa6acdf | 115 | static inline u32 pipe2client(enum mdp5_pipe pipe, int plane) |
bfcdfb0e | 116 | { |
6fa6acdf SV |
117 | #define CID_UNUSED 0 |
118 | ||
119 | if (WARN_ON(plane >= pipe2nclients(pipe))) | |
120 | return CID_UNUSED; | |
121 | ||
122 | /* | |
123 | * Note on SMP clients: | |
124 | * For ViG pipes, fetch Y/Cr/Cb-components clients are always | |
125 | * consecutive, and in that order. | |
126 | * | |
127 | * e.g.: | |
128 | * if mdp5_cfg->smp.clients[SSPP_VIG0] = N, | |
129 | * Y plane's client ID is N | |
130 | * Cr plane's client ID is N + 1 | |
131 | * Cb plane's client ID is N + 2 | |
132 | */ | |
133 | ||
134 | return mdp5_cfg->smp.clients[pipe] + plane; | |
bfcdfb0e | 135 | } |
06c0dd96 RC |
136 | |
137 | /* step #1: update # of blocks pending for the client: */ | |
bfcdfb0e | 138 | static int smp_request_block(struct mdp5_smp *smp, |
6fa6acdf | 139 | u32 cid, int nblks) |
06c0dd96 | 140 | { |
bfcdfb0e SV |
141 | struct mdp5_kms *mdp5_kms = get_kms(smp); |
142 | struct mdp5_client_smp_state *ps = &smp->client_state[cid]; | |
143 | int i, ret, avail, cur_nblks, cnt = smp->blk_cnt; | |
60fb49ca | 144 | uint8_t reserved; |
06c0dd96 RC |
145 | unsigned long flags; |
146 | ||
60fb49ca | 147 | reserved = smp->reserved[cid]; |
2e362e17 | 148 | |
bfcdfb0e SV |
149 | spin_lock_irqsave(&smp->state_lock, flags); |
150 | ||
2559d19f SV |
151 | if (reserved) { |
152 | nblks = max(0, nblks - reserved); | |
bfcdfb0e | 153 | DBG("%d MMBs allocated (%d reserved)", nblks, reserved); |
2559d19f | 154 | } |
06c0dd96 | 155 | |
bfcdfb0e | 156 | avail = cnt - bitmap_weight(smp->state, cnt); |
06c0dd96 | 157 | if (nblks > avail) { |
bfcdfb0e SV |
158 | dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n", |
159 | nblks, avail); | |
06c0dd96 RC |
160 | ret = -ENOSPC; |
161 | goto fail; | |
162 | } | |
163 | ||
164 | cur_nblks = bitmap_weight(ps->pending, cnt); | |
165 | if (nblks > cur_nblks) { | |
166 | /* grow the existing pending reservation: */ | |
167 | for (i = cur_nblks; i < nblks; i++) { | |
bfcdfb0e | 168 | int blk = find_first_zero_bit(smp->state, cnt); |
06c0dd96 | 169 | set_bit(blk, ps->pending); |
bfcdfb0e | 170 | set_bit(blk, smp->state); |
06c0dd96 RC |
171 | } |
172 | } else { | |
173 | /* shrink the existing pending reservation: */ | |
174 | for (i = cur_nblks; i > nblks; i--) { | |
175 | int blk = find_first_bit(ps->pending, cnt); | |
176 | clear_bit(blk, ps->pending); | |
657c63f0 WX |
177 | |
178 | /* clear in global smp_state if not in configured | |
179 | * otherwise until _commit() | |
180 | */ | |
181 | if (!test_bit(blk, ps->configured)) | |
182 | clear_bit(blk, smp->state); | |
06c0dd96 RC |
183 | } |
184 | } | |
185 | ||
186 | fail: | |
bfcdfb0e SV |
187 | spin_unlock_irqrestore(&smp->state_lock, flags); |
188 | return 0; | |
189 | } | |
190 | ||
191 | static void set_fifo_thresholds(struct mdp5_smp *smp, | |
192 | enum mdp5_pipe pipe, int nblks) | |
193 | { | |
194 | struct mdp5_kms *mdp5_kms = get_kms(smp); | |
195 | u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE); | |
196 | u32 val; | |
197 | ||
198 | /* 1/4 of SMP pool that is being fetched */ | |
199 | val = (nblks * smp_entries_per_blk) / 4; | |
200 | ||
201 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1); | |
202 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2); | |
203 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3); | |
204 | } | |
205 | ||
206 | /* | |
207 | * NOTE: looks like if horizontal decimation is used (if we supported that) | |
208 | * then the width used to calculate SMP block requirements is the post- | |
209 | * decimated width. Ie. SMP buffering sits downstream of decimation (which | |
210 | * presumably happens during the dma from scanout buffer). | |
211 | */ | |
9cc137a3 WX |
212 | int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, |
213 | const struct mdp_format *format, u32 width, bool hdecim) | |
bfcdfb0e | 214 | { |
bfcdfb0e SV |
215 | struct mdp5_kms *mdp5_kms = get_kms(smp); |
216 | struct drm_device *dev = mdp5_kms->dev; | |
42238da8 | 217 | int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg); |
bfcdfb0e | 218 | int i, hsub, nplanes, nlines, nblks, ret; |
9cc137a3 | 219 | u32 fmt = format->base.pixel_format; |
bfcdfb0e SV |
220 | |
221 | nplanes = drm_format_num_planes(fmt); | |
222 | hsub = drm_format_horz_chroma_subsampling(fmt); | |
223 | ||
224 | /* different if BWC (compressed framebuffer?) enabled: */ | |
225 | nlines = 2; | |
226 | ||
9cc137a3 WX |
227 | /* Newer MDPs have split/packing logic, which fetches sub-sampled |
228 | * U and V components (splits them from Y if necessary) and packs | |
229 | * them together, writes to SMP using a single client. | |
230 | */ | |
231 | if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) { | |
232 | fmt = DRM_FORMAT_NV24; | |
233 | nplanes = 2; | |
234 | ||
235 | /* if decimation is enabled, HW decimates less on the | |
236 | * sub sampled chroma components | |
237 | */ | |
238 | if (hdecim && (hsub > 1)) | |
239 | hsub = 1; | |
240 | } | |
241 | ||
bfcdfb0e SV |
242 | for (i = 0, nblks = 0; i < nplanes; i++) { |
243 | int n, fetch_stride, cpp; | |
244 | ||
245 | cpp = drm_format_plane_cpp(fmt, i); | |
246 | fetch_stride = width * cpp / (i ? hsub : 1); | |
247 | ||
248 | n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size); | |
249 | ||
250 | /* for hw rev v1.00 */ | |
2e362e17 | 251 | if (rev == 0) |
bfcdfb0e SV |
252 | n = roundup_pow_of_two(n); |
253 | ||
254 | DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n); | |
255 | ret = smp_request_block(smp, pipe2client(pipe, i), n); | |
256 | if (ret) { | |
257 | dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n", | |
258 | n, ret); | |
259 | return ret; | |
260 | } | |
261 | ||
262 | nblks += n; | |
263 | } | |
264 | ||
265 | set_fifo_thresholds(smp, pipe, nblks); | |
266 | ||
06c0dd96 RC |
267 | return 0; |
268 | } | |
269 | ||
bfcdfb0e | 270 | /* Release SMP blocks for all clients of the pipe */ |
42238da8 | 271 | void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe) |
bfcdfb0e | 272 | { |
657c63f0 WX |
273 | int i; |
274 | unsigned long flags; | |
275 | int cnt = smp->blk_cnt; | |
276 | ||
277 | for (i = 0; i < pipe2nclients(pipe); i++) { | |
278 | mdp5_smp_state_t assigned; | |
279 | u32 cid = pipe2client(pipe, i); | |
280 | struct mdp5_client_smp_state *ps = &smp->client_state[cid]; | |
281 | ||
282 | spin_lock_irqsave(&smp->state_lock, flags); | |
283 | ||
284 | /* clear hw assignment */ | |
285 | bitmap_or(assigned, ps->inuse, ps->configured, cnt); | |
286 | update_smp_state(smp, CID_UNUSED, &assigned); | |
287 | ||
288 | /* free to global pool */ | |
289 | bitmap_andnot(smp->state, smp->state, ps->pending, cnt); | |
290 | bitmap_andnot(smp->state, smp->state, assigned, cnt); | |
291 | ||
292 | /* clear client's infor */ | |
293 | bitmap_zero(ps->pending, cnt); | |
294 | bitmap_zero(ps->configured, cnt); | |
295 | bitmap_zero(ps->inuse, cnt); | |
296 | ||
297 | spin_unlock_irqrestore(&smp->state_lock, flags); | |
298 | } | |
bfcdfb0e | 299 | |
bfcdfb0e SV |
300 | set_fifo_thresholds(smp, pipe, 0); |
301 | } | |
302 | ||
/* Write client id @cid into the hw SMP_ALLOC registers for every block
 * set in @assigned.  Each 32-bit ALLOC register packs three client
 * fields, so block N lives in register N/3, field N%3.  Both the write
 * (W) and read (R) allocation registers get the same value.
 */
static void update_smp_state(struct mdp5_smp *smp,
		u32 cid, mdp5_smp_state_t *assigned)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	int cnt = smp->blk_cnt;
	u32 blk, val;

	for_each_set_bit(blk, *assigned, cnt) {
		int idx = blk / 3;	/* which ALLOC register */
		int fld = blk % 3;	/* which client field within it */

		/* read-modify-write: only this block's field may change */
		val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));

		switch (fld) {
		case 0:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
			break;
		case 1:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
			break;
		case 2:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
			break;
		}

		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
	}
}
335 | ||
336 | /* step #2: configure hw for union(pending, inuse): */ | |
42238da8 | 337 | void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe) |
06c0dd96 | 338 | { |
bfcdfb0e | 339 | int cnt = smp->blk_cnt; |
06c0dd96 | 340 | mdp5_smp_state_t assigned; |
bfcdfb0e SV |
341 | int i; |
342 | ||
343 | for (i = 0; i < pipe2nclients(pipe); i++) { | |
6fa6acdf | 344 | u32 cid = pipe2client(pipe, i); |
bfcdfb0e | 345 | struct mdp5_client_smp_state *ps = &smp->client_state[cid]; |
06c0dd96 | 346 | |
657c63f0 WX |
347 | /* |
348 | * if vblank has not happened since last smp_configure | |
349 | * skip the configure for now | |
350 | */ | |
351 | if (!bitmap_equal(ps->inuse, ps->configured, cnt)) | |
352 | continue; | |
353 | ||
354 | bitmap_copy(ps->configured, ps->pending, cnt); | |
355 | bitmap_or(assigned, ps->inuse, ps->configured, cnt); | |
bfcdfb0e SV |
356 | update_smp_state(smp, cid, &assigned); |
357 | } | |
06c0dd96 RC |
358 | } |
359 | ||
657c63f0 | 360 | /* step #3: after vblank, copy configured -> inuse: */ |
42238da8 | 361 | void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) |
06c0dd96 | 362 | { |
bfcdfb0e | 363 | int cnt = smp->blk_cnt; |
06c0dd96 | 364 | mdp5_smp_state_t released; |
bfcdfb0e SV |
365 | int i; |
366 | ||
367 | for (i = 0; i < pipe2nclients(pipe); i++) { | |
6fa6acdf | 368 | u32 cid = pipe2client(pipe, i); |
bfcdfb0e SV |
369 | struct mdp5_client_smp_state *ps = &smp->client_state[cid]; |
370 | ||
371 | /* | |
372 | * Figure out if there are any blocks we where previously | |
373 | * using, which can be released and made available to other | |
374 | * clients: | |
375 | */ | |
657c63f0 | 376 | if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) { |
bfcdfb0e SV |
377 | unsigned long flags; |
378 | ||
379 | spin_lock_irqsave(&smp->state_lock, flags); | |
380 | /* clear released blocks: */ | |
381 | bitmap_andnot(smp->state, smp->state, released, cnt); | |
382 | spin_unlock_irqrestore(&smp->state_lock, flags); | |
383 | ||
384 | update_smp_state(smp, CID_UNUSED, &released); | |
385 | } | |
06c0dd96 | 386 | |
657c63f0 | 387 | bitmap_copy(ps->inuse, ps->configured, cnt); |
06c0dd96 | 388 | } |
bfcdfb0e SV |
389 | } |
390 | ||
/* Free the SMP allocator state (no hw teardown is needed here). */
void mdp5_smp_destroy(struct mdp5_smp *smp)
{
	kfree(smp);
}
395 | ||
42238da8 | 396 | struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg) |
bfcdfb0e SV |
397 | { |
398 | struct mdp5_smp *smp = NULL; | |
399 | int ret; | |
400 | ||
401 | smp = kzalloc(sizeof(*smp), GFP_KERNEL); | |
402 | if (unlikely(!smp)) { | |
403 | ret = -ENOMEM; | |
404 | goto fail; | |
405 | } | |
406 | ||
407 | smp->dev = dev; | |
408 | smp->blk_cnt = cfg->mmb_count; | |
409 | smp->blk_size = cfg->mmb_size; | |
410 | ||
411 | /* statically tied MMBs cannot be re-allocated: */ | |
412 | bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt); | |
60fb49ca | 413 | memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved)); |
bfcdfb0e SV |
414 | spin_lock_init(&smp->state_lock); |
415 | ||
416 | return smp; | |
417 | fail: | |
418 | if (smp) | |
419 | mdp5_smp_destroy(smp); | |
06c0dd96 | 420 | |
bfcdfb0e | 421 | return ERR_PTR(ret); |
06c0dd96 | 422 | } |