/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */


#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_plane_helper.h>

static void kfree_state(struct drm_atomic_state *state)
{
	kfree(state->connectors);
	kfree(state->connector_states);
	kfree(state->crtcs);
	kfree(state->crtc_states);
	kfree(state->planes);
	kfree(state->plane_states);
	kfree(state);
}

/**
 * drm_atomic_state_alloc - allocate atomic state
 * @dev: DRM device
 *
 * This allocates an empty atomic state to track updates.
 */
struct drm_atomic_state *
drm_atomic_state_alloc(struct drm_device *dev)
{
	struct drm_atomic_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);

	state->crtcs = kcalloc(dev->mode_config.num_crtc,
			       sizeof(*state->crtcs), GFP_KERNEL);
	if (!state->crtcs)
		goto fail;
	state->crtc_states = kcalloc(dev->mode_config.num_crtc,
				     sizeof(*state->crtc_states), GFP_KERNEL);
	if (!state->crtc_states)
		goto fail;
	state->planes = kcalloc(dev->mode_config.num_total_plane,
				sizeof(*state->planes), GFP_KERNEL);
	if (!state->planes)
		goto fail;
	state->plane_states = kcalloc(dev->mode_config.num_total_plane,
				      sizeof(*state->plane_states), GFP_KERNEL);
	if (!state->plane_states)
		goto fail;
	state->connectors = kcalloc(state->num_connector,
				    sizeof(*state->connectors),
				    GFP_KERNEL);
	if (!state->connectors)
		goto fail;
	state->connector_states = kcalloc(state->num_connector,
					  sizeof(*state->connector_states),
					  GFP_KERNEL);
	if (!state->connector_states)
		goto fail;

	state->dev = dev;

	DRM_DEBUG_KMS("Allocate atomic state %p\n", state);

	return state;
fail:
	kfree_state(state);

	return NULL;
}
EXPORT_SYMBOL(drm_atomic_state_alloc);

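/*
 * A minimal usage sketch (illustrative only, not lifted from a real driver):
 * before calling any of the drm_atomic_get_*_state() functions below, the
 * caller must point @state->acquire_ctx at an initialized acquire context,
 * since the per-object modeset locks are taken through it.
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	struct drm_atomic_state *state;
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 *
 *	state = drm_atomic_state_alloc(dev);
 *	if (!state)
 *		return -ENOMEM;
 *	state->acquire_ctx = &ctx;
 */
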
/**
 * drm_atomic_state_clear - clear state object
 * @state: atomic state
 *
 * When the w/w mutex algorithm detects a deadlock we need to back off and drop
 * all locks, so someone else could sneak in and change the current modeset
 * configuration. That means the state assembled in @state is no longer an
 * atomic update to the current state, but to some arbitrary earlier state,
 * which could break assumptions the driver's ->atomic_check likely relies on.
 *
 * Hence we must clear all cached state and completely start over, using this
 * function.
 */
void drm_atomic_state_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	int i;

	DRM_DEBUG_KMS("Clearing atomic state %p\n", state);

	/*
	 * Destroy the duplicated object states and reset the array slots, so
	 * that the state object can be re-used when the caller retries after a
	 * locking backoff.
	 */
	for (i = 0; i < state->num_connector; i++) {
		struct drm_connector *connector = state->connectors[i];

		if (!connector)
			continue;

		WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

		connector->funcs->atomic_destroy_state(connector,
						       state->connector_states[i]);
		state->connectors[i] = NULL;
		state->connector_states[i] = NULL;
	}

	for (i = 0; i < config->num_crtc; i++) {
		struct drm_crtc *crtc = state->crtcs[i];

		if (!crtc)
			continue;

		crtc->funcs->atomic_destroy_state(crtc,
						  state->crtc_states[i]);
		state->crtcs[i] = NULL;
		state->crtc_states[i] = NULL;
	}

	for (i = 0; i < config->num_total_plane; i++) {
		struct drm_plane *plane = state->planes[i];

		if (!plane)
			continue;

		plane->funcs->atomic_destroy_state(plane,
						   state->plane_states[i]);
		state->planes[i] = NULL;
		state->plane_states[i] = NULL;
	}
}
EXPORT_SYMBOL(drm_atomic_state_clear);

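/*
 * Sketch of the backoff dance described above (illustrative pseudo-flow, the
 * surrounding labels are hypothetical): whenever a state acquisition, check or
 * commit call returns -EDEADLK, the caller clears the state, backs off and
 * restarts from the top with the same state object.
 *
 *	retry:
 *		... build up @state and check/commit it, collecting ret ...
 *
 *		if (ret == -EDEADLK) {
 *			drm_atomic_state_clear(state);
 *			drm_modeset_backoff(state->acquire_ctx);
 *			goto retry;
 *		}
 */
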
/**
 * drm_atomic_state_free - free all memory for an atomic state
 * @state: atomic state to deallocate
 *
 * This frees all memory associated with an atomic state, including all the
 * per-object state for planes, crtcs and connectors.
 */
void drm_atomic_state_free(struct drm_atomic_state *state)
{
	drm_atomic_state_clear(state);

	DRM_DEBUG_KMS("Freeing atomic state %p\n", state);

	kfree_state(state);
}
EXPORT_SYMBOL(drm_atomic_state_free);

/**
 * drm_atomic_get_crtc_state - get crtc state
 * @state: global atomic state object
 * @crtc: crtc to get state object for
 *
 * This function returns the crtc state for the given crtc, allocating it if
 * needed. It will also grab the relevant crtc lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_crtc_state *
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
			  struct drm_crtc *crtc)
{
	int ret, index;
	struct drm_crtc_state *crtc_state;

	index = drm_crtc_index(crtc);

	if (state->crtc_states[index])
		return state->crtc_states[index];

	ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
	if (!crtc_state)
		return ERR_PTR(-ENOMEM);

	state->crtc_states[index] = crtc_state;
	state->crtcs[index] = crtc;
	crtc_state->state = state;

	DRM_DEBUG_KMS("Added [CRTC:%d] %p state to %p\n",
		      crtc->base.id, crtc_state, state);

	return crtc_state;
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);

/**
 * drm_atomic_get_plane_state - get plane state
 * @state: global atomic state object
 * @plane: plane to get state object for
 *
 * This function returns the plane state for the given plane, allocating it if
 * needed. It will also grab the relevant plane lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_plane_state *
drm_atomic_get_plane_state(struct drm_atomic_state *state,
			   struct drm_plane *plane)
{
	int ret, index;
	struct drm_plane_state *plane_state;

	index = drm_plane_index(plane);

	if (state->plane_states[index])
		return state->plane_states[index];

	/*
	 * With per-plane locking only this plane's lock needs to be taken
	 * here. If the plane is already bound to a crtc, the crtc state (and
	 * with it the crtc lock) is pulled in further below.
	 */
	ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	plane_state = plane->funcs->atomic_duplicate_state(plane);
	if (!plane_state)
		return ERR_PTR(-ENOMEM);

	state->plane_states[index] = plane_state;
	state->planes[index] = plane;
	plane_state->state = state;

	DRM_DEBUG_KMS("Added [PLANE:%d] %p state to %p\n",
		      plane->base.id, plane_state, state);

	if (plane_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       plane_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return plane_state;
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);

/**
 * drm_atomic_get_connector_state - get connector state
 * @state: global atomic state object
 * @connector: connector to get state object for
 *
 * This function returns the connector state for the given connector,
 * allocating it if needed. It will also grab the relevant connector lock to
 * make sure that the state is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
			       struct drm_connector *connector)
{
	int ret, index;
	struct drm_mode_config *config = &connector->dev->mode_config;
	struct drm_connector_state *connector_state;

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	index = drm_connector_index(connector);

	/*
	 * Construction of atomic state updates can race with a connector
	 * hot-add which might overflow. In this case flip the table and just
	 * restart the entire ioctl - no one is fast enough to livelock a cpu
	 * with physical hotplug events anyway.
	 *
	 * Note that we only grab the indexes once we have the right lock to
	 * prevent hotplug/unplugging of connectors. So removal is no problem,
	 * at most the array is a bit too large.
	 */
	if (index >= state->num_connector) {
		DRM_DEBUG_KMS("Hot-added connector would overflow state array, restarting\n");
		return ERR_PTR(-EAGAIN);
	}

	if (state->connector_states[index])
		return state->connector_states[index];

	connector_state = connector->funcs->atomic_duplicate_state(connector);
	if (!connector_state)
		return ERR_PTR(-ENOMEM);

	state->connector_states[index] = connector_state;
	state->connectors[index] = connector;
	connector_state->state = state;

	DRM_DEBUG_KMS("Added [CONNECTOR:%d] %p state to %p\n",
		      connector->base.id, connector_state, state);

	if (connector_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       connector_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);

/**
 * drm_atomic_set_crtc_for_plane - set crtc for plane
 * @plane_state: atomic state object for the plane
 * @crtc: crtc to use for the plane
 *
 * Changing the assigned crtc for a plane requires us to grab the lock and state
 * for the new crtc, as needed. This function takes care of all these details
 * besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success, -EDEADLK or -ENOMEM on failure. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
			      struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	plane_state->crtc = crtc;

	if (crtc)
		DRM_DEBUG_KMS("Link plane state %p to [CRTC:%d]\n",
			      plane_state, crtc->base.id);
	else
		DRM_DEBUG_KMS("Link plane state %p to [NOCRTC]\n", plane_state);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);

/**
 * drm_atomic_set_fb_for_plane - set framebuffer for plane
 * @plane_state: atomic state object for the plane
 * @fb: fb to use for the plane
 *
 * Changing the assigned framebuffer for a plane requires us to grab a reference
 * to the new fb and drop the reference to the old fb, if there is one. This
 * function takes care of all these details besides updating the pointer in the
 * state object itself.
 */
void
drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
			    struct drm_framebuffer *fb)
{
	if (plane_state->fb)
		drm_framebuffer_unreference(plane_state->fb);
	if (fb)
		drm_framebuffer_reference(fb);
	plane_state->fb = fb;

	if (fb)
		DRM_DEBUG_KMS("Set [FB:%d] for plane state %p\n",
			      fb->base.id, plane_state);
	else
		DRM_DEBUG_KMS("Set [NOFB] for plane state %p\n", plane_state);
}
EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);

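/*
 * Illustrative sketch (function and variable names are made up): building a
 * plane update on top of the two setters above. Error handling for -EDEADLK
 * and the final commit follow the retry pattern shown near
 * drm_atomic_state_clear().
 *
 *	static int example_prepare_plane_update(struct drm_atomic_state *state,
 *						struct drm_plane *plane,
 *						struct drm_crtc *crtc,
 *						struct drm_framebuffer *fb)
 *	{
 *		struct drm_plane_state *plane_state;
 *		int ret;
 *
 *		plane_state = drm_atomic_get_plane_state(state, plane);
 *		if (IS_ERR(plane_state))
 *			return PTR_ERR(plane_state);
 *
 *		ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
 *		if (ret)
 *			return ret;
 *		drm_atomic_set_fb_for_plane(plane_state, fb);
 *
 *		return 0;
 *	}
 */
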
/**
 * drm_atomic_set_crtc_for_connector - set crtc for connector
 * @conn_state: atomic state object for the connector
 * @crtc: crtc to use for the connector
 *
 * Changing the assigned crtc for a connector requires us to grab the lock and
 * state for the new crtc, as needed. This function takes care of all these
 * details besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success, -EDEADLK or -ENOMEM on failure. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
				  struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	conn_state->crtc = crtc;

	if (crtc)
		DRM_DEBUG_KMS("Link connector state %p to [CRTC:%d]\n",
			      conn_state, crtc->base.id);
	else
		DRM_DEBUG_KMS("Link connector state %p to [NOCRTC]\n",
			      conn_state);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);

/**
 * drm_atomic_add_affected_connectors - add connectors for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all connectors
 * currently using @crtc to the atomic configuration @state. Note that this
 * function must acquire the connection mutex. This can potentially cause
 * unneeded serialization if the update is just for the planes on one crtc.
 * Hence drivers and helpers should only call this when really needed (e.g.
 * when a full modeset needs to happen due to some change).
 *
 * Returns:
 * 0 on success, -EDEADLK or -ENOMEM on failure. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
				   struct drm_crtc *crtc)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	int ret;

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("Adding all current connectors for [CRTC:%d] to %p\n",
		      crtc->base.id, state);

	/*
	 * Changed connectors are already in @state, so only need to look at the
	 * current configuration.
	 */
	list_for_each_entry(connector, &config->connector_list, head) {
		if (connector->state->crtc != crtc)
			continue;

		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state))
			return PTR_ERR(conn_state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_connectors);

/**
 * drm_atomic_connectors_for_crtc - count number of connected outputs
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function counts all connectors which will be connected to @crtc
 * according to @state. Useful to recompute the enable state for @crtc.
 */
int
drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
			       struct drm_crtc *crtc)
{
	int i, num_connected_connectors = 0;

	for (i = 0; i < state->num_connector; i++) {
		struct drm_connector_state *conn_state;

		conn_state = state->connector_states[i];

		if (conn_state && conn_state->crtc == crtc)
			num_connected_connectors++;
	}

	DRM_DEBUG_KMS("State %p has %i connectors for [CRTC:%d]\n",
		      state, num_connected_connectors, crtc->base.id);

	return num_connected_connectors;
}
EXPORT_SYMBOL(drm_atomic_connectors_for_crtc);

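/*
 * Illustrative sketch (names are hypothetical): the check phase for a full
 * modeset might combine the two functions above, first pulling all connectors
 * currently routed to the crtc into @state and then using the connector count
 * to decide whether the crtc still drives any output.
 *
 *	static int example_check_crtc_connectors(struct drm_atomic_state *state,
 *						 struct drm_crtc *crtc)
 *	{
 *		int ret, num_connectors;
 *
 *		ret = drm_atomic_add_affected_connectors(state, crtc);
 *		if (ret)
 *			return ret;
 *
 *		num_connectors = drm_atomic_connectors_for_crtc(state, crtc);
 *		if (num_connectors == 0)
 *			DRM_DEBUG_KMS("[CRTC:%d] has no connectors left\n",
 *				      crtc->base.id);
 *
 *		return 0;
 *	}
 */
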
/**
 * drm_atomic_legacy_backoff - locking backoff for legacy ioctls
 * @state: atomic state
 *
 * This function should be used by legacy entry points which don't understand
 * -EDEADLK semantics. For simplicity this one will grab all modeset locks after
 * the slowpath has completed.
 */
void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
{
	int ret;

retry:
	drm_modeset_backoff(state->acquire_ctx);

	ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
			       state->acquire_ctx);
	if (ret)
		goto retry;
	ret = drm_modeset_lock_all_crtcs(state->dev,
					 state->acquire_ctx);
	if (ret)
		goto retry;
}
EXPORT_SYMBOL(drm_atomic_legacy_backoff);

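/*
 * Sketch of how a legacy entry point might use this (labels and ordering are
 * illustrative, not a verbatim copy of any helper): since a legacy ioctl
 * cannot return -EDEADLK to userspace, it backs off, clears the state and
 * retries with all relevant locks held again.
 *
 *	fail:
 *		if (ret == -EDEADLK)
 *			goto backoff;
 *
 *		drm_atomic_state_free(state);
 *		return ret;
 *
 *	backoff:
 *		drm_atomic_legacy_backoff(state);
 *		drm_atomic_state_clear(state);
 *		goto retry;
 */
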
/**
 * drm_atomic_check_only - check whether a given config would work
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_check_only(struct drm_atomic_state *state)
{
	struct drm_mode_config *config = &state->dev->mode_config;

	DRM_DEBUG_KMS("checking %p\n", state);

	if (config->funcs->atomic_check)
		return config->funcs->atomic_check(state->dev, state);
	else
		return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);

/**
 * drm_atomic_commit - commit configuration atomically
 * @state: atomic configuration to commit
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * Also note that on successful execution ownership of @state is transferred
 * from the caller of this function to the function itself. The caller must not
 * free or in any other way access @state. If the function fails then the caller
 * must clean up @state itself.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_commit(struct drm_atomic_state *state)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	int ret;

	ret = drm_atomic_check_only(state);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("committing %p\n", state);

	return config->funcs->atomic_commit(state->dev, state, false);
}
EXPORT_SYMBOL(drm_atomic_commit);

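/*
 * End-to-end sketch (illustrative only, the function name is made up) showing
 * the ownership rule documented above together with the -EDEADLK retry: on
 * success @state now belongs to the commit machinery and must not be touched,
 * on any other failure the caller frees it.
 *
 *	static int example_commit_update(struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *	retry:
 *		... add crtc/plane/connector states to @state ...
 *
 *		ret = drm_atomic_commit(state);
 *		if (ret == -EDEADLK) {
 *			drm_atomic_state_clear(state);
 *			drm_modeset_backoff(state->acquire_ctx);
 *			goto retry;
 *		}
 *		if (ret)
 *			drm_atomic_state_free(state);
 *
 *		return ret;
 *	}
 */
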
/**
 * drm_atomic_async_commit - atomic & async configuration commit
 * @state: atomic configuration to commit
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * Also note that on successful execution ownership of @state is transferred
 * from the caller of this function to the function itself. The caller must not
 * free or in any other way access @state. If the function fails then the caller
 * must clean up @state itself.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_async_commit(struct drm_atomic_state *state)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	int ret;

	ret = drm_atomic_check_only(state);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("committing %p asynchronously\n", state);

	return config->funcs->atomic_commit(state->dev, state, true);
}
EXPORT_SYMBOL(drm_atomic_async_commit);