drivers/gpu/drm/i915/intel_atomic.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: atomic modeset support
 *
 * The functions here implement the state management and hardware programming
 * dispatch required by the atomic modeset infrastructure.
 * See intel_atomic_plane.c for the plane-specific atomic functionality.
 */

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"

/**
 * intel_connector_atomic_get_property - fetch connector property value
 * @connector: connector to fetch property for
 * @state: state containing the property value
 * @property: property to look up
 * @val: pointer to write property value into
 *
 * The DRM core does not store shadow copies of properties for
 * atomic-capable drivers. This entrypoint is used to fetch
 * the current value of a driver-specific connector property.
 *
 * Returns: 0 on success, -EINVAL if @property is not found on @connector.
 */
int
intel_connector_atomic_get_property(struct drm_connector *connector,
				    const struct drm_connector_state *state,
				    struct drm_property *property,
				    uint64_t *val)
{
	int i;

	/*
	 * TODO: We only have atomic modeset for planes at the moment, so the
	 * crtc/connector code isn't quite ready yet. Until it's ready,
	 * continue to look up all property values in the DRM's shadow copy
	 * in obj->properties->values[].
	 *
	 * When the crtc/connector state work matures, this function should
	 * be updated to read the values out of the state structure instead.
	 */
	for (i = 0; i < connector->base.properties->count; i++) {
		if (connector->base.properties->properties[i] == property) {
			*val = connector->base.properties->values[i];
			return 0;
		}
	}

	return -EINVAL;
}

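/*
 * A sketch of how this helper gets wired up: each Intel connector's
 * &drm_connector_funcs points its .atomic_get_property hook here. The
 * initializer below is illustrative (the real tables live with the
 * individual connector implementations):
 *
 *	static const struct drm_connector_funcs intel_dp_connector_funcs = {
 *		...
 *		.atomic_get_property = intel_connector_atomic_get_property,
 *	};
 */
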
/**
 * intel_crtc_duplicate_state - duplicate crtc state
 * @crtc: drm crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * Intel-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
intel_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct intel_crtc_state *crtc_state;

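	/*
	 * kmemdup() copies the whole intel_crtc_state in one shot,
	 * including the embedded base drm_crtc_state; the helper call
	 * below then re-initializes the per-commit bookkeeping (changed
	 * flags, pending event) in that base state.
	 */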
	crtc_state = kmemdup(crtc->state, sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);

	crtc_state->update_pipe = false;
	crtc_state->disable_lp_wm = false;
	crtc_state->disable_cxsr = false;
	crtc_state->wm_changed = false;
	crtc_state->fb_changed = false;

	return &crtc_state->base;
}

/**
 * intel_crtc_destroy_state - destroy crtc state
 * @crtc: drm crtc
 * @state: the crtc state to destroy
 *
 * Destroys the crtc state (both common and Intel-specific) for the
 * specified crtc.
 */
void
intel_crtc_destroy_state(struct drm_crtc *crtc,
			 struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}

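/*
 * The duplicate/destroy pair above is hooked into the crtc's
 * &drm_crtc_funcs elsewhere in the driver; a sketch of that hookup
 * (the surrounding initializer is illustrative):
 *
 *	static const struct drm_crtc_funcs intel_crtc_funcs = {
 *		...
 *		.atomic_duplicate_state = intel_crtc_duplicate_state,
 *		.atomic_destroy_state = intel_crtc_destroy_state,
 *	};
 */
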
/**
 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
 * @dev: DRM device
 * @intel_crtc: intel crtc
 * @crtc_state: incoming crtc_state to validate and setup scalers
 *
 * This function sets up scalers based on the staged scaling requests for
 * @intel_crtc and its planes. It is called from the crtc-level check path.
 * If a request is supportable, it attaches scalers to the requested planes
 * and crtc.
 *
 * This function takes into account the current scaler(s) in use by any
 * planes that are not part of this atomic state.
 *
 * Returns:
 *   0 - scalers were set up successfully
 *   error code - otherwise
 */
int intel_atomic_setup_scalers(struct drm_device *dev,
			       struct intel_crtc *intel_crtc,
			       struct intel_crtc_state *crtc_state)
{
	struct drm_plane *plane = NULL;
	struct intel_plane *intel_plane;
	struct intel_plane_state *plane_state = NULL;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct drm_atomic_state *drm_state = crtc_state->base.state;
	int num_scalers_need;
	int i, j;

	num_scalers_need = hweight32(scaler_state->scaler_users);

	/*
	 * High level flow:
	 * - staged scaler requests are already in scaler_state->scaler_users
	 * - check whether staged scaling requests can be supported
	 * - add planes using scalers that aren't in current transaction
	 * - assign scalers to requested users
	 * - as part of plane commit, scalers will be committed
	 *   (i.e., either attached or detached) to respective planes in hw
	 * - as part of crtc_commit, scaler will be either attached or detached
	 *   to crtc in hw
	 */
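
	/*
	 * Layout of scaler_state->scaler_users, for orientation (a
	 * sketch; the exact bit position of SKL_CRTC_INDEX comes from
	 * the driver headers):
	 *
	 *	bit SKL_CRTC_INDEX	crtc (panel fitter) needs a scaler
	 *	bit N (low bits)	plane with index N needs a scaler
	 */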

	/* fail if required scalers > available scalers */
	if (num_scalers_need > intel_crtc->num_scalers) {
		DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
			      num_scalers_need, intel_crtc->num_scalers);
		return -EINVAL;
	}

	/* walk through scaler_users bits and start assigning scalers */
	for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
		int *scaler_id;
		const char *name;
		int idx;

		/* skip if scaler not required */
		if (!(scaler_state->scaler_users & (1 << i)))
			continue;

		if (i == SKL_CRTC_INDEX) {
			name = "CRTC";
			idx = intel_crtc->base.base.id;

			/* panel fitter case: assign as a crtc scaler */
			scaler_id = &scaler_state->scaler_id;
		} else {
			name = "PLANE";

			/* plane scaler case: assign as a plane scaler */
			/* find the plane that set the bit as scaler_user */
			plane = drm_state->planes[i];

			/*
			 * to enable/disable hq mode, add planes that are using scaler
			 * into this transaction
			 */
			if (!plane) {
				struct drm_plane_state *state;

				plane = drm_plane_from_index(dev, i);
				state = drm_atomic_get_plane_state(drm_state, plane);
				if (IS_ERR(state)) {
					DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
						      plane->base.id);
					return PTR_ERR(state);
				}

				/*
				 * the plane is added after plane checks are run,
				 * but since this plane is unchanged just do the
				 * minimum required validation.
				 */
				crtc_state->base.planes_changed = true;
			}

			intel_plane = to_intel_plane(plane);
			idx = plane->base.id;

			/* plane on different crtc cannot be a scaler user of this crtc */
			if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
				continue;

			plane_state = to_intel_plane_state(drm_state->plane_states[i]);
			scaler_id = &plane_state->scaler_id;
		}

		if (*scaler_id < 0) {
			/* find a free scaler */
			for (j = 0; j < intel_crtc->num_scalers; j++) {
				if (!scaler_state->scalers[j].in_use) {
					scaler_state->scalers[j].in_use = 1;
					*scaler_id = j;
					DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
						      intel_crtc->pipe, *scaler_id, name, idx);
					break;
				}
			}
		}

		if (WARN_ON(*scaler_id < 0)) {
			DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
			continue;
		}

		/* set scaler mode */
		if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
			/*
			 * when only 1 scaler is in use on either pipe A or B,
			 * scaler 0 operates in high quality (HQ) mode.
			 * In this case use scaler 0 to take advantage of HQ mode
			 */
			*scaler_id = 0;
			scaler_state->scalers[0].in_use = 1;
			scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
			scaler_state->scalers[1].in_use = 0;
		} else {
			scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
		}
	}

	return 0;
}
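
/*
 * For orientation, a sketch of the intended call site: the crtc-level
 * atomic check on gen9+ hardware hands its staged state to this
 * function (the surrounding variable names are illustrative):
 *
 *	ret = intel_atomic_setup_scalers(dev, intel_crtc, pipe_config);
 *	if (ret)
 *		return ret;
 */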

static void
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll_config *shared_dpll)
{
	enum intel_dpll_id i;

	/* Copy shared dpll state */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		shared_dpll[i] = pll->config;
	}
}

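/**
 * intel_atomic_get_shared_dpll_state - get the shared DPLL table from state
 * @s: atomic state
 *
 * Lazily snapshots the current shared DPLL configuration into the
 * Intel-specific atomic state the first time it is requested, so a
 * transaction can stage DPLL changes without touching the live
 * configuration. A sketch of a typical caller (variable names are
 * illustrative):
 *
 *	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
 *	shared_dpll[pll_id].crtc_mask |= 1 << intel_crtc->pipe;
 */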
struct intel_shared_dpll_config *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);

	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));

	if (!state->dpll_set) {
		state->dpll_set = true;

		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
						  state->shared_dpll);
	}

	return state->shared_dpll;
}

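/**
 * intel_atomic_state_alloc - allocate the driver-private atomic state
 * @dev: DRM device
 *
 * Allocates an &intel_atomic_state, a wrapper around the core
 * &drm_atomic_state, so driver-private data such as the shared DPLL
 * table can ride along with each atomic transaction.
 *
 * Returns: the base &drm_atomic_state pointer, or NULL on failure.
 */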
struct drm_atomic_state *
intel_atomic_state_alloc(struct drm_device *dev)
{
	struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
		kfree(state);
		return NULL;
	}

	return &state->base;
}

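/**
 * intel_atomic_state_clear - clear the driver-private atomic state
 * @s: atomic state to clear
 *
 * Resets both the core state and the driver-private flags so the state
 * object can be reused, e.g. when an atomic check backs off and retries
 * after a modeset lock deadlock.
 */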
void intel_atomic_state_clear(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);

	drm_atomic_state_default_clear(&state->base);
	state->dpll_set = state->modeset = false;
}