Commit | Line | Data |
---|---|---|
0136db58 BW |
1 | /* |
2 | * Copyright © 2012 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |
21 | * IN THE SOFTWARE. | |
22 | * | |
23 | * Authors: | |
24 | * Ben Widawsky <ben@bwidawsk.net> | |
25 | * | |
26 | */ | |
27 | ||
28 | #include <linux/device.h> | |
29 | #include <linux/module.h> | |
30 | #include <linux/stat.h> | |
31 | #include <linux/sysfs.h> | |
84bc7581 | 32 | #include "intel_drv.h" |
0136db58 BW |
33 | #include "i915_drv.h" |
34 | ||
/* A drm sysfs device node keeps its drm_minor in drvdata; fetch it back. */
#define dev_to_drm_minor(d) dev_get_drvdata((d))
14c8d110 | 36 | |
5ab3633d | 37 | #ifdef CONFIG_PM |
0136db58 BW |
/*
 * calc_residency - read an RC6 residency counter and convert it to ms
 * @dev: drm device
 * @reg: residency counter register (e.g. GEN6_GT_GFX_RC6)
 *
 * Returns 0 when RC6 is disabled.  A runtime-pm reference is held around
 * the register accesses.  The conversion is done in 64-bit fixed point
 * (units/div, with @bias providing extra precision on VLV/CHV).
 */
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */
	/* Default: ticks are 1.28us, so ms = count * 128 / 100000. */
	u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
	u32 ret;

	/* Residency counters are meaningless unless RC6 is enabled. */
	if (!intel_enable_rc6(dev))
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev)) {
		u32 clk_reg, czcount_30ns;

		if (IS_CHERRYVIEW(dev))
			clk_reg = CHV_CLK_CTL1;
		else
			clk_reg = VLV_CLK_CTL2;

		czcount_30ns = I915_READ(clk_reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;

		if (!czcount_30ns) {
			/* Hardware reported a nonsense CZ clock; bail out. */
			WARN(!czcount_30ns, "bogus CZ count value");
			ret = 0;
			goto out;
		}

		units = 0;
		div = 1000000ULL;

		if (IS_CHERRYVIEW(dev)) {
			/* Special case for 320Mhz */
			if (czcount_30ns == 1) {
				div = 10000000ULL;
				units = 3125ULL;
			} else {
				/* chv counts are one less */
				czcount_30ns += 1;
			}
		}

		/* Unless set by the 320MHz special case above, derive the
		 * per-tick unit (scaled up by bias) from the CZ count. */
		if (units == 0)
			units = DIV_ROUND_UP_ULL(30ULL * bias,
						 (u64)czcount_30ns);

		/* High-range counting: each tick is worth 256x more. */
		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;

		/* Compensate for the bias folded into units above. */
		div = div * bias;
	}

	raw_time = I915_READ(reg) * units;
	ret = DIV_ROUND_UP_ULL(raw_time, div);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
98 | ||
99 | static ssize_t | |
dbdfd8e9 | 100 | show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) |
0136db58 | 101 | { |
14c8d110 | 102 | struct drm_minor *dminor = dev_to_drm_minor(kdev); |
3e2a1556 | 103 | return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev)); |
0136db58 BW |
104 | } |
105 | ||
106 | static ssize_t | |
dbdfd8e9 | 107 | show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf) |
0136db58 | 108 | { |
5bdebb18 | 109 | struct drm_minor *dminor = dev_get_drvdata(kdev); |
0136db58 | 110 | u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6); |
3e2a1556 | 111 | return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency); |
0136db58 BW |
112 | } |
113 | ||
114 | static ssize_t | |
dbdfd8e9 | 115 | show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf) |
0136db58 | 116 | { |
14c8d110 | 117 | struct drm_minor *dminor = dev_to_drm_minor(kdev); |
0136db58 | 118 | u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p); |
3e2a1556 | 119 | return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency); |
0136db58 BW |
120 | } |
121 | ||
122 | static ssize_t | |
dbdfd8e9 | 123 | show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf) |
0136db58 | 124 | { |
14c8d110 | 125 | struct drm_minor *dminor = dev_to_drm_minor(kdev); |
0136db58 | 126 | u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp); |
3e2a1556 | 127 | return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency); |
0136db58 BW |
128 | } |
129 | ||
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);

/* Basic RC6 attributes, merged into the device's "power" group below. */
static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};

/* Deeper RC6p/RC6pp states, registered separately (see i915_setup_sysfs,
 * which gates this group on HAS_RC6p). */
static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs = rc6p_attrs
};
8c3f929b | 156 | #endif |
0136db58 | 157 | |
84bc7581 BW |
158 | static int l3_access_valid(struct drm_device *dev, loff_t offset) |
159 | { | |
040d2baa | 160 | if (!HAS_L3_DPF(dev)) |
84bc7581 BW |
161 | return -EPERM; |
162 | ||
163 | if (offset % 4 != 0) | |
164 | return -EINVAL; | |
165 | ||
166 | if (offset >= GEN7_L3LOG_SIZE) | |
167 | return -ENXIO; | |
168 | ||
169 | return 0; | |
170 | } | |
171 | ||
/*
 * i915_l3_read - sysfs binary read handler for the l3_parity file(s)
 *
 * Copies the cached per-slice L3 remap table into @buf; a slice that has
 * never been written reads back as zeroes.  The slice index is stashed
 * in @attr->private (see dpf_attrs / dpf_attrs_1 below).
 */
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	/* Only whole u32 remap entries are copied. */
	count = round_down(count, 4);

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	/* Clamp to the end of the table. */
	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (dev_priv->l3_parity.remap_info[slice])
		memcpy(buf,
		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}
207 | ||
/*
 * i915_l3_write - sysfs binary write handler for the l3_parity file(s)
 *
 * Stores userspace-provided remap entries for one L3 slice (selected via
 * @attr->private) and flags every context so the new remapping is applied
 * at the next context switch.  The per-slice table is allocated lazily on
 * first write.
 *
 * NOTE(review): no explicit clamp of @count against GEN7_L3LOG_SIZE here,
 * unlike the read path — presumably the sysfs core bounds writes to the
 * attribute's .size; confirm before relying on it.
 */
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	struct intel_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	/* Remapping is applied via context switches, so contexts are required. */
	if (!HAS_HW_CONTEXTS(drm_dev))
		return -ENXIO;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	/* First write to this slice: allocate its backing table. */
	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return -ENOMEM;
		}
	}

	/* Quiesce the GPU before publishing the new remap data. */
	ret = i915_gpu_idle(drm_dev);
	if (ret) {
		kfree(temp);
		mutex_unlock(&drm_dev->struct_mutex);
		return ret;
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}
265 | ||
/* "l3_parity": remap table for L3 slice 0 (.private carries the slice index). */
static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

/* "l3_parity_slice_1": same handlers, slice 1; only created when
 * NUM_L3_SLICES > 1 (see i915_setup_sysfs). */
static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};
283 | ||
/*
 * gt_act_freq_mhz_show - report the frequency the GPU is actually running at
 *
 * On VLV this is read from the punit's GPU_FREQ_STS register; elsewhere it
 * is decoded from the CAGF field of GEN6_RPSTAT1 (HSW/BDW use a different
 * mask/shift).  The raw value is converted to MHz via intel_gpu_freq().
 */
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Make sure the deferred RPS resume work has completed first. */
	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {
		u32 rpstat = I915_READ(GEN6_RPSTAT1);
		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		ret = intel_gpu_freq(dev_priv, ret);
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
315 | ||
316 | static ssize_t gt_cur_freq_mhz_show(struct device *kdev, | |
317 | struct device_attribute *attr, char *buf) | |
318 | { | |
319 | struct drm_minor *minor = dev_to_drm_minor(kdev); | |
320 | struct drm_device *dev = minor->dev; | |
321 | struct drm_i915_private *dev_priv = dev->dev_private; | |
322 | int ret; | |
323 | ||
324 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); | |
325 | ||
326 | intel_runtime_pm_get(dev_priv); | |
327 | ||
328 | mutex_lock(&dev_priv->rps.hw_lock); | |
7c59a9c1 | 329 | ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq); |
4fc688ce | 330 | mutex_unlock(&dev_priv->rps.hw_lock); |
df6eedc8 | 331 | |
d46c0517 ID |
332 | intel_runtime_pm_put(dev_priv); |
333 | ||
3e2a1556 | 334 | return snprintf(buf, PAGE_SIZE, "%d\n", ret); |
df6eedc8 BW |
335 | } |
336 | ||
97e4eed7 CW |
337 | static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, |
338 | struct device_attribute *attr, char *buf) | |
339 | { | |
14c8d110 | 340 | struct drm_minor *minor = dev_to_drm_minor(kdev); |
97e4eed7 CW |
341 | struct drm_device *dev = minor->dev; |
342 | struct drm_i915_private *dev_priv = dev->dev_private; | |
343 | ||
7c59a9c1 VS |
344 | return snprintf(buf, PAGE_SIZE, |
345 | "%d\n", | |
346 | intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); | |
97e4eed7 CW |
347 | } |
348 | ||
df6eedc8 BW |
349 | static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) |
350 | { | |
14c8d110 | 351 | struct drm_minor *minor = dev_to_drm_minor(kdev); |
df6eedc8 BW |
352 | struct drm_device *dev = minor->dev; |
353 | struct drm_i915_private *dev_priv = dev->dev_private; | |
354 | int ret; | |
355 | ||
5c9669ce TR |
356 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); |
357 | ||
4fc688ce | 358 | mutex_lock(&dev_priv->rps.hw_lock); |
7c59a9c1 | 359 | ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit); |
4fc688ce | 360 | mutex_unlock(&dev_priv->rps.hw_lock); |
df6eedc8 | 361 | |
3e2a1556 | 362 | return snprintf(buf, PAGE_SIZE, "%d\n", ret); |
df6eedc8 BW |
363 | } |
364 | ||
46ddf194 BW |
/*
 * gt_max_freq_mhz_store - sysfs write handler for the max frequency limit
 *
 * Parses a MHz value from @buf, converts it to the hardware frequency
 * encoding, and installs it as the max soft limit.  Values outside the
 * hardware range or below the current min soft limit are rejected with
 * -EINVAL.  The current frequency is then re-clamped to the new window
 * and pushed to the hardware.
 */
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	/* Make sure the deferred RPS resume work has completed first. */
	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	/* Convert from MHz to the hardware's frequency encoding. */
	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	/* Above RP0 is allowed, but worth logging as overclocking. */
	if (val > dev_priv->rps.rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  intel_gpu_freq(dev_priv, val));

	dev_priv->rps.max_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	if (IS_VALLEYVIEW(dev))
		valleyview_set_rps(dev, val);
	else
		gen6_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}
414 | ||
df6eedc8 BW |
415 | static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) |
416 | { | |
14c8d110 | 417 | struct drm_minor *minor = dev_to_drm_minor(kdev); |
df6eedc8 BW |
418 | struct drm_device *dev = minor->dev; |
419 | struct drm_i915_private *dev_priv = dev->dev_private; | |
420 | int ret; | |
421 | ||
5c9669ce TR |
422 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); |
423 | ||
4fc688ce | 424 | mutex_lock(&dev_priv->rps.hw_lock); |
7c59a9c1 | 425 | ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit); |
4fc688ce | 426 | mutex_unlock(&dev_priv->rps.hw_lock); |
df6eedc8 | 427 | |
3e2a1556 | 428 | return snprintf(buf, PAGE_SIZE, "%d\n", ret); |
df6eedc8 BW |
429 | } |
430 | ||
46ddf194 BW |
/*
 * gt_min_freq_mhz_store - sysfs write handler for the min frequency limit
 *
 * Mirror image of gt_max_freq_mhz_store: parses a MHz value, converts it
 * to the hardware encoding, installs it as the min soft limit (rejecting
 * values outside the hardware range or above the current max soft limit),
 * then re-clamps and re-applies the current frequency.
 */
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	/* Make sure the deferred RPS resume work has completed first. */
	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	/* Convert from MHz to the hardware's frequency encoding. */
	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	if (IS_VALLEYVIEW(dev))
		valleyview_set_rps(dev, val);
	else
		gen6_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;

}
477 | ||
/* RPS frequency controls: "act"/"cur" are read-only observations, while
 * the max/min soft limits are also writable by root. */
static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

/* The three RP* attributes share one show routine, which tells them apart
 * by comparing the attribute pointer — hence the forward declaration. */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
489 | ||
/* For now we have a static number of RP states */
/*
 * Shared show routine for gt_RP0/RP1/RPn_freq_mhz.  Dispatches on the
 * attribute pointer: RP0 = max, RP1 = intermediate, RPn = min RP state.
 * VLV keeps the values cached in dev_priv->rps; other platforms decode
 * byte fields of GEN6_RP_STATE_CAP.  BUG() on an unknown attribute,
 * which can only happen through a programming error in this file.
 */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap;
	ssize_t ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	/* Hold runtime pm only for the register read itself. */
	intel_runtime_pm_get(dev_priv);
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	if (attr == &dev_attr_gt_RP0_freq_mhz) {
		if (IS_VALLEYVIEW(dev))
			val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
		else
			val = intel_gpu_freq(dev_priv,
					     ((rp_state_cap & 0x0000ff) >> 0));
	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
		if (IS_VALLEYVIEW(dev))
			val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
		else
			val = intel_gpu_freq(dev_priv,
					     ((rp_state_cap & 0x00ff00) >> 8));
	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
		if (IS_VALLEYVIEW(dev))
			val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
		else
			val = intel_gpu_freq(dev_priv,
					     ((rp_state_cap & 0xff0000) >> 16));
	} else {
		BUG();
	}
	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
530 | ||
df6eedc8 | 531 | static const struct attribute *gen6_attrs[] = { |
c8c972eb | 532 | &dev_attr_gt_act_freq_mhz.attr, |
df6eedc8 BW |
533 | &dev_attr_gt_cur_freq_mhz.attr, |
534 | &dev_attr_gt_max_freq_mhz.attr, | |
535 | &dev_attr_gt_min_freq_mhz.attr, | |
ac6ae347 BW |
536 | &dev_attr_gt_RP0_freq_mhz.attr, |
537 | &dev_attr_gt_RP1_freq_mhz.attr, | |
538 | &dev_attr_gt_RPn_freq_mhz.attr, | |
df6eedc8 BW |
539 | NULL, |
540 | }; | |
541 | ||
97e4eed7 | 542 | static const struct attribute *vlv_attrs[] = { |
c8c972eb | 543 | &dev_attr_gt_act_freq_mhz.attr, |
97e4eed7 CW |
544 | &dev_attr_gt_cur_freq_mhz.attr, |
545 | &dev_attr_gt_max_freq_mhz.attr, | |
546 | &dev_attr_gt_min_freq_mhz.attr, | |
74c4f62b D |
547 | &dev_attr_gt_RP0_freq_mhz.attr, |
548 | &dev_attr_gt_RP1_freq_mhz.attr, | |
549 | &dev_attr_gt_RPn_freq_mhz.attr, | |
97e4eed7 CW |
550 | &dev_attr_vlv_rpe_freq_mhz.attr, |
551 | NULL, | |
552 | }; | |
553 | ||
ef86ddce MK |
/*
 * error_state_read - sysfs binary read handler for the "error" file
 *
 * Serializes the most recently captured GPU error state into @buf,
 * honouring the caller's @off/@count window via the error-state string
 * buffer helpers.  Returns the serialization error if one occurred,
 * otherwise the number of bytes copied.
 */
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{

	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
	if (ret)
		return ret;

	error_priv.dev = dev;
	/* Take a reference on the error state so it can't vanish under us. */
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	/* Error code wins over the (possibly zero) byte count. */
	return ret ?: ret_count;
}
589 | ||
590 | static ssize_t error_state_write(struct file *file, struct kobject *kobj, | |
591 | struct bin_attribute *attr, char *buf, | |
592 | loff_t off, size_t count) | |
593 | { | |
594 | struct device *kdev = container_of(kobj, struct device, kobj); | |
14c8d110 | 595 | struct drm_minor *minor = dev_to_drm_minor(kdev); |
ef86ddce MK |
596 | struct drm_device *dev = minor->dev; |
597 | int ret; | |
598 | ||
599 | DRM_DEBUG_DRIVER("Resetting error state\n"); | |
600 | ||
601 | ret = mutex_lock_interruptible(&dev->struct_mutex); | |
602 | if (ret) | |
603 | return ret; | |
604 | ||
605 | i915_destroy_error_state(dev); | |
606 | mutex_unlock(&dev->struct_mutex); | |
607 | ||
608 | return count; | |
609 | } | |
610 | ||
/* "error" node: reading dumps the captured GPU error state, writing clears it. */
static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};
618 | ||
0136db58 BW |
/*
 * i915_setup_sysfs - register all i915 sysfs attributes on the primary node
 *
 * Registration is best-effort: each group/file is created only when the
 * hardware supports the corresponding feature, and failures are logged
 * rather than propagated (sysfs files are a debugging/tuning convenience,
 * not required for driver operation).
 */
void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			DRM_ERROR("RC6p residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev)) {
		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		/* Second remap file only on parts with more than one slice. */
		if (NUM_L3_SLICES(dev) > 1) {
			ret = device_create_bin_file(dev->primary->kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	/* RPS frequency controls: VLV gets its own attribute set. */
	ret = 0;
	if (IS_VALLEYVIEW(dev))
		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
	else if (INTEL_INFO(dev)->gen >= 6)
		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
}
663 | ||
/*
 * i915_teardown_sysfs - remove everything created by i915_setup_sysfs
 *
 * NOTE(review): setup is conditional on hardware features but removal is
 * unconditional — this relies on the sysfs removal helpers tolerating
 * files that were never created; confirm against the sysfs core.
 */
void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
	if (IS_VALLEYVIEW(dev))
		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
#endif
}