Commit | Line | Data |
---|---|---|
0136db58 BW |
1 | /* |
2 | * Copyright © 2012 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |
21 | * IN THE SOFTWARE. | |
22 | * | |
23 | * Authors: | |
24 | * Ben Widawsky <ben@bwidawsk.net> | |
25 | * | |
26 | */ | |
27 | ||
28 | #include <linux/device.h> | |
29 | #include <linux/module.h> | |
30 | #include <linux/stat.h> | |
31 | #include <linux/sysfs.h> | |
84bc7581 | 32 | #include "intel_drv.h" |
0136db58 BW |
33 | #include "i915_drv.h" |
34 | ||
5ab3633d | 35 | #ifdef CONFIG_PM |
0136db58 BW |
/*
 * Convert an RC6 residency counter register into milliseconds.
 *
 * The math (raw * 128 / 100000) scales the raw count into ms; the
 * intermediate is kept in 64 bits because a 32b value may overflow
 * during the fixed point math.
 *
 * Returns 0 when RC6 is disabled, since the counter is not meaningful
 * in that case.
 */
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private; /* used implicitly by I915_READ() */
	u64 raw_time; /* 32b value may overflow during fixed point math */

	if (!intel_enable_rc6(dev))
		return 0;

	raw_time = I915_READ(reg) * 128ULL;
	return DIV_ROUND_UP_ULL(raw_time, 100000);
}
47 | ||
48 | static ssize_t | |
dbdfd8e9 | 49 | show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) |
0136db58 | 50 | { |
dbdfd8e9 | 51 | struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); |
3e2a1556 | 52 | return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev)); |
0136db58 BW |
53 | } |
54 | ||
55 | static ssize_t | |
dbdfd8e9 | 56 | show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf) |
0136db58 | 57 | { |
dbdfd8e9 | 58 | struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); |
0136db58 | 59 | u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6); |
3e2a1556 | 60 | return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency); |
0136db58 BW |
61 | } |
62 | ||
63 | static ssize_t | |
dbdfd8e9 | 64 | show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf) |
0136db58 | 65 | { |
dbdfd8e9 | 66 | struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); |
0136db58 | 67 | u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p); |
5ffd494b JB |
68 | if (IS_VALLEYVIEW(dminor->dev)) |
69 | rc6p_residency = 0; | |
3e2a1556 | 70 | return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency); |
0136db58 BW |
71 | } |
72 | ||
73 | static ssize_t | |
dbdfd8e9 | 74 | show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf) |
0136db58 | 75 | { |
dbdfd8e9 | 76 | struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); |
0136db58 | 77 | u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp); |
5ffd494b JB |
78 | if (IS_VALLEYVIEW(dminor->dev)) |
79 | rc6pp_residency = 0; | |
3e2a1556 | 80 | return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency); |
0136db58 BW |
81 | } |
82 | ||
/* Read-only RC6 attributes; grouped under the device's power/ directory below. */
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
87 | ||
/* NULL-terminated list of the RC6 attributes declared above. */
static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};
95 | ||
/* Merged into the existing "power" group (power_group_name) at setup time. */
static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};
#endif
0136db58 | 101 | |
84bc7581 BW |
102 | static int l3_access_valid(struct drm_device *dev, loff_t offset) |
103 | { | |
040d2baa | 104 | if (!HAS_L3_DPF(dev)) |
84bc7581 BW |
105 | return -EPERM; |
106 | ||
107 | if (offset % 4 != 0) | |
108 | return -EINVAL; | |
109 | ||
110 | if (offset >= GEN7_L3LOG_SIZE) | |
111 | return -ENXIO; | |
112 | ||
113 | return 0; | |
114 | } | |
115 | ||
/*
 * Read back the cached L3 remapping table for one slice via sysfs.
 *
 * The slice index travels in attr->private (see dpf_attrs below).  If
 * no remapping has been written yet the table reads back as zeroes.
 * The request is rounded down to whole u32 entries and clamped to the
 * end of the log.  Returns the number of bytes copied, or a negative
 * errno.
 */
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int slice = (int)(uintptr_t)attr->private; /* 0 or 1, set by dpf_attrs */
	int ret;

	count = round_down(count, 4); /* remap entries are u32-sized */

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	/* Never read past the end of the log. */
	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (dev_priv->l3_parity.remap_info[slice])
		memcpy(buf,
		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count); /* nothing remapped yet */

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}
151 | ||
/*
 * Write (part of) the per-slice L3 remapping table via sysfs.
 *
 * The table is cached in dev_priv->l3_parity.remap_info[slice],
 * allocated lazily on first write.  The hardware update is deferred:
 * every context gets its remap_slice bit set and the remapping is
 * applied on the next switch to that context.  Requires hardware
 * contexts.  Returns @count on success or a negative errno.
 */
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	struct i915_hw_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private; /* 0 or 1, set by dpf_attrs */
	int ret;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	if (dev_priv->hw_contexts_disabled)
		return -ENXIO;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	/* Allocate into @temp first so a later failure (GPU idle below)
	 * leaves the existing state untouched. */
	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return -ENOMEM;
		}
	}

	ret = i915_gpu_idle(drm_dev);
	if (ret) {
		kfree(temp);
		mutex_unlock(&drm_dev->struct_mutex);
		return ret;
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	/* NOTE(review): unlike i915_l3_read(), @count is not clamped here;
	 * presumably the sysfs core bounds writes by attr->size
	 * (GEN7_L3LOG_SIZE) — confirm. */
	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}
209 | ||
/* l3_parity binary attributes.  .private carries the slice index,
 * recovered in i915_l3_read()/i915_l3_write() via attr->private. */
static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0 /* slice 0 */
};

static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1 /* slice 1 */
};
227 | ||
df6eedc8 BW |
228 | static ssize_t gt_cur_freq_mhz_show(struct device *kdev, |
229 | struct device_attribute *attr, char *buf) | |
230 | { | |
231 | struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); | |
232 | struct drm_device *dev = minor->dev; | |
233 | struct drm_i915_private *dev_priv = dev->dev_private; | |
234 | int ret; | |
235 | ||
4fc688ce | 236 | mutex_lock(&dev_priv->rps.hw_lock); |
177006a1 JB |
237 | if (IS_VALLEYVIEW(dev_priv->dev)) { |
238 | u32 freq; | |
64936258 | 239 | freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); |
177006a1 JB |
240 | ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff); |
241 | } else { | |
0a073b84 | 242 | ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; |
177006a1 | 243 | } |
4fc688ce | 244 | mutex_unlock(&dev_priv->rps.hw_lock); |
df6eedc8 | 245 | |
3e2a1556 | 246 | return snprintf(buf, PAGE_SIZE, "%d\n", ret); |
df6eedc8 BW |
247 | } |
248 | ||
97e4eed7 CW |
249 | static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, |
250 | struct device_attribute *attr, char *buf) | |
251 | { | |
252 | struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); | |
253 | struct drm_device *dev = minor->dev; | |
254 | struct drm_i915_private *dev_priv = dev->dev_private; | |
255 | ||
256 | return snprintf(buf, PAGE_SIZE, "%d\n", | |
257 | vlv_gpu_freq(dev_priv->mem_freq, | |
258 | dev_priv->rps.rpe_delay)); | |
259 | } | |
260 | ||
df6eedc8 BW |
261 | static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) |
262 | { | |
263 | struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); | |
264 | struct drm_device *dev = minor->dev; | |
265 | struct drm_i915_private *dev_priv = dev->dev_private; | |
266 | int ret; | |
267 | ||
4fc688ce | 268 | mutex_lock(&dev_priv->rps.hw_lock); |
0a073b84 JB |
269 | if (IS_VALLEYVIEW(dev_priv->dev)) |
270 | ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay); | |
271 | else | |
272 | ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; | |
4fc688ce | 273 | mutex_unlock(&dev_priv->rps.hw_lock); |
df6eedc8 | 274 | |
3e2a1556 | 275 | return snprintf(buf, PAGE_SIZE, "%d\n", ret); |
df6eedc8 BW |
276 | } |
277 | ||
46ddf194 BW |
/*
 * sysfs store: set the maximum GPU frequency limit (input in MHz).
 *
 * Converts the MHz value into hardware units, validates it against
 * the hardware range and the current min limit, and lowers the
 * current frequency if it now exceeds the new ceiling.  Values above
 * the non-overclocked max are allowed but logged.  Returns @count on
 * success or a negative errno.
 */
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/* VLV limits are PUnit opcodes, not GT_FREQUENCY_MULTIPLIER
		 * multiples. */
		val = vlv_freq_opcode(dev_priv->mem_freq, val);

		hw_max = valleyview_rps_max_freq(dev_priv);
		hw_min = valleyview_rps_min_freq(dev_priv);
		non_oc_max = hw_max;
	} else {
		val /= GT_FREQUENCY_MULTIPLIER;

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.hw_max;
		non_oc_max = (rp_state_cap & 0xff);	    /* RP0 field */
		hw_min = ((rp_state_cap & 0xff0000) >> 16); /* RPn field */
	}

	if (val < hw_min || val > hw_max ||
	    val < dev_priv->rps.min_delay) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	if (val > non_oc_max)
		DRM_DEBUG("User requested overclocking to %d\n",
			  val * GT_FREQUENCY_MULTIPLIER);

	/* Clamp the current frequency down to the new ceiling. */
	if (dev_priv->rps.cur_delay > val) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, val);
		else
			gen6_set_rps(dev_priv->dev, val);
	}

	dev_priv->rps.max_delay = val;

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}
332 | ||
df6eedc8 BW |
333 | static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) |
334 | { | |
335 | struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); | |
336 | struct drm_device *dev = minor->dev; | |
337 | struct drm_i915_private *dev_priv = dev->dev_private; | |
338 | int ret; | |
339 | ||
4fc688ce | 340 | mutex_lock(&dev_priv->rps.hw_lock); |
0a073b84 JB |
341 | if (IS_VALLEYVIEW(dev_priv->dev)) |
342 | ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay); | |
343 | else | |
344 | ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; | |
4fc688ce | 345 | mutex_unlock(&dev_priv->rps.hw_lock); |
df6eedc8 | 346 | |
3e2a1556 | 347 | return snprintf(buf, PAGE_SIZE, "%d\n", ret); |
df6eedc8 BW |
348 | } |
349 | ||
46ddf194 BW |
350 | static ssize_t gt_min_freq_mhz_store(struct device *kdev, |
351 | struct device_attribute *attr, | |
352 | const char *buf, size_t count) | |
353 | { | |
354 | struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); | |
355 | struct drm_device *dev = minor->dev; | |
356 | struct drm_i915_private *dev_priv = dev->dev_private; | |
357 | u32 val, rp_state_cap, hw_max, hw_min; | |
358 | ssize_t ret; | |
359 | ||
360 | ret = kstrtou32(buf, 0, &val); | |
361 | if (ret) | |
362 | return ret; | |
363 | ||
4fc688ce | 364 | mutex_lock(&dev_priv->rps.hw_lock); |
46ddf194 | 365 | |
0a073b84 JB |
366 | if (IS_VALLEYVIEW(dev)) { |
367 | val = vlv_freq_opcode(dev_priv->mem_freq, val); | |
368 | ||
369 | hw_max = valleyview_rps_max_freq(dev_priv); | |
370 | hw_min = valleyview_rps_min_freq(dev_priv); | |
371 | } else { | |
372 | val /= GT_FREQUENCY_MULTIPLIER; | |
373 | ||
374 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | |
375 | hw_max = dev_priv->rps.hw_max; | |
376 | hw_min = ((rp_state_cap & 0xff0000) >> 16); | |
377 | } | |
46ddf194 BW |
378 | |
379 | if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) { | |
4fc688ce | 380 | mutex_unlock(&dev_priv->rps.hw_lock); |
46ddf194 BW |
381 | return -EINVAL; |
382 | } | |
383 | ||
0a073b84 JB |
384 | if (dev_priv->rps.cur_delay < val) { |
385 | if (IS_VALLEYVIEW(dev)) | |
386 | valleyview_set_rps(dev, val); | |
387 | else | |
388 | gen6_set_rps(dev_priv->dev, val); | |
389 | } | |
46ddf194 BW |
390 | |
391 | dev_priv->rps.min_delay = val; | |
392 | ||
4fc688ce | 393 | mutex_unlock(&dev_priv->rps.hw_lock); |
46ddf194 BW |
394 | |
395 | return count; | |
396 | ||
397 | } | |
398 | ||
df6eedc8 | 399 | static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL); |
46ddf194 BW |
400 | static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store); |
401 | static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store); | |
df6eedc8 | 402 | |
97e4eed7 | 403 | static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL); |
ac6ae347 BW |
404 | |
405 | static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf); | |
406 | static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); | |
407 | static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); | |
408 | static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); | |
409 | ||
/* For now we have a static number of RP states */
/*
 * sysfs show for gt_RP{0,1,n}_freq_mhz: decode the requested RP state
 * frequency from GEN6_RP_STATE_CAP, in MHz.  Which field to report is
 * chosen by comparing @attr against the three DEVICE_ATTRs above.
 */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap;
	ssize_t ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	mutex_unlock(&dev->struct_mutex);

	/* RP0 in bits 7:0, RP1 in 15:8, RPn in 23:16. */
	if (attr == &dev_attr_gt_RP0_freq_mhz) {
		val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
		val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
		val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
	} else {
		BUG(); /* called for an attribute we never registered */
	}
	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
436 | ||
df6eedc8 BW |
/* Frequency attributes registered on gen6+ (non-Valleyview) devices. */
static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

/* Valleyview variant: no RP state attributes, but the extra
 * vlv_rpe_freq_mhz attribute instead. */
static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};
454 | ||
ef86ddce MK |
/*
 * Dump the captured GPU error state through the "error" sysfs file.
 *
 * Formats the state into a string buffer (the read offset is handled
 * by i915_error_state_buf_init) and copies at most @count bytes into
 * @buf.  Returns the number of bytes copied, or a negative errno from
 * buffer setup or formatting.
 */
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{

	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, count, off);
	if (ret)
		return ret;

	/* Take a reference on the error state while we format it. */
	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	/* On success report bytes copied, otherwise the errno. */
	return ret ?: ret_count;
}
490 | ||
491 | static ssize_t error_state_write(struct file *file, struct kobject *kobj, | |
492 | struct bin_attribute *attr, char *buf, | |
493 | loff_t off, size_t count) | |
494 | { | |
495 | struct device *kdev = container_of(kobj, struct device, kobj); | |
496 | struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); | |
497 | struct drm_device *dev = minor->dev; | |
498 | int ret; | |
499 | ||
500 | DRM_DEBUG_DRIVER("Resetting error state\n"); | |
501 | ||
502 | ret = mutex_lock_interruptible(&dev->struct_mutex); | |
503 | if (ret) | |
504 | return ret; | |
505 | ||
506 | i915_destroy_error_state(dev); | |
507 | mutex_unlock(&dev->struct_mutex); | |
508 | ||
509 | return count; | |
510 | } | |
511 | ||
/* "error" bin file: reading dumps the error state, writing clears it.
 * .size is 0 — the dump has no fixed length. */
static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};
519 | ||
0136db58 BW |
/*
 * Register all i915 sysfs files on the primary DRM device node.
 *
 * Failures are only logged with DRM_ERROR, never propagated: the
 * sysfs interface is treated as best-effort.
 */
void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
	/* RC6 residency attributes: gen6+ only, merged into power/. */
	if (INTEL_INFO(dev)->gen >= 6) {
		ret = sysfs_merge_group(&dev->primary->kdev.kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
#endif
	/* L3 parity files, one per slice. */
	if (HAS_L3_DPF(dev)) {
		ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev) > 1) {
			ret = device_create_bin_file(&dev->primary->kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	/* RPS frequency files: VLV gets its own set, gen6+ the default. */
	ret = 0;
	if (IS_VALLEYVIEW(dev))
		ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
	else if (INTEL_INFO(dev)->gen >= 6)
		ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
}
558 | ||
/*
 * Remove the sysfs files created in i915_setup_sysfs(), in roughly
 * reverse order.
 *
 * NOTE(review): removal is unconditional — this assumes the removal
 * helpers are safe for files that were never created (e.g. dpf_attrs_1
 * on single-slice parts) — confirm.
 */
void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
	if (IS_VALLEYVIEW(dev))
		sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
	else
		sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
	device_remove_bin_file(&dev->primary->kdev, &dpf_attrs_1);
	device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
#endif
}