/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

#define dev_to_drm_minor(d) dev_get_drvdata((d))

#ifdef CONFIG_PM
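/*
 * A worked example of the fixed-point conversion below (derived from the
 * code, not from hardware documentation): the default residency counters
 * tick once per 1.28us, so ticks * 128 / 100000 yields milliseconds
 * (1.28us = 128/100000 ms). On VLV/CHV the counter ticks in CZ clock
 * cycles instead; with czclk_freq in kHz, ticks / czclk_freq is already
 * in ms, and VLV_COUNT_RANGE_HIGH means each tick covers 256 cycles.
 * Broxton ticks every 833.33ns = 1/1.2 us, hence the divisor of 1200.
 */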
static u32 calc_residency(struct drm_device *dev,
                          i915_reg_t reg)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        u64 raw_time; /* 32b value may overflow during fixed point math */
        u64 units = 128ULL, div = 100000ULL;
        u32 ret;

        if (!intel_enable_rc6())
                return 0;

        intel_runtime_pm_get(dev_priv);

        /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                units = 1;
                div = dev_priv->czclk_freq;

                if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
                        units <<= 8;
        } else if (IS_BROXTON(dev)) {
                units = 1;
                div = 1200; /* 833.33ns */
        }

        raw_time = I915_READ(reg) * units;
        ret = DIV_ROUND_UP_ULL(raw_time, div);

        intel_runtime_pm_put(dev_priv);
        return ret;
}

static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = dev_to_drm_minor(kdev);
        u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = dev_to_drm_minor(kdev);
        u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = dev_to_drm_minor(kdev);
        u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static ssize_t
show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = dev_to_drm_minor(kdev);
        u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);
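
/*
 * Usage sketch (assuming the primary minor is card0; these attributes are
 * merged into the device's standard "power" group by i915_setup_sysfs()):
 *
 *   $ cat /sys/class/drm/card0/power/rc6_residency_ms
 */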

static struct attribute *rc6_attrs[] = {
        &dev_attr_rc6_enable.attr,
        &dev_attr_rc6_residency_ms.attr,
        NULL
};

static struct attribute_group rc6_attr_group = {
        .name = power_group_name,
        .attrs = rc6_attrs
};

static struct attribute *rc6p_attrs[] = {
        &dev_attr_rc6p_residency_ms.attr,
        &dev_attr_rc6pp_residency_ms.attr,
        NULL
};

static struct attribute_group rc6p_attr_group = {
        .name = power_group_name,
        .attrs = rc6p_attrs
};

static struct attribute *media_rc6_attrs[] = {
        &dev_attr_media_rc6_residency_ms.attr,
        NULL
};

static struct attribute_group media_rc6_attr_group = {
        .name = power_group_name,
        .attrs = media_rc6_attrs
};
#endif

static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
        if (!HAS_L3_DPF(dev))
                return -EPERM;

        if (offset % 4 != 0)
                return -EINVAL;

        if (offset >= GEN7_L3LOG_SIZE)
                return -ENXIO;

        return 0;
}

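/*
 * Note: the L3 slice each attribute operates on is smuggled through the
 * bin_attribute's ->private pointer (0 or 1, see dpf_attrs and
 * dpf_attrs_1 below).
 */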
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
             struct bin_attribute *attr, char *buf,
             loff_t offset, size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct drm_minor *dminor = dev_to_drm_minor(dev);
        struct drm_device *drm_dev = dminor->dev;
        struct drm_i915_private *dev_priv = to_i915(drm_dev);
        int slice = (int)(uintptr_t)attr->private;
        int ret;

        count = round_down(count, 4);

        ret = l3_access_valid(drm_dev, offset);
        if (ret)
                return ret;

        count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

        ret = i915_mutex_lock_interruptible(drm_dev);
        if (ret)
                return ret;

        if (dev_priv->l3_parity.remap_info[slice])
                memcpy(buf,
                       dev_priv->l3_parity.remap_info[slice] + (offset/4),
                       count);
        else
                memset(buf, 0, count);

        mutex_unlock(&drm_dev->struct_mutex);

        return count;
}

static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
              struct bin_attribute *attr, char *buf,
              loff_t offset, size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct drm_minor *dminor = dev_to_drm_minor(dev);
        struct drm_device *drm_dev = dminor->dev;
        struct drm_i915_private *dev_priv = to_i915(drm_dev);
        struct i915_gem_context *ctx;
        u32 *temp = NULL; /* Just here to make handling failures easy */
        int slice = (int)(uintptr_t)attr->private;
        int ret;

        if (!HAS_HW_CONTEXTS(drm_dev))
                return -ENXIO;

        ret = l3_access_valid(drm_dev, offset);
        if (ret)
                return ret;

        ret = i915_mutex_lock_interruptible(drm_dev);
        if (ret)
                return ret;

        if (!dev_priv->l3_parity.remap_info[slice]) {
                temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
                if (!temp) {
                        mutex_unlock(&drm_dev->struct_mutex);
                        return -ENOMEM;
                }
        }

        /* TODO: Ideally we really want a GPU reset here to make sure errors
         * aren't propagated. Since I cannot find a stable way to reset the GPU
         * at this point it is left as a TODO.
         */
        if (temp)
                dev_priv->l3_parity.remap_info[slice] = temp;

        memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

        /* NB: We defer the remapping until we switch to the context */
        list_for_each_entry(ctx, &dev_priv->context_list, link)
                ctx->remap_slice |= (1<<slice);

        mutex_unlock(&drm_dev->struct_mutex);

        return count;
}

static struct bin_attribute dpf_attrs = {
        .attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
        .size = GEN7_L3LOG_SIZE,
        .read = i915_l3_read,
        .write = i915_l3_write,
        .mmap = NULL,
        .private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
        .attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
        .size = GEN7_L3LOG_SIZE,
        .read = i915_l3_read,
        .write = i915_l3_write,
        .mmap = NULL,
        .private = (void *)1
};
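
/*
 * Usage sketch (assuming card0; the exact bytes are hypothetical): these
 * files expose the per-slice L3 remap table as binary data at 4-byte
 * aligned offsets, consumed e.g. by intel-gpu-tools' intel_l3_parity:
 *
 *   # dd if=/sys/class/drm/card0/l3_parity bs=4 count=1 2>/dev/null | xxd
 */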

static ssize_t gt_act_freq_mhz_show(struct device *kdev,
                                    struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;

        intel_runtime_pm_get(dev_priv);

        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                u32 freq;
                freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
                ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
        } else {
                u32 rpstat = I915_READ(GEN6_RPSTAT1);
                if (IS_GEN9(dev_priv))
                        ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
                else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                        ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
                else
                        ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
                ret = intel_gpu_freq(dev_priv, ret);
        }
        mutex_unlock(&dev_priv->rps.hw_lock);

        intel_runtime_pm_put(dev_priv);

        return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
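
/*
 * Unlike gt_act_freq_mhz above, which reads the frequency the hardware
 * reports it is actually running at, gt_cur_freq_mhz below only reports
 * the last frequency the driver requested (rps.cur_freq); the two can
 * differ, e.g. while the GPU is power gated.
 */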

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
                                    struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        return snprintf(buf, PAGE_SIZE, "%d\n",
                        intel_gpu_freq(dev_priv,
                                       dev_priv->rps.cur_freq));
}

static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_i915_private *dev_priv = to_i915(minor->dev);

        return snprintf(buf, PAGE_SIZE, "%d\n",
                        intel_gpu_freq(dev_priv,
                                       dev_priv->rps.boost_freq));
}

static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
                                       struct device_attribute *attr,
                                       const char *buf, size_t count)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val;
        ssize_t ret;

        ret = kstrtou32(buf, 0, &val);
        if (ret)
                return ret;

        /* Validate against (static) hardware limits */
        val = intel_freq_opcode(dev_priv, val);
        if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq)
                return -EINVAL;

        mutex_lock(&dev_priv->rps.hw_lock);
        dev_priv->rps.boost_freq = val;
        mutex_unlock(&dev_priv->rps.hw_lock);

        return count;
}
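
/*
 * rps.boost_freq is the frequency the driver ramps to when a client is
 * waiting on the GPU (waitboosting). The store above only records the
 * new value under rps.hw_lock; it takes effect the next time a boost is
 * triggered.
 */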

static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
                                     struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        return snprintf(buf, PAGE_SIZE, "%d\n",
                        intel_gpu_freq(dev_priv,
                                       dev_priv->rps.efficient_freq));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        return snprintf(buf, PAGE_SIZE, "%d\n",
                        intel_gpu_freq(dev_priv,
                                       dev_priv->rps.max_freq_softlimit));
}

static ssize_t gt_max_freq_mhz_store(struct device *kdev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val;
        ssize_t ret;

        ret = kstrtou32(buf, 0, &val);
        if (ret)
                return ret;

        intel_runtime_pm_get(dev_priv);

        mutex_lock(&dev_priv->rps.hw_lock);

        val = intel_freq_opcode(dev_priv, val);

        if (val < dev_priv->rps.min_freq ||
            val > dev_priv->rps.max_freq ||
            val < dev_priv->rps.min_freq_softlimit) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                intel_runtime_pm_put(dev_priv);
                return -EINVAL;
        }

        if (val > dev_priv->rps.rp0_freq)
                DRM_DEBUG("User requested overclocking to %d\n",
                          intel_gpu_freq(dev_priv, val));

        dev_priv->rps.max_freq_softlimit = val;

        val = clamp_t(int, dev_priv->rps.cur_freq,
                      dev_priv->rps.min_freq_softlimit,
                      dev_priv->rps.max_freq_softlimit);

        /* We still need *_set_rps to process the new max_freq_softlimit and
         * update the interrupt limits and PMINTRMSK even though the
         * frequency request may be unchanged. */
        intel_set_rps(dev_priv, val);

        mutex_unlock(&dev_priv->rps.hw_lock);

        intel_runtime_pm_put(dev_priv);

        return count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        return snprintf(buf, PAGE_SIZE, "%d\n",
                        intel_gpu_freq(dev_priv,
                                       dev_priv->rps.min_freq_softlimit));
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val;
        ssize_t ret;

        ret = kstrtou32(buf, 0, &val);
        if (ret)
                return ret;

        intel_runtime_pm_get(dev_priv);

        mutex_lock(&dev_priv->rps.hw_lock);

        val = intel_freq_opcode(dev_priv, val);

        if (val < dev_priv->rps.min_freq ||
            val > dev_priv->rps.max_freq ||
            val > dev_priv->rps.max_freq_softlimit) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                intel_runtime_pm_put(dev_priv);
                return -EINVAL;
        }

        dev_priv->rps.min_freq_softlimit = val;

        val = clamp_t(int, dev_priv->rps.cur_freq,
                      dev_priv->rps.min_freq_softlimit,
                      dev_priv->rps.max_freq_softlimit);

        /* We still need *_set_rps to process the new min_freq_softlimit and
         * update the interrupt limits and PMINTRMSK even though the
         * frequency request may be unchanged. */
        intel_set_rps(dev_priv, val);

        mutex_unlock(&dev_priv->rps.hw_lock);

        intel_runtime_pm_put(dev_priv);

        return count;
}

static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO | S_IWUSR, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
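
/*
 * Usage sketch (assuming card0; the values shown are hypothetical):
 *
 *   $ cat /sys/class/drm/card0/gt_max_freq_mhz
 *   1100
 *   # echo 900 > /sys/class/drm/card0/gt_min_freq_mhz
 */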

/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val;

        if (attr == &dev_attr_gt_RP0_freq_mhz)
                val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
        else if (attr == &dev_attr_gt_RP1_freq_mhz)
                val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
        else if (attr == &dev_attr_gt_RPn_freq_mhz)
                val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
        else
                BUG();

        return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static const struct attribute *gen6_attrs[] = {
        &dev_attr_gt_act_freq_mhz.attr,
        &dev_attr_gt_cur_freq_mhz.attr,
        &dev_attr_gt_boost_freq_mhz.attr,
        &dev_attr_gt_max_freq_mhz.attr,
        &dev_attr_gt_min_freq_mhz.attr,
        &dev_attr_gt_RP0_freq_mhz.attr,
        &dev_attr_gt_RP1_freq_mhz.attr,
        &dev_attr_gt_RPn_freq_mhz.attr,
        NULL,
};

static const struct attribute *vlv_attrs[] = {
        &dev_attr_gt_act_freq_mhz.attr,
        &dev_attr_gt_cur_freq_mhz.attr,
        &dev_attr_gt_boost_freq_mhz.attr,
        &dev_attr_gt_max_freq_mhz.attr,
        &dev_attr_gt_min_freq_mhz.attr,
        &dev_attr_gt_RP0_freq_mhz.attr,
        &dev_attr_gt_RP1_freq_mhz.attr,
        &dev_attr_gt_RPn_freq_mhz.attr,
        &dev_attr_vlv_rpe_freq_mhz.attr,
        NULL,
};

static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
                                struct bin_attribute *attr, char *buf,
                                loff_t off, size_t count)
{
        struct device *kdev = kobj_to_dev(kobj);
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct i915_error_state_file_priv error_priv;
        struct drm_i915_error_state_buf error_str;
        ssize_t ret_count = 0;
        int ret;

        memset(&error_priv, 0, sizeof(error_priv));

        ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
        if (ret)
                return ret;

        error_priv.dev = dev;
        i915_error_state_get(dev, &error_priv);

        ret = i915_error_state_to_str(&error_str, &error_priv);
        if (ret)
                goto out;

        ret_count = count < error_str.bytes ? count : error_str.bytes;

        memcpy(buf, error_str.buf, ret_count);
out:
        i915_error_state_put(&error_priv);
        i915_error_state_buf_release(&error_str);

        return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
                                 struct bin_attribute *attr, char *buf,
                                 loff_t off, size_t count)
{
        struct device *kdev = kobj_to_dev(kobj);
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        int ret;

        DRM_DEBUG_DRIVER("Resetting error state\n");

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        i915_destroy_error_state(dev);
        mutex_unlock(&dev->struct_mutex);

        return count;
}

static struct bin_attribute error_state_attr = {
        .attr.name = "error",
        .attr.mode = S_IRUSR | S_IWUSR,
        .size = 0,
        .read = error_state_read,
        .write = error_state_write,
};
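
/*
 * Usage sketch (assuming card0): reading "error" dumps the most recently
 * captured GPU hang state; writing anything to it discards that state,
 * since error_state_write() ignores the payload:
 *
 *   $ cat /sys/class/drm/card0/error > gpu_hang.txt
 *   # echo 1 > /sys/class/drm/card0/error
 */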

void i915_setup_sysfs(struct drm_device *dev)
{
        int ret;

#ifdef CONFIG_PM
        if (HAS_RC6(dev)) {
                ret = sysfs_merge_group(&dev->primary->kdev->kobj,
                                        &rc6_attr_group);
                if (ret)
                        DRM_ERROR("RC6 residency sysfs setup failed\n");
        }
        if (HAS_RC6p(dev)) {
                ret = sysfs_merge_group(&dev->primary->kdev->kobj,
                                        &rc6p_attr_group);
                if (ret)
                        DRM_ERROR("RC6p residency sysfs setup failed\n");
        }
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                ret = sysfs_merge_group(&dev->primary->kdev->kobj,
                                        &media_rc6_attr_group);
                if (ret)
                        DRM_ERROR("Media RC6 residency sysfs setup failed\n");
        }
#endif
        if (HAS_L3_DPF(dev)) {
                ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
                if (ret)
                        DRM_ERROR("l3 parity sysfs setup failed\n");

                if (NUM_L3_SLICES(dev) > 1) {
                        ret = device_create_bin_file(dev->primary->kdev,
                                                     &dpf_attrs_1);
                        if (ret)
                                DRM_ERROR("l3 parity slice 1 setup failed\n");
                }
        }

        ret = 0;
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
                ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
        else if (INTEL_INFO(dev)->gen >= 6)
                ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
        if (ret)
                DRM_ERROR("RPS sysfs setup failed\n");

        ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
                                    &error_state_attr);
        if (ret)
                DRM_ERROR("error_state sysfs setup failed\n");
}

void i915_teardown_sysfs(struct drm_device *dev)
{
        sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
                sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
        else
                sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
        device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
        device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
        sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
        sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
#endif
}