drivers/gpu/drm/i915/i915_sysfs.c
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

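/*
 * The DRM core stores the drm_minor as driver data on the sysfs device it
 * creates for each minor (e.g. card0), so the attribute callbacks below can
 * recover it with a plain dev_get_drvdata().
 */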
#define dev_to_drm_minor(d) dev_get_drvdata((d))

#ifdef CONFIG_PM
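/*
 * Note on the unit conversion below: on most gens the RC6 residency
 * counters tick once per 1.28us, so milliseconds = raw * 1.28us / 1000 =
 * raw * 128 / 100000, which is why units = 128 and div = 100000. For
 * example, a raw count of 781250 ticks is 781250 * 128 / 100000 = 1000ms
 * of residency.
 */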
static u32 calc_residency(struct drm_device *dev,
                          i915_reg_t reg)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u64 raw_time; /* 32b value may overflow during fixed point math */
        u64 units = 128ULL, div = 100000ULL;
        u32 ret;

        if (!intel_enable_rc6(dev))
                return 0;

        intel_runtime_pm_get(dev_priv);

        /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                units = 1;
                div = dev_priv->czclk_freq;

                if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
                        units <<= 8;
        } else if (IS_BROXTON(dev)) {
                units = 1;
                div = 1200; /* 833.33ns */
        }

        raw_time = I915_READ(reg) * units;
        ret = DIV_ROUND_UP_ULL(raw_time, div);

        intel_runtime_pm_put(dev_priv);
        return ret;
}

static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = dev_to_drm_minor(kdev);

        return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = dev_to_drm_minor(kdev);
        u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);

        return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = dev_to_drm_minor(kdev);
        u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);

        return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = dev_to_drm_minor(kdev);
        u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);

        return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static ssize_t
show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = dev_to_drm_minor(kdev);
        u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);

        return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);

static struct attribute *rc6_attrs[] = {
        &dev_attr_rc6_enable.attr,
        &dev_attr_rc6_residency_ms.attr,
        NULL
};

static struct attribute_group rc6_attr_group = {
        .name = power_group_name,
        .attrs = rc6_attrs
};

static struct attribute *rc6p_attrs[] = {
        &dev_attr_rc6p_residency_ms.attr,
        &dev_attr_rc6pp_residency_ms.attr,
        NULL
};

static struct attribute_group rc6p_attr_group = {
        .name = power_group_name,
        .attrs = rc6p_attrs
};

static struct attribute *media_rc6_attrs[] = {
        &dev_attr_media_rc6_residency_ms.attr,
        NULL
};

static struct attribute_group media_rc6_attr_group = {
        .name = power_group_name,
        .attrs = media_rc6_attrs
};
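/*
 * These groups are merged into the device's existing "power" group (hence
 * power_group_name above), so on a typical setup they surface as e.g.
 * /sys/class/drm/card0/power/rc6_residency_ms (the card number may differ).
 */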
#endif

static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
        if (!HAS_L3_DPF(dev))
                return -EPERM;

        if (offset % 4 != 0)
                return -EINVAL;

        if (offset >= GEN7_L3LOG_SIZE)
                return -ENXIO;

        return 0;
}

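/*
 * The l3_parity files below expose the per-slice L3 remap log as a binary
 * blob of 4-byte entries. As an illustrative sketch, assuming card0 and a
 * platform with L3 DPF support, userspace could inspect the log with
 *   hexdump -C /sys/class/drm/card0/l3_parity
 * and write remap entries back at 4-byte-aligned offsets.
 */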
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
             struct bin_attribute *attr, char *buf,
             loff_t offset, size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct drm_minor *dminor = dev_to_drm_minor(dev);
        struct drm_device *drm_dev = dminor->dev;
        struct drm_i915_private *dev_priv = drm_dev->dev_private;
        int slice = (int)(uintptr_t)attr->private;
        int ret;

        count = round_down(count, 4);

        ret = l3_access_valid(drm_dev, offset);
        if (ret)
                return ret;

        count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

        ret = i915_mutex_lock_interruptible(drm_dev);
        if (ret)
                return ret;

        if (dev_priv->l3_parity.remap_info[slice])
                memcpy(buf,
                       dev_priv->l3_parity.remap_info[slice] + (offset/4),
                       count);
        else
                memset(buf, 0, count);

        mutex_unlock(&drm_dev->struct_mutex);

        return count;
}

static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
              struct bin_attribute *attr, char *buf,
              loff_t offset, size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct drm_minor *dminor = dev_to_drm_minor(dev);
        struct drm_device *drm_dev = dminor->dev;
        struct drm_i915_private *dev_priv = drm_dev->dev_private;
        struct intel_context *ctx;
        u32 *temp = NULL; /* Just here to make handling failures easy */
        int slice = (int)(uintptr_t)attr->private;
        int ret;

        if (!HAS_HW_CONTEXTS(drm_dev))
                return -ENXIO;

        ret = l3_access_valid(drm_dev, offset);
        if (ret)
                return ret;

        ret = i915_mutex_lock_interruptible(drm_dev);
        if (ret)
                return ret;

        if (!dev_priv->l3_parity.remap_info[slice]) {
                temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
                if (!temp) {
                        mutex_unlock(&drm_dev->struct_mutex);
                        return -ENOMEM;
                }
        }

        ret = i915_gpu_idle(drm_dev);
        if (ret) {
                kfree(temp);
                mutex_unlock(&drm_dev->struct_mutex);
                return ret;
        }

        /* TODO: Ideally we really want a GPU reset here to make sure errors
         * aren't propagated. Since I cannot find a stable way to reset the GPU
         * at this point it is left as a TODO.
         */
        if (temp)
                dev_priv->l3_parity.remap_info[slice] = temp;

        memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

        /* NB: We defer the remapping until we switch to the context */
        list_for_each_entry(ctx, &dev_priv->context_list, link)
                ctx->remap_slice |= (1<<slice);

        mutex_unlock(&drm_dev->struct_mutex);

        return count;
}

static struct bin_attribute dpf_attrs = {
        .attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
        .size = GEN7_L3LOG_SIZE,
        .read = i915_l3_read,
        .write = i915_l3_write,
        .mmap = NULL,
        .private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
        .attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
        .size = GEN7_L3LOG_SIZE,
        .read = i915_l3_read,
        .write = i915_l3_write,
        .mmap = NULL,
        .private = (void *)1
};

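/*
 * gt_act_freq_mhz reports the frequency the hardware is actually running
 * at right now, while gt_cur_freq_mhz reports the frequency software last
 * requested; the two can disagree, e.g. when the GPU is idle and the
 * hardware has clocked itself down below the requested value.
 */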
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
                                    struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        flush_delayed_work(&dev_priv->rps.delayed_resume_work);

        intel_runtime_pm_get(dev_priv);

        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                u32 freq;

                freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
                ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
        } else {
                u32 rpstat = I915_READ(GEN6_RPSTAT1);

                if (IS_GEN9(dev_priv))
                        ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
                else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                        ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
                else
                        ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
                ret = intel_gpu_freq(dev_priv, ret);
        }
        mutex_unlock(&dev_priv->rps.hw_lock);

        intel_runtime_pm_put(dev_priv);

        return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
                                    struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        flush_delayed_work(&dev_priv->rps.delayed_resume_work);

        intel_runtime_pm_get(dev_priv);

        mutex_lock(&dev_priv->rps.hw_lock);
        ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
        mutex_unlock(&dev_priv->rps.hw_lock);

        intel_runtime_pm_put(dev_priv);

        return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
                                     struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        return snprintf(buf, PAGE_SIZE,
                        "%d\n",
                        intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        flush_delayed_work(&dev_priv->rps.delayed_resume_work);

        mutex_lock(&dev_priv->rps.hw_lock);
        ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
        mutex_unlock(&dev_priv->rps.hw_lock);

        return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_max_freq_mhz_store(struct device *kdev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val;
        ssize_t ret;

        ret = kstrtou32(buf, 0, &val);
        if (ret)
                return ret;

        flush_delayed_work(&dev_priv->rps.delayed_resume_work);

        intel_runtime_pm_get(dev_priv);

        mutex_lock(&dev_priv->rps.hw_lock);

        val = intel_freq_opcode(dev_priv, val);

        if (val < dev_priv->rps.min_freq ||
            val > dev_priv->rps.max_freq ||
            val < dev_priv->rps.min_freq_softlimit) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                intel_runtime_pm_put(dev_priv);
                return -EINVAL;
        }

        if (val > dev_priv->rps.rp0_freq)
                DRM_DEBUG("User requested overclocking to %d\n",
                          intel_gpu_freq(dev_priv, val));

        dev_priv->rps.max_freq_softlimit = val;

        val = clamp_t(int, dev_priv->rps.cur_freq,
                      dev_priv->rps.min_freq_softlimit,
                      dev_priv->rps.max_freq_softlimit);

        /* We still need *_set_rps to process the new max_delay and
         * update the interrupt limits and PMINTRMSK even though
         * frequency request may be unchanged. */
        intel_set_rps(dev, val);

        mutex_unlock(&dev_priv->rps.hw_lock);

        intel_runtime_pm_put(dev_priv);

        return count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        flush_delayed_work(&dev_priv->rps.delayed_resume_work);

        mutex_lock(&dev_priv->rps.hw_lock);
        ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
        mutex_unlock(&dev_priv->rps.hw_lock);

        return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val;
        ssize_t ret;

        ret = kstrtou32(buf, 0, &val);
        if (ret)
                return ret;

        flush_delayed_work(&dev_priv->rps.delayed_resume_work);

        intel_runtime_pm_get(dev_priv);

        mutex_lock(&dev_priv->rps.hw_lock);

        val = intel_freq_opcode(dev_priv, val);

        if (val < dev_priv->rps.min_freq ||
            val > dev_priv->rps.max_freq ||
            val > dev_priv->rps.max_freq_softlimit) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                intel_runtime_pm_put(dev_priv);
                return -EINVAL;
        }

        dev_priv->rps.min_freq_softlimit = val;

        val = clamp_t(int, dev_priv->rps.cur_freq,
                      dev_priv->rps.min_freq_softlimit,
                      dev_priv->rps.max_freq_softlimit);

        /* We still need *_set_rps to process the new min_delay and
         * update the interrupt limits and PMINTRMSK even though
         * frequency request may be unchanged. */
        intel_set_rps(dev, val);

        mutex_unlock(&dev_priv->rps.hw_lock);

        intel_runtime_pm_put(dev_priv);

        return count;
}

static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
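/*
 * Illustrative use of the gt_min/gt_max files above, assuming card0:
 * pinning the GPU at its maximum frequency from a shell:
 *
 *   cat /sys/class/drm/card0/gt_RP0_freq_mhz > \
 *           /sys/class/drm/card0/gt_min_freq_mhz
 */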

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

/*
 * For now we have a static number of RP states: RP0 is the highest
 * non-overclocked frequency, RPn the lowest, and RP1 an intermediate,
 * power-efficient operating point. A single show routine serves all three
 * files and picks the value to report by comparing the attribute pointer.
 */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val;

        if (attr == &dev_attr_gt_RP0_freq_mhz)
                val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
        else if (attr == &dev_attr_gt_RP1_freq_mhz)
                val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
        else if (attr == &dev_attr_gt_RPn_freq_mhz)
                val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
        else
                BUG();

        return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static const struct attribute *gen6_attrs[] = {
        &dev_attr_gt_act_freq_mhz.attr,
        &dev_attr_gt_cur_freq_mhz.attr,
        &dev_attr_gt_max_freq_mhz.attr,
        &dev_attr_gt_min_freq_mhz.attr,
        &dev_attr_gt_RP0_freq_mhz.attr,
        &dev_attr_gt_RP1_freq_mhz.attr,
        &dev_attr_gt_RPn_freq_mhz.attr,
        NULL,
};

static const struct attribute *vlv_attrs[] = {
        &dev_attr_gt_act_freq_mhz.attr,
        &dev_attr_gt_cur_freq_mhz.attr,
        &dev_attr_gt_max_freq_mhz.attr,
        &dev_attr_gt_min_freq_mhz.attr,
        &dev_attr_gt_RP0_freq_mhz.attr,
        &dev_attr_gt_RP1_freq_mhz.attr,
        &dev_attr_gt_RPn_freq_mhz.attr,
        &dev_attr_vlv_rpe_freq_mhz.attr,
        NULL,
};

static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
                                struct bin_attribute *attr, char *buf,
                                loff_t off, size_t count)
{
        struct device *kdev = kobj_to_dev(kobj);
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct i915_error_state_file_priv error_priv;
        struct drm_i915_error_state_buf error_str;
        ssize_t ret_count = 0;
        int ret;

        memset(&error_priv, 0, sizeof(error_priv));

        ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
        if (ret)
                return ret;

        error_priv.dev = dev;
        i915_error_state_get(dev, &error_priv);

        ret = i915_error_state_to_str(&error_str, &error_priv);
        if (ret)
                goto out;

        ret_count = count < error_str.bytes ? count : error_str.bytes;

        memcpy(buf, error_str.buf, ret_count);
out:
        i915_error_state_put(&error_priv);
        i915_error_state_buf_release(&error_str);

        return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
                                 struct bin_attribute *attr, char *buf,
                                 loff_t off, size_t count)
{
        struct device *kdev = kobj_to_dev(kobj);
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        int ret;

        DRM_DEBUG_DRIVER("Resetting error state\n");

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        i915_destroy_error_state(dev);
        mutex_unlock(&dev->struct_mutex);

        return count;
}

static struct bin_attribute error_state_attr = {
        .attr.name = "error",
        .attr.mode = S_IRUSR | S_IWUSR,
        .size = 0,
        .read = error_state_read,
        .write = error_state_write,
};
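/*
 * Illustrative use, assuming card0: a captured GPU hang state can be saved
 * with
 *   cat /sys/class/drm/card0/error > /tmp/gpu-hang.log
 * and the saved state cleared by writing anything to the same file:
 *   echo 1 > /sys/class/drm/card0/error
 */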
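/*
 * i915_setup_sysfs() is called once at driver load and
 * i915_teardown_sysfs() at unload. Teardown removes the DPF attributes
 * without re-checking hardware support: removing a sysfs file that was
 * never created is harmless, so the gen checks need not be repeated.
 */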
void i915_setup_sysfs(struct drm_device *dev)
{
        int ret;

#ifdef CONFIG_PM
        if (HAS_RC6(dev)) {
                ret = sysfs_merge_group(&dev->primary->kdev->kobj,
                                        &rc6_attr_group);
                if (ret)
                        DRM_ERROR("RC6 residency sysfs setup failed\n");
        }
        if (HAS_RC6p(dev)) {
                ret = sysfs_merge_group(&dev->primary->kdev->kobj,
                                        &rc6p_attr_group);
                if (ret)
                        DRM_ERROR("RC6p residency sysfs setup failed\n");
        }
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                ret = sysfs_merge_group(&dev->primary->kdev->kobj,
                                        &media_rc6_attr_group);
                if (ret)
                        DRM_ERROR("Media RC6 residency sysfs setup failed\n");
        }
#endif
        if (HAS_L3_DPF(dev)) {
                ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
                if (ret)
                        DRM_ERROR("l3 parity sysfs setup failed\n");

                if (NUM_L3_SLICES(dev) > 1) {
                        ret = device_create_bin_file(dev->primary->kdev,
                                                     &dpf_attrs_1);
                        if (ret)
                                DRM_ERROR("l3 parity slice 1 setup failed\n");
                }
        }

        ret = 0;
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
                ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
        else if (INTEL_INFO(dev)->gen >= 6)
                ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
        if (ret)
                DRM_ERROR("RPS sysfs setup failed\n");

        ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
                                    &error_state_attr);
        if (ret)
                DRM_ERROR("error_state sysfs setup failed\n");
}

void i915_teardown_sysfs(struct drm_device *dev)
{
        sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
                sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
        else
                sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
        device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
        device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
        sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
        sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
#endif
}