/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

#define dev_to_drm_minor(d) dev_get_drvdata((d))

#ifdef CONFIG_PM
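/*
 * RC6 residency reporting. The GT counters behind these files tick while
 * the hardware sits in the corresponding RC6 state; calc_residency()
 * converts the raw tick count into milliseconds.
 */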
static u32 calc_residency(struct drm_device *dev,
			  i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */
	u64 units = 128ULL, div = 100000ULL;
	u32 ret;

	if (!intel_enable_rc6(dev))
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		units = 1;
		div = dev_priv->czclk_freq;

		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;
	} else if (IS_BROXTON(dev)) {
		units = 1;
		div = 1200;		/* 833.33ns */
	}

	raw_time = I915_READ(reg) * units;
	ret = DIV_ROUND_UP_ULL(raw_time, div);

	intel_runtime_pm_put(dev_priv);
	return ret;
}

static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_get_drvdata(kdev);
	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static ssize_t
show_media_rc6_ms(struct device *kdev, struct device_attribute *attr,
		  char *buf)
{
	struct drm_minor *dminor = dev_get_drvdata(kdev);
	u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};

static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs = rc6p_attrs
};

static struct attribute *media_rc6_attrs[] = {
	&dev_attr_media_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group media_rc6_attr_group = {
	.name = power_group_name,
	.attrs = media_rc6_attrs
};
#endif /* CONFIG_PM */

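/*
 * L3 parity handling. On parts with L3 DPF (dynamic parity feature),
 * userspace can read back and update the per-slice L3 remapping table
 * through the "l3_parity" binary attributes defined further below.
 */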
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
	if (!HAS_L3_DPF(dev))
		return -EPERM;

	if (offset % 4 != 0)
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}

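/*
 * Return the cached remap table for a slice. A slice for which no
 * remapping information has been written reads back as zeroes.
 */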
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	count = round_down(count, 4);

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (dev_priv->l3_parity.remap_info[slice])
		memcpy(buf,
		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	struct intel_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	if (!HAS_HW_CONTEXTS(drm_dev))
		return -ENXIO;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return -ENOMEM;
		}
	}

	ret = i915_gpu_idle(drm_dev);
	if (ret) {
		kfree(temp);
		mutex_unlock(&drm_dev->struct_mutex);
		return ret;
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};

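/*
 * gt_act_freq_mhz reports the frequency the GPU is actually running at,
 * read back from the hardware (the PUNIT on VLV/CHV, RPSTAT elsewhere);
 * gt_cur_freq_mhz below reports the last frequency the driver requested,
 * which may differ.
 */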
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {
		u32 rpstat = I915_READ(GEN6_RPSTAT1);
		if (IS_GEN9(dev_priv))
			ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		ret = intel_gpu_freq(dev_priv, ret);
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

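/* RPe is the hardware-recommended "efficient" frequency on VLV/CHV. */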
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return snprintf(buf, PAGE_SIZE,
			"%d\n",
			intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

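/*
 * Writing gt_max_freq_mhz converts the user's MHz value to the hardware
 * opcode space with intel_freq_opcode(), then rejects anything outside
 * the hardware range or below the current min softlimit.
 */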
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		intel_runtime_pm_put(dev_priv);
		return -EINVAL;
	}

	if (val > dev_priv->rps.rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  intel_gpu_freq(dev_priv, val));

	dev_priv->rps.max_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	intel_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		intel_runtime_pm_put(dev_priv);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	intel_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return count;
}

static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

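/*
 * RP0 (maximum non-overclocked), RP1 (efficient) and RPn (minimum)
 * hardware RP states, all reported through the single show function
 * below, dispatched on the attribute pointer.
 */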
/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	if (attr == &dev_attr_gt_RP0_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
	else if (attr == &dev_attr_gt_RP1_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
	else if (attr == &dev_attr_gt_RPn_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
	else
		BUG();

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

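/*
 * The "error" binary attribute lets userspace read a snapshot of the
 * last captured GPU error state; writing anything to it clears the
 * saved state.
 */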
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{

	struct device *kdev = kobj_to_dev(kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
	if (ret)
		return ret;

	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return count;
}

static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

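/*
 * Register all sysfs files for the device: the RC6 residency groups,
 * the L3 parity attributes, the RPS frequency files and the error
 * state node. Failures are logged but otherwise non-fatal.
 */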
void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			DRM_ERROR("RC6p residency sysfs setup failed\n");
	}
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&media_rc6_attr_group);
		if (ret)
			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev)) {
		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev) > 1) {
			ret = device_create_bin_file(dev->primary->kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
	else if (INTEL_INFO(dev)->gen >= 6)
		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
}

void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
#endif
}