/* drivers/gpu/drm/i915/i915_sysfs.c */
1 /*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Ben Widawsky <ben@bwidawsk.net>
25 *
26 */
27
28 #include <linux/device.h>
29 #include <linux/module.h>
30 #include <linux/stat.h>
31 #include <linux/sysfs.h>
32 #include "intel_drv.h"
33 #include "i915_drv.h"
34
/* Map the sysfs struct device back to its drm_minor (stored as drvdata). */
#define dev_to_drm_minor(d) dev_get_drvdata((d))
36
37 #ifdef CONFIG_PM
38 static u32 calc_residency(struct drm_device *dev, const u32 reg)
39 {
40 struct drm_i915_private *dev_priv = dev->dev_private;
41 u64 raw_time; /* 32b value may overflow during fixed point math */
42 u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
43 u32 ret;
44
45 if (!intel_enable_rc6(dev))
46 return 0;
47
48 intel_runtime_pm_get(dev_priv);
49
50 /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
51 if (IS_VALLEYVIEW(dev)) {
52 u32 clk_reg, czcount_30ns;
53
54 if (IS_CHERRYVIEW(dev))
55 clk_reg = CHV_CLK_CTL1;
56 else
57 clk_reg = VLV_CLK_CTL2;
58
59 czcount_30ns = I915_READ(clk_reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;
60
61 if (!czcount_30ns) {
62 WARN(!czcount_30ns, "bogus CZ count value");
63 ret = 0;
64 goto out;
65 }
66
67 if (IS_CHERRYVIEW(dev) && czcount_30ns == 1) {
68 /* Special case for 320Mhz */
69 div = 10000000ULL;
70 units = 3125ULL;
71 } else {
72 czcount_30ns += 1;
73 div = 1000000ULL;
74 units = DIV_ROUND_UP_ULL(30ULL * bias, czcount_30ns);
75 }
76
77 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
78 units <<= 8;
79
80 div = div * bias;
81 }
82
83 raw_time = I915_READ(reg) * units;
84 ret = DIV_ROUND_UP_ULL(raw_time, div);
85
86 out:
87 intel_runtime_pm_put(dev_priv);
88 return ret;
89 }
90
91 static ssize_t
92 show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
93 {
94 struct drm_minor *dminor = dev_to_drm_minor(kdev);
95 return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
96 }
97
98 static ssize_t
99 show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
100 {
101 struct drm_minor *dminor = dev_get_drvdata(kdev);
102 u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
103 return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
104 }
105
106 static ssize_t
107 show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
108 {
109 struct drm_minor *dminor = dev_to_drm_minor(kdev);
110 u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
111 return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
112 }
113
114 static ssize_t
115 show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
116 {
117 struct drm_minor *dminor = dev_to_drm_minor(kdev);
118 u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
119 return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
120 }
121
122 static ssize_t
123 show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
124 {
125 struct drm_minor *dminor = dev_get_drvdata(kdev);
126 u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
127 return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
128 }
129
/* Read-only RC6 sysfs attributes; merged into the device's "power" group. */
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);

/* Base RC6 attributes, registered whenever the platform has RC6 */
static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs =  rc6_attrs
};

/* Deep/deepest RC6 attributes, registered only when the platform has RC6p */
static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs =  rc6p_attrs
};

/* Media-well RC6 attribute, registered only on Valleyview/Cherryview */
static struct attribute *media_rc6_attrs[] = {
	&dev_attr_media_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group media_rc6_attr_group = {
	.name = power_group_name,
	.attrs =  media_rc6_attrs
};
167 #endif
168
169 static int l3_access_valid(struct drm_device *dev, loff_t offset)
170 {
171 if (!HAS_L3_DPF(dev))
172 return -EPERM;
173
174 if (offset % 4 != 0)
175 return -EINVAL;
176
177 if (offset >= GEN7_L3LOG_SIZE)
178 return -ENXIO;
179
180 return 0;
181 }
182
183 static ssize_t
184 i915_l3_read(struct file *filp, struct kobject *kobj,
185 struct bin_attribute *attr, char *buf,
186 loff_t offset, size_t count)
187 {
188 struct device *dev = container_of(kobj, struct device, kobj);
189 struct drm_minor *dminor = dev_to_drm_minor(dev);
190 struct drm_device *drm_dev = dminor->dev;
191 struct drm_i915_private *dev_priv = drm_dev->dev_private;
192 int slice = (int)(uintptr_t)attr->private;
193 int ret;
194
195 count = round_down(count, 4);
196
197 ret = l3_access_valid(drm_dev, offset);
198 if (ret)
199 return ret;
200
201 count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
202
203 ret = i915_mutex_lock_interruptible(drm_dev);
204 if (ret)
205 return ret;
206
207 if (dev_priv->l3_parity.remap_info[slice])
208 memcpy(buf,
209 dev_priv->l3_parity.remap_info[slice] + (offset/4),
210 count);
211 else
212 memset(buf, 0, count);
213
214 mutex_unlock(&drm_dev->struct_mutex);
215
216 return count;
217 }
218
/*
 * i915_l3_write - sysfs write handler for the l3_parity remap table
 *
 * Stages a new L3 remap for the slice encoded in @attr->private:
 * allocates the shadow table on first use, idles the GPU, copies the
 * user bytes in, then flags every context so the remap is (re)applied
 * on its next context switch. Returns @count or a negative errno.
 */
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	struct intel_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	/* Remapping is applied via context switches, so HW contexts are a must */
	if (!HAS_HW_CONTEXTS(drm_dev))
		return -ENXIO;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	/* First write to this slice: allocate the shadow table up front */
	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return -ENOMEM;
		}
	}

	/* Quiesce the GPU before changing remap state */
	ret = i915_gpu_idle(drm_dev);
	if (ret) {
		kfree(temp);	/* kfree(NULL) is fine if we didn't allocate */
		mutex_unlock(&drm_dev->struct_mutex);
		return ret;
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}
276
/* l3_parity: binary sysfs file exposing the L3 remap table for slice 0 */
static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0	/* slice index, decoded in the handlers */
};

/* l3_parity_slice_1: same interface for slice 1 (multi-slice parts only) */
static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1	/* slice index, decoded in the handlers */
};
294
/*
 * gt_act_freq_mhz: the frequency the GPU is actually running at right
 * now, read back from hardware (as opposed to the last software request).
 */
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Make sure any deferred RPS resume work has finished first */
	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	/* Hold a wakeref: the status reads below need the device awake */
	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		u32 freq;
		/* On VLV/CHV the current frequency is read from the Punit */
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {
		/* The CAGF field layout in RPSTAT1 differs per generation */
		u32 rpstat = I915_READ(GEN6_RPSTAT1);
		if (IS_GEN9(dev_priv))
			ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		ret = intel_gpu_freq(dev_priv, ret);
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
328
329 static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
330 struct device_attribute *attr, char *buf)
331 {
332 struct drm_minor *minor = dev_to_drm_minor(kdev);
333 struct drm_device *dev = minor->dev;
334 struct drm_i915_private *dev_priv = dev->dev_private;
335 int ret;
336
337 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
338
339 intel_runtime_pm_get(dev_priv);
340
341 mutex_lock(&dev_priv->rps.hw_lock);
342 ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
343 mutex_unlock(&dev_priv->rps.hw_lock);
344
345 intel_runtime_pm_put(dev_priv);
346
347 return snprintf(buf, PAGE_SIZE, "%d\n", ret);
348 }
349
350 static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
351 struct device_attribute *attr, char *buf)
352 {
353 struct drm_minor *minor = dev_to_drm_minor(kdev);
354 struct drm_device *dev = minor->dev;
355 struct drm_i915_private *dev_priv = dev->dev_private;
356
357 return snprintf(buf, PAGE_SIZE,
358 "%d\n",
359 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
360 }
361
362 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
363 {
364 struct drm_minor *minor = dev_to_drm_minor(kdev);
365 struct drm_device *dev = minor->dev;
366 struct drm_i915_private *dev_priv = dev->dev_private;
367 int ret;
368
369 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
370
371 mutex_lock(&dev_priv->rps.hw_lock);
372 ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
373 mutex_unlock(&dev_priv->rps.hw_lock);
374
375 return snprintf(buf, PAGE_SIZE, "%d\n", ret);
376 }
377
/*
 * gt_max_freq_mhz (store): set the user's maximum frequency softlimit.
 *
 * Input is parsed as MHz, converted to the platform's opcode space and
 * validated against the hardware range and the current min softlimit.
 * Returns @count on success, -EINVAL for out-of-range requests, or the
 * kstrtou32() error for unparseable input.
 */
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	/* MHz -> platform frequency opcode */
	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	/* Above RP0 is overclocking territory: allowed, but worth logging */
	if (val > dev_priv->rps.rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  intel_gpu_freq(dev_priv, val));

	dev_priv->rps.max_freq_softlimit = val;

	/* Keep the current request inside the new [min, max] window */
	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	intel_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}
424
425 static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
426 {
427 struct drm_minor *minor = dev_to_drm_minor(kdev);
428 struct drm_device *dev = minor->dev;
429 struct drm_i915_private *dev_priv = dev->dev_private;
430 int ret;
431
432 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
433
434 mutex_lock(&dev_priv->rps.hw_lock);
435 ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
436 mutex_unlock(&dev_priv->rps.hw_lock);
437
438 return snprintf(buf, PAGE_SIZE, "%d\n", ret);
439 }
440
/*
 * gt_min_freq_mhz (store): set the user's minimum frequency softlimit.
 *
 * Input is parsed as MHz, converted to the platform's opcode space and
 * validated against the hardware range and the current max softlimit.
 * Returns @count on success, -EINVAL for out-of-range requests, or the
 * kstrtou32() error for unparseable input.
 */
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	/* MHz -> platform frequency opcode */
	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	/* Keep the current request inside the new [min, max] window */
	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	intel_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;

}
484
/* RPS frequency controls: act/cur are read-only, max/min accept writes */
static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

/* Forward declaration: one show routine serves all three RP attributes */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
496
497 /* For now we have a static number of RP states */
498 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
499 {
500 struct drm_minor *minor = dev_to_drm_minor(kdev);
501 struct drm_device *dev = minor->dev;
502 struct drm_i915_private *dev_priv = dev->dev_private;
503 u32 val;
504
505 if (attr == &dev_attr_gt_RP0_freq_mhz)
506 val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
507 else if (attr == &dev_attr_gt_RP1_freq_mhz)
508 val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
509 else if (attr == &dev_attr_gt_RPn_freq_mhz)
510 val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
511 else
512 BUG();
513
514 return snprintf(buf, PAGE_SIZE, "%d\n", val);
515 }
516
/* RPS attributes registered on gen6+ platforms */
static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

/* Same set plus the VLV/CHV-only efficient-frequency attribute */
static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};
539
/*
 * error (read): expose the captured GPU error state through sysfs.
 *
 * Formats the saved error state into a temporary buffer sized/positioned
 * for this read, then copies out up to @count bytes. Returns the number
 * of bytes copied, or a negative errno on failure.
 */
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{

	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	/* The buffer is initialised with this read's offset and size */
	ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
	if (ret)
		return ret;

	error_priv.dev = dev;
	/* Take a reference on the error state so it can't vanish under us */
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	/* Error from formatting wins; otherwise report bytes copied */
	return ret ?: ret_count;
}
575
576 static ssize_t error_state_write(struct file *file, struct kobject *kobj,
577 struct bin_attribute *attr, char *buf,
578 loff_t off, size_t count)
579 {
580 struct device *kdev = container_of(kobj, struct device, kobj);
581 struct drm_minor *minor = dev_to_drm_minor(kdev);
582 struct drm_device *dev = minor->dev;
583 int ret;
584
585 DRM_DEBUG_DRIVER("Resetting error state\n");
586
587 ret = mutex_lock_interruptible(&dev->struct_mutex);
588 if (ret)
589 return ret;
590
591 i915_destroy_error_state(dev);
592 mutex_unlock(&dev->struct_mutex);
593
594 return count;
595 }
596
/* "error": root-only binary file; read dumps, write clears the error state */
static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,	/* size unknown up front; reads are handler-bounded */
	.read = error_state_read,
	.write = error_state_write,
};
604
/*
 * i915_setup_sysfs - register all i915 sysfs entries on the primary minor
 * @dev: drm device
 *
 * Registration is feature-gated (RC6, RC6p, VLV media RC6, L3 DPF, RPS)
 * and best-effort: failures are logged but do not abort driver load.
 */
void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			DRM_ERROR("RC6p residency sysfs setup failed\n");
	}
	if (IS_VALLEYVIEW(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&media_rc6_attr_group);
		if (ret)
			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev)) {
		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		/* Second slice gets its own node on multi-slice parts */
		if (NUM_L3_SLICES(dev) > 1) {
			ret = device_create_bin_file(dev->primary->kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	/* RPS frequency controls: VLV gets the extra RPe attribute */
	ret = 0;
	if (IS_VALLEYVIEW(dev))
		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
	else if (INTEL_INFO(dev)->gen >= 6)
		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
}
655
/*
 * i915_teardown_sysfs - remove the entries created by i915_setup_sysfs()
 * @dev: drm device
 *
 * NOTE(review): removal is unconditional even where setup was gated on
 * platform features (e.g. L3 DPF, gen < 6) — presumably relying on the
 * removal helpers tolerating never-created entries; confirm if changing.
 */
void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
	if (IS_VALLEYVIEW(dev))
		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
	device_remove_bin_file(dev->primary->kdev,  &dpf_attrs_1);
	device_remove_bin_file(dev->primary->kdev,  &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
#endif
}