Commit | Line | Data |
---|---|---|
0136db58 BW |
1 | /* |
2 | * Copyright © 2012 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |
21 | * IN THE SOFTWARE. | |
22 | * | |
23 | * Authors: | |
24 | * Ben Widawsky <ben@bwidawsk.net> | |
25 | * | |
26 | */ | |
27 | ||
28 | #include <linux/device.h> | |
29 | #include <linux/module.h> | |
30 | #include <linux/stat.h> | |
31 | #include <linux/sysfs.h> | |
84bc7581 | 32 | #include "intel_drv.h" |
0136db58 BW |
33 | #include "i915_drv.h" |
34 | ||
5ab3633d | 35 | #ifdef CONFIG_PM |
0136db58 BW |
/*
 * calc_residency - read an RC6 residency counter and convert it to ms.
 * @dev: drm device
 * @reg: residency counter register (GEN6_GT_GFX_RC6/RC6p/RC6pp)
 *
 * The *128/100000 scaling implies the counter ticks every 1.28us
 * (count * 1.28us / 1000us-per-ms) -- NOTE(review): verify tick unit
 * against the hardware spec for each platform.  Returns 0 when RC6 is
 * disabled, so the sysfs files read as zero rather than stale counts.
 */
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */

	if (!intel_enable_rc6(dev))
		return 0;

	/* Widen to 64 bits before scaling: a full 32b count * 128
	 * would overflow u32. */
	raw_time = I915_READ(reg) * 128ULL;
	return DIV_ROUND_UP_ULL(raw_time, 100000);
}
47 | ||
48 | static ssize_t | |
dbdfd8e9 | 49 | show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) |
0136db58 | 50 | { |
dbdfd8e9 | 51 | struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); |
3e2a1556 | 52 | return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev)); |
0136db58 BW |
53 | } |
54 | ||
55 | static ssize_t | |
dbdfd8e9 | 56 | show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf) |
0136db58 | 57 | { |
dbdfd8e9 | 58 | struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); |
0136db58 | 59 | u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6); |
3e2a1556 | 60 | return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency); |
0136db58 BW |
61 | } |
62 | ||
63 | static ssize_t | |
dbdfd8e9 | 64 | show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf) |
0136db58 | 65 | { |
dbdfd8e9 | 66 | struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); |
0136db58 | 67 | u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p); |
3e2a1556 | 68 | return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency); |
0136db58 BW |
69 | } |
70 | ||
71 | static ssize_t | |
dbdfd8e9 | 72 | show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf) |
0136db58 | 73 | { |
dbdfd8e9 | 74 | struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); |
0136db58 | 75 | u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp); |
3e2a1556 | 76 | return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency); |
0136db58 BW |
77 | } |
78 | ||
/* Read-only RC6 residency files; merged into the standard "power"
 * subdirectory of the device rather than created at the top level. */
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL	/* sysfs attribute arrays are NULL-terminated */
};

/* power_group_name makes sysfs_merge_group() place these under
 * .../power/ alongside the core PM attributes. */
static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};
8c3f929b | 96 | #endif |
0136db58 | 97 | |
84bc7581 BW |
98 | static int l3_access_valid(struct drm_device *dev, loff_t offset) |
99 | { | |
ebf69cb8 | 100 | if (!HAS_L3_GPU_CACHE(dev)) |
84bc7581 BW |
101 | return -EPERM; |
102 | ||
103 | if (offset % 4 != 0) | |
104 | return -EINVAL; | |
105 | ||
106 | if (offset >= GEN7_L3LOG_SIZE) | |
107 | return -ENXIO; | |
108 | ||
109 | return 0; | |
110 | } | |
111 | ||
112 | static ssize_t | |
113 | i915_l3_read(struct file *filp, struct kobject *kobj, | |
114 | struct bin_attribute *attr, char *buf, | |
115 | loff_t offset, size_t count) | |
116 | { | |
117 | struct device *dev = container_of(kobj, struct device, kobj); | |
118 | struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); | |
119 | struct drm_device *drm_dev = dminor->dev; | |
120 | struct drm_i915_private *dev_priv = drm_dev->dev_private; | |
121 | uint32_t misccpctl; | |
122 | int i, ret; | |
123 | ||
124 | ret = l3_access_valid(drm_dev, offset); | |
125 | if (ret) | |
126 | return ret; | |
127 | ||
128 | ret = i915_mutex_lock_interruptible(drm_dev); | |
129 | if (ret) | |
130 | return ret; | |
131 | ||
132 | misccpctl = I915_READ(GEN7_MISCCPCTL); | |
133 | I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); | |
134 | ||
135 | for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4) | |
136 | *((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + i); | |
137 | ||
138 | I915_WRITE(GEN7_MISCCPCTL, misccpctl); | |
139 | ||
140 | mutex_unlock(&drm_dev->struct_mutex); | |
141 | ||
142 | return i - offset; | |
143 | } | |
144 | ||
145 | static ssize_t | |
146 | i915_l3_write(struct file *filp, struct kobject *kobj, | |
147 | struct bin_attribute *attr, char *buf, | |
148 | loff_t offset, size_t count) | |
149 | { | |
150 | struct device *dev = container_of(kobj, struct device, kobj); | |
151 | struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); | |
152 | struct drm_device *drm_dev = dminor->dev; | |
153 | struct drm_i915_private *dev_priv = drm_dev->dev_private; | |
154 | u32 *temp = NULL; /* Just here to make handling failures easy */ | |
155 | int ret; | |
156 | ||
157 | ret = l3_access_valid(drm_dev, offset); | |
158 | if (ret) | |
159 | return ret; | |
160 | ||
161 | ret = i915_mutex_lock_interruptible(drm_dev); | |
162 | if (ret) | |
163 | return ret; | |
164 | ||
a4da4fa4 | 165 | if (!dev_priv->l3_parity.remap_info) { |
84bc7581 BW |
166 | temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL); |
167 | if (!temp) { | |
168 | mutex_unlock(&drm_dev->struct_mutex); | |
169 | return -ENOMEM; | |
170 | } | |
171 | } | |
172 | ||
173 | ret = i915_gpu_idle(drm_dev); | |
174 | if (ret) { | |
175 | kfree(temp); | |
176 | mutex_unlock(&drm_dev->struct_mutex); | |
177 | return ret; | |
178 | } | |
179 | ||
180 | /* TODO: Ideally we really want a GPU reset here to make sure errors | |
181 | * aren't propagated. Since I cannot find a stable way to reset the GPU | |
182 | * at this point it is left as a TODO. | |
183 | */ | |
184 | if (temp) | |
a4da4fa4 | 185 | dev_priv->l3_parity.remap_info = temp; |
84bc7581 | 186 | |
a4da4fa4 | 187 | memcpy(dev_priv->l3_parity.remap_info + (offset/4), |
84bc7581 BW |
188 | buf + (offset/4), |
189 | count); | |
190 | ||
191 | i915_gem_l3_remap(drm_dev); | |
192 | ||
193 | mutex_unlock(&drm_dev->struct_mutex); | |
194 | ||
195 | return count; | |
196 | } | |
197 | ||
/* Root-only binary sysfs file exposing the L3 parity remap table
 * (DPF = dynamic parity feature).  Not mmap-able. */
static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL
};
205 | ||
df6eedc8 BW |
/*
 * gt_cur_freq_mhz_show - report the current GPU frequency in MHz.
 *
 * On Valleyview the current frequency is read back from the punit
 * status register (bits 15:8 of PUNIT_REG_GPU_FREQ_STS) and converted
 * from the opcode encoding; on other gens the cached rps delay is
 * scaled by GT_FREQUENCY_MULTIPLIER.  rps.hw_lock serializes against
 * concurrent frequency updates.
 */
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff);
	} else {
		ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
226 | ||
227 | static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) | |
228 | { | |
229 | struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); | |
230 | struct drm_device *dev = minor->dev; | |
231 | struct drm_i915_private *dev_priv = dev->dev_private; | |
232 | int ret; | |
233 | ||
4fc688ce | 234 | mutex_lock(&dev_priv->rps.hw_lock); |
0a073b84 JB |
235 | if (IS_VALLEYVIEW(dev_priv->dev)) |
236 | ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay); | |
237 | else | |
238 | ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; | |
4fc688ce | 239 | mutex_unlock(&dev_priv->rps.hw_lock); |
df6eedc8 | 240 | |
3e2a1556 | 241 | return snprintf(buf, PAGE_SIZE, "%d\n", ret); |
df6eedc8 BW |
242 | } |
243 | ||
46ddf194 BW |
244 | static ssize_t gt_max_freq_mhz_store(struct device *kdev, |
245 | struct device_attribute *attr, | |
246 | const char *buf, size_t count) | |
247 | { | |
248 | struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); | |
249 | struct drm_device *dev = minor->dev; | |
250 | struct drm_i915_private *dev_priv = dev->dev_private; | |
31c77388 | 251 | u32 val, rp_state_cap, hw_max, hw_min, non_oc_max; |
46ddf194 BW |
252 | ssize_t ret; |
253 | ||
254 | ret = kstrtou32(buf, 0, &val); | |
255 | if (ret) | |
256 | return ret; | |
257 | ||
4fc688ce | 258 | mutex_lock(&dev_priv->rps.hw_lock); |
46ddf194 | 259 | |
0a073b84 JB |
260 | if (IS_VALLEYVIEW(dev_priv->dev)) { |
261 | val = vlv_freq_opcode(dev_priv->mem_freq, val); | |
262 | ||
263 | hw_max = valleyview_rps_max_freq(dev_priv); | |
264 | hw_min = valleyview_rps_min_freq(dev_priv); | |
265 | non_oc_max = hw_max; | |
266 | } else { | |
267 | val /= GT_FREQUENCY_MULTIPLIER; | |
46ddf194 | 268 | |
0a073b84 JB |
269 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); |
270 | hw_max = dev_priv->rps.hw_max; | |
271 | non_oc_max = (rp_state_cap & 0xff); | |
272 | hw_min = ((rp_state_cap & 0xff0000) >> 16); | |
273 | } | |
274 | ||
275 | if (val < hw_min || val > hw_max || | |
276 | val < dev_priv->rps.min_delay) { | |
4fc688ce | 277 | mutex_unlock(&dev_priv->rps.hw_lock); |
46ddf194 BW |
278 | return -EINVAL; |
279 | } | |
280 | ||
31c77388 BW |
281 | if (val > non_oc_max) |
282 | DRM_DEBUG("User requested overclocking to %d\n", | |
283 | val * GT_FREQUENCY_MULTIPLIER); | |
284 | ||
0a073b84 JB |
285 | if (dev_priv->rps.cur_delay > val) { |
286 | if (IS_VALLEYVIEW(dev_priv->dev)) | |
287 | valleyview_set_rps(dev_priv->dev, val); | |
288 | else | |
289 | gen6_set_rps(dev_priv->dev, val); | |
290 | } | |
46ddf194 BW |
291 | |
292 | dev_priv->rps.max_delay = val; | |
293 | ||
4fc688ce | 294 | mutex_unlock(&dev_priv->rps.hw_lock); |
46ddf194 BW |
295 | |
296 | return count; | |
297 | } | |
298 | ||
df6eedc8 BW |
/*
 * gt_min_freq_mhz_show - report the soft minimum GPU frequency in MHz,
 * under rps.hw_lock.  Valleyview stores frequencies in the punit
 * opcode encoding and needs conversion; other gens scale the delay
 * value by GT_FREQUENCY_MULTIPLIER.
 */
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev))
		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
	else
		ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
315 | ||
46ddf194 BW |
316 | static ssize_t gt_min_freq_mhz_store(struct device *kdev, |
317 | struct device_attribute *attr, | |
318 | const char *buf, size_t count) | |
319 | { | |
320 | struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); | |
321 | struct drm_device *dev = minor->dev; | |
322 | struct drm_i915_private *dev_priv = dev->dev_private; | |
323 | u32 val, rp_state_cap, hw_max, hw_min; | |
324 | ssize_t ret; | |
325 | ||
326 | ret = kstrtou32(buf, 0, &val); | |
327 | if (ret) | |
328 | return ret; | |
329 | ||
4fc688ce | 330 | mutex_lock(&dev_priv->rps.hw_lock); |
46ddf194 | 331 | |
0a073b84 JB |
332 | if (IS_VALLEYVIEW(dev)) { |
333 | val = vlv_freq_opcode(dev_priv->mem_freq, val); | |
334 | ||
335 | hw_max = valleyview_rps_max_freq(dev_priv); | |
336 | hw_min = valleyview_rps_min_freq(dev_priv); | |
337 | } else { | |
338 | val /= GT_FREQUENCY_MULTIPLIER; | |
339 | ||
340 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | |
341 | hw_max = dev_priv->rps.hw_max; | |
342 | hw_min = ((rp_state_cap & 0xff0000) >> 16); | |
343 | } | |
46ddf194 BW |
344 | |
345 | if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) { | |
4fc688ce | 346 | mutex_unlock(&dev_priv->rps.hw_lock); |
46ddf194 BW |
347 | return -EINVAL; |
348 | } | |
349 | ||
0a073b84 JB |
350 | if (dev_priv->rps.cur_delay < val) { |
351 | if (IS_VALLEYVIEW(dev)) | |
352 | valleyview_set_rps(dev, val); | |
353 | else | |
354 | gen6_set_rps(dev_priv->dev, val); | |
355 | } | |
46ddf194 BW |
356 | |
357 | dev_priv->rps.min_delay = val; | |
358 | ||
4fc688ce | 359 | mutex_unlock(&dev_priv->rps.hw_lock); |
46ddf194 BW |
360 | |
361 | return count; | |
362 | ||
363 | } | |
364 | ||
/* Frequency control files: current is read-only; min/max are
 * root-writable. */
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

/* The three RP*_freq_mhz files share one show routine which tells
 * them apart by comparing the attribute pointer, hence the forward
 * declaration before the DEVICE_ATTR definitions it refers to. */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
374 | ||
/* For now we have a static number of RP states */
/*
 * gt_rp_mhz_show - shared show routine for the RP0/RP1/RPn capability
 * files.  Reads GEN6_RP_STATE_CAP once under struct_mutex and selects
 * the byte to report by comparing @attr against the three attribute
 * definitions; any other attribute reaching here is a programming
 * error (BUG).
 */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap;
	ssize_t ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	mutex_unlock(&dev->struct_mutex);

	/* RP_STATE_CAP packs one capability frequency per byte:
	 * RP0 in bits 7:0, RP1 in 15:8, RPn in 23:16. */
	if (attr == &dev_attr_gt_RP0_freq_mhz) {
		val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
		val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
		val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
	} else {
		BUG();
	}
	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
401 | ||
df6eedc8 BW |
/* All gen6+ frequency files, registered together by i915_setup_sysfs
 * via sysfs_create_files(); NULL-terminated. */
static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};
411 | ||
0136db58 BW |
/*
 * i915_setup_sysfs - register the i915 sysfs files on the primary
 * minor's device.  Each group is registered only where the feature
 * exists (gen >= 6 for RC6 residency and frequency controls, an L3
 * GPU cache for the parity file).  Registration failures are logged
 * but deliberately non-fatal: the driver works without sysfs.
 */
void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
	/* rc6_attr_group only exists when CONFIG_PM is set (see the
	 * matching #ifdef around its definition above). */
	if (INTEL_INFO(dev)->gen >= 6) {
		ret = sysfs_merge_group(&dev->primary->kdev.kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_GPU_CACHE(dev)) {
		ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
		if (ret)
			DRM_ERROR("gen6 sysfs setup failed\n");
	}
}
436 | ||
/*
 * i915_teardown_sysfs - remove everything i915_setup_sysfs may have
 * created.  Removal is unconditional even though setup is gated on
 * feature checks -- NOTE(review): this relies on the sysfs removal
 * helpers tolerating files that were never registered; confirm if the
 * registration conditions ever diverge further.
 */
void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
	device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
#endif
}