drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <subdev/bios.h>
#include <subdev/bios/boost.h>
#include <subdev/bios/cstep.h>
#include <subdev/bios/perf.h>
#include <subdev/fb.h>
#include <subdev/therm.h>
#include <subdev/volt.h>

#include <core/option.h>

/******************************************************************************
 * misc
 *****************************************************************************/
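/* Clamp (and, when 'adjust' is set, scale) a clock frequency for the given
 * domain according to the VBIOS boost tables for the requested performance
 * level.  Returns the input unchanged if no boost entry exists.
 */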
static u32
nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
		u8 pstate, u8 domain, u32 input)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	struct nvbios_boostE boostE;
	u8 ver, hdr, cnt, len;
	u16 data;

	data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE);
	if (data) {
		struct nvbios_boostS boostS;
		u8 idx = 0, sver, shdr;
		u16 subd;

		input = max(boostE.min, input);
		input = min(boostE.max, input);
		do {
			sver = ver;
			shdr = hdr;
			subd = nvbios_boostSp(bios, idx++, data, &sver, &shdr,
					      cnt, len, &boostS);
			if (subd && boostS.domain == domain) {
				if (adjust)
					input = input * boostS.percent / 100;
				input = max(boostS.min, input);
				input = min(boostS.max, input);
				break;
			}
		} while (subd);
	}

	return input;
}

/******************************************************************************
 * C-States
 *****************************************************************************/
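/* Program the C-state selected for the given P-state: fan speed and voltage
 * are raised before reclocking and relaxed again afterwards, so the card is
 * never run outside its operating limits while the clocks change.
 */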
static int
nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_therm *therm = device->therm;
	struct nvkm_volt *volt = device->volt;
	struct nvkm_cstate *cstate;
	int ret;

	if (!list_empty(&pstate->list)) {
		cstate = list_entry(pstate->list.prev, typeof(*cstate), head);
	} else {
		cstate = &pstate->base;
	}

	if (therm) {
		ret = nvkm_therm_cstate(therm, pstate->fanspeed, +1);
		if (ret && ret != -ENODEV) {
			nvkm_error(subdev, "failed to raise fan speed: %d\n", ret);
			return ret;
		}
	}

	if (volt) {
		ret = nvkm_volt_set_id(volt, cstate->voltage, +1);
		if (ret && ret != -ENODEV) {
			nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
			return ret;
		}
	}

	ret = clk->func->calc(clk, cstate);
	if (ret == 0) {
		ret = clk->func->prog(clk);
		clk->func->tidy(clk);
	}

	if (volt) {
		ret = nvkm_volt_set_id(volt, cstate->voltage, -1);
		if (ret && ret != -ENODEV)
			nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
	}

	if (therm) {
		ret = nvkm_therm_cstate(therm, pstate->fanspeed, -1);
		if (ret && ret != -ENODEV)
			nvkm_error(subdev, "failed to lower fan speed: %d\n", ret);
	}

	return ret;
}

static void
nvkm_cstate_del(struct nvkm_cstate *cstate)
{
	list_del(&cstate->head);
	kfree(cstate);
}

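/* Create a C-state from VBIOS cstep table entry 'idx': copy the P-state's
 * base clocks, then override the voltage and the core clock domains (each
 * clamped via the boost tables) before adding it to the P-state's list.
 */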
static int
nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	const struct nvkm_domain *domain = clk->domains;
	struct nvkm_cstate *cstate = NULL;
	struct nvbios_cstepX cstepX;
	u8 ver, hdr;
	u16 data;

	data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX);
	if (!data)
		return -ENOENT;

	cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
	if (!cstate)
		return -ENOMEM;

	*cstate = pstate->base;
	cstate->voltage = cstepX.voltage;

	while (domain && domain->name != nv_clk_src_max) {
		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
			u32 freq = nvkm_clk_adjust(clk, true, pstate->pstate,
						   domain->bios, cstepX.freq);
			cstate->domain[domain->name] = freq;
		}
		domain++;
	}

	list_add(&cstate->head, &pstate->list);
	return 0;
}

/******************************************************************************
 * P-States
 *****************************************************************************/
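/* Switch to the P-state at index 'pstatei' in the state list: update the
 * PCIe link, reclock memory (retrying while the RAM code requests it), then
 * program the remaining clock domains through the C-state path.
 */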
static int
nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_ram *ram = subdev->device->fb->ram;
	struct nvkm_pci *pci = subdev->device->pci;
	struct nvkm_pstate *pstate;
	int ret, idx = 0;

	list_for_each_entry(pstate, &clk->states, head) {
		if (idx++ == pstatei)
			break;
	}

	nvkm_debug(subdev, "setting performance state %d\n", pstatei);
	clk->pstate = pstatei;

	nvkm_pcie_set_link(pci, pstate->pcie_speed, pstate->pcie_width);

	if (ram && ram->func->calc) {
		int khz = pstate->base.domain[nv_clk_src_mem];
		do {
			ret = ram->func->calc(ram, khz);
			if (ret == 0)
				ret = ram->func->prog(ram);
		} while (ret > 0);
		ram->func->tidy(ram);
	}

	return nvkm_cstate_prog(clk, pstate, 0);
}

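/* Deferred reclocking worker: picks the target P-state from the user request
 * for the current power source (AC/DC), falling back to the adaptive state,
 * and bounds it by the thermal (tstate) and dynamic (dstate) limits before
 * programming it.
 */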
static void
nvkm_pstate_work(struct work_struct *work)
{
	struct nvkm_clk *clk = container_of(work, typeof(*clk), work);
	struct nvkm_subdev *subdev = &clk->subdev;
	int pstate;

	if (!atomic_xchg(&clk->waiting, 0))
		return;
	clk->pwrsrc = power_supply_is_system_supplied();

	nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
		   clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
		   clk->astate, clk->tstate, clk->dstate);

	pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
	if (clk->state_nr && pstate != -1) {
		pstate = (pstate < 0) ? clk->astate : pstate;
		pstate = min(pstate, clk->state_nr - 1 + clk->tstate);
		pstate = max(pstate, clk->dstate);
	} else {
		pstate = clk->pstate = -1;
	}

	nvkm_trace(subdev, "-> %d\n", pstate);
	if (pstate != clk->pstate) {
		int ret = nvkm_pstate_prog(clk, pstate);
		if (ret) {
			nvkm_error(subdev, "error setting pstate %d: %d\n",
				   pstate, ret);
		}
	}

	wake_up_all(&clk->wait);
	nvkm_notify_get(&clk->pwrsrc_ntfy);
}

static int
nvkm_pstate_calc(struct nvkm_clk *clk, bool wait)
{
	atomic_set(&clk->waiting, 1);
	schedule_work(&clk->work);
	if (wait)
		wait_event(clk->wait, !atomic_read(&clk->waiting));
	return 0;
}

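/* Log a human-readable summary of a P-state: its id and, for each named
 * clock domain, the frequency (or the frequency range across its C-states).
 */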
static void
nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
{
	const struct nvkm_domain *clock = clk->domains - 1;
	struct nvkm_cstate *cstate;
	struct nvkm_subdev *subdev = &clk->subdev;
	char info[3][32] = { "", "", "" };
	char name[4] = "--";
	int i = -1;

	if (pstate->pstate != 0xff)
		snprintf(name, sizeof(name), "%02x", pstate->pstate);

	while ((++clock)->name != nv_clk_src_max) {
		u32 lo = pstate->base.domain[clock->name];
		u32 hi = lo;
		if (hi == 0)
			continue;

		nvkm_debug(subdev, "%02x: %10d KHz\n", clock->name, lo);
		list_for_each_entry(cstate, &pstate->list, head) {
			u32 freq = cstate->domain[clock->name];
			lo = min(lo, freq);
			hi = max(hi, freq);
			nvkm_debug(subdev, "%10d KHz\n", freq);
		}

		if (clock->mname && ++i < ARRAY_SIZE(info)) {
			lo /= clock->mdiv;
			hi /= clock->mdiv;
			if (lo == hi) {
				snprintf(info[i], sizeof(info[i]), "%s %d MHz",
					 clock->mname, lo);
			} else {
				snprintf(info[i], sizeof(info[i]),
					 "%s %d-%d MHz", clock->mname, lo, hi);
			}
		}
	}

	nvkm_debug(subdev, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
}

static void
nvkm_pstate_del(struct nvkm_pstate *pstate)
{
	struct nvkm_cstate *cstate, *temp;

	list_for_each_entry_safe(cstate, temp, &pstate->list, head) {
		nvkm_cstate_del(cstate);
	}

	list_del(&pstate->head);
	kfree(pstate);
}

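/* Build a P-state from VBIOS performance table entry 'idx': fill the base
 * C-state from the entry (and its v4.0 per-domain sub-entries, adjusted via
 * the boost tables), then attach any extra C-states referenced by the cstep
 * table.
 */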
static int
nvkm_pstate_new(struct nvkm_clk *clk, int idx)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	const struct nvkm_domain *domain = clk->domains - 1;
	struct nvkm_pstate *pstate;
	struct nvkm_cstate *cstate;
	struct nvbios_cstepE cstepE;
	struct nvbios_perfE perfE;
	u8 ver, hdr, cnt, len;
	u16 data;

	data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE);
	if (!data)
		return -EINVAL;
	if (perfE.pstate == 0xff)
		return 0;

	pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
	if (!pstate)
		return -ENOMEM;
	cstate = &pstate->base;

	INIT_LIST_HEAD(&pstate->list);

	pstate->pstate = perfE.pstate;
	pstate->fanspeed = perfE.fanspeed;
	pstate->pcie_speed = perfE.pcie_speed;
	pstate->pcie_width = perfE.pcie_width;
	cstate->voltage = perfE.voltage;
	cstate->domain[nv_clk_src_core] = perfE.core;
	cstate->domain[nv_clk_src_shader] = perfE.shader;
	cstate->domain[nv_clk_src_mem] = perfE.memory;
	cstate->domain[nv_clk_src_vdec] = perfE.vdec;
	cstate->domain[nv_clk_src_dom6] = perfE.disp;

	while (ver >= 0x40 && (++domain)->name != nv_clk_src_max) {
		struct nvbios_perfS perfS;
		u8 sver = ver, shdr = hdr;
		u32 perfSe = nvbios_perfSp(bios, data, domain->bios,
					   &sver, &shdr, cnt, len, &perfS);
		if (perfSe == 0 || sver != 0x40)
			continue;

		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
			perfS.v40.freq = nvkm_clk_adjust(clk, false,
							 pstate->pstate,
							 domain->bios,
							 perfS.v40.freq);
		}

		cstate->domain[domain->name] = perfS.v40.freq;
	}

	data = nvbios_cstepEm(bios, pstate->pstate, &ver, &hdr, &cstepE);
	if (data) {
		int idx = cstepE.index;
		do {
			nvkm_cstate_new(clk, idx, pstate);
		} while (idx--);
	}

	nvkm_pstate_info(clk, pstate);
	list_add_tail(&pstate->head, &clk->states);
	clk->state_nr++;
	return 0;
}

/******************************************************************************
 * Adjustment triggers
 *****************************************************************************/
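/* Translate a user pstate request into an internal value: -1 (disabled) and
 * -2 (auto) pass through, any other value is matched against the pstate ids
 * in the state list and converted to a list index.  The result is offset by
 * +2 so callers can distinguish it from negative error codes.
 */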
static int
nvkm_clk_ustate_update(struct nvkm_clk *clk, int req)
{
	struct nvkm_pstate *pstate;
	int i = 0;

	if (!clk->allow_reclock)
		return -ENOSYS;

	if (req != -1 && req != -2) {
		list_for_each_entry(pstate, &clk->states, head) {
			if (pstate->pstate == req)
				break;
			i++;
		}

		if (pstate->pstate != req)
			return -EINVAL;
		req = i;
	}

	return req + 2;
}

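/* Parse an NvClkMode-style option string ("auto", "disabled" or a pstate id)
 * into the encoding used for clk->ustate_ac/ustate_dc.
 */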
static int
nvkm_clk_nstate(struct nvkm_clk *clk, const char *mode, int arglen)
{
	int ret = 1;

	if (clk->allow_reclock && !strncasecmpz(mode, "auto", arglen))
		return -2;

	if (strncasecmpz(mode, "disabled", arglen)) {
		char save = mode[arglen];
		long v;

		((char *)mode)[arglen] = '\0';
		if (!kstrtol(mode, 0, &v)) {
			ret = nvkm_clk_ustate_update(clk, v);
			if (ret < 0)
				ret = 1;
		}
		((char *)mode)[arglen] = save;
	}

	return ret - 2;
}

int
nvkm_clk_ustate(struct nvkm_clk *clk, int req, int pwr)
{
	int ret = nvkm_clk_ustate_update(clk, req);
	if (ret >= 0) {
		if (ret -= 2, pwr) clk->ustate_ac = ret;
		else		   clk->ustate_dc = ret;
		return nvkm_pstate_calc(clk, true);
	}
	return ret;
}

int
nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait)
{
	if (!rel) clk->astate  = req;
	if ( rel) clk->astate += rel;
	clk->astate = min(clk->astate, clk->state_nr - 1);
	clk->astate = max(clk->astate, 0);
	return nvkm_pstate_calc(clk, wait);
}

int
nvkm_clk_tstate(struct nvkm_clk *clk, int req, int rel)
{
	if (!rel) clk->tstate  = req;
	if ( rel) clk->tstate += rel;
	clk->tstate = min(clk->tstate, 0);
	clk->tstate = max(clk->tstate, -(clk->state_nr - 1));
	return nvkm_pstate_calc(clk, true);
}

int
nvkm_clk_dstate(struct nvkm_clk *clk, int req, int rel)
{
	if (!rel) clk->dstate  = req;
	if ( rel) clk->dstate += rel;
	clk->dstate = min(clk->dstate, clk->state_nr - 1);
	clk->dstate = max(clk->dstate, 0);
	return nvkm_pstate_calc(clk, true);
}

static int
nvkm_clk_pwrsrc(struct nvkm_notify *notify)
{
	struct nvkm_clk *clk =
		container_of(notify, typeof(*clk), pwrsrc_ntfy);
	nvkm_pstate_calc(clk, false);
	return NVKM_NOTIFY_DROP;
}

/******************************************************************************
 * subdev base class implementation
 *****************************************************************************/

int
nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
{
	return clk->func->read(clk, src);
}

static int
nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	nvkm_notify_put(&clk->pwrsrc_ntfy);
	flush_work(&clk->work);
	if (clk->func->fini)
		clk->func->fini(clk);
	return 0;
}

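/* Record the boot clocks read from hardware as the boot state (bstate), then
 * either hand off to the implementation's own init or program an initial
 * adaptive state.
 */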
static int
nvkm_clk_init(struct nvkm_subdev *subdev)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	const struct nvkm_domain *clock = clk->domains;
	int ret;

	memset(&clk->bstate, 0x00, sizeof(clk->bstate));
	INIT_LIST_HEAD(&clk->bstate.list);
	clk->bstate.pstate = 0xff;

	while (clock->name != nv_clk_src_max) {
		ret = nvkm_clk_read(clk, clock->name);
		if (ret < 0) {
			nvkm_error(subdev, "%02x freq unknown\n", clock->name);
			return ret;
		}
		clk->bstate.base.domain[clock->name] = ret;
		clock++;
	}

	nvkm_pstate_info(clk, &clk->bstate);

	if (clk->func->init)
		return clk->func->init(clk);

	clk->astate = clk->state_nr - 1;
	clk->tstate = 0;
	clk->dstate = 0;
	clk->pstate = -1;
	nvkm_pstate_calc(clk, true);
	return 0;
}

static void *
nvkm_clk_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	struct nvkm_pstate *pstate, *temp;

	nvkm_notify_fini(&clk->pwrsrc_ntfy);

	/* Early return if the pstates have been provided statically */
	if (clk->func->pstates)
		return clk;

	list_for_each_entry_safe(pstate, temp, &clk->states, head) {
		nvkm_pstate_del(pstate);
	}

	return clk;
}

static const struct nvkm_subdev_func
nvkm_clk = {
	.dtor = nvkm_clk_dtor,
	.init = nvkm_clk_init,
	.fini = nvkm_clk_fini,
};

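/* Common constructor: build the P-state list (either from the VBIOS or from
 * the implementation's static table), register the power-source change
 * notifier, and apply any NvClkMode/NvClkModeAC/NvClkModeDC config options.
 */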
int
nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      int index, bool allow_reclock, struct nvkm_clk *clk)
{
	int ret, idx, arglen;
	const char *mode;

	nvkm_subdev_ctor(&nvkm_clk, device, index, &clk->subdev);
	clk->func = func;
	INIT_LIST_HEAD(&clk->states);
	clk->domains = func->domains;
	clk->ustate_ac = -1;
	clk->ustate_dc = -1;
	clk->allow_reclock = allow_reclock;

	INIT_WORK(&clk->work, nvkm_pstate_work);
	init_waitqueue_head(&clk->wait);
	atomic_set(&clk->waiting, 0);

	/* If no pstates are provided, try and fetch them from the BIOS */
	if (!func->pstates) {
		idx = 0;
		do {
			ret = nvkm_pstate_new(clk, idx++);
		} while (ret == 0);
	} else {
		for (idx = 0; idx < func->nr_pstates; idx++)
			list_add_tail(&func->pstates[idx].head, &clk->states);
		clk->state_nr = func->nr_pstates;
	}

	ret = nvkm_notify_init(NULL, &device->event, nvkm_clk_pwrsrc, true,
			       NULL, 0, 0, &clk->pwrsrc_ntfy);
	if (ret)
		return ret;

	mode = nvkm_stropt(device->cfgopt, "NvClkMode", &arglen);
	if (mode) {
		clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);
		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
	}

	mode = nvkm_stropt(device->cfgopt, "NvClkModeAC", &arglen);
	if (mode)
		clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);

	mode = nvkm_stropt(device->cfgopt, "NvClkModeDC", &arglen);
	if (mode)
		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);

	return 0;
}

int
nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      int index, bool allow_reclock, struct nvkm_clk **pclk)
{
	if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
		return -ENOMEM;
	return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk);
}