/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
25 #define gt215_clk(p) container_of((p), struct gt215_clk, base)
29 #include <engine/fifo.h>
30 #include <subdev/bios.h>
31 #include <subdev/bios/pll.h>
32 #include <subdev/timer.h>
36 struct gt215_clk_info eng
[nv_clk_src_max
];
39 static u32
read_clk(struct gt215_clk
*, int, bool);
40 static u32
read_pll(struct gt215_clk
*, int, u32
);
43 read_vco(struct gt215_clk
*clk
, int idx
)
45 struct nvkm_device
*device
= clk
->base
.subdev
.device
;
46 u32 sctl
= nvkm_rd32(device
, 0x4120 + (idx
* 4));
48 switch (sctl
& 0x00000030) {
50 return device
->crystal
;
52 return read_pll(clk
, 0x41, 0x00e820);
54 return read_pll(clk
, 0x42, 0x00e8a0);
61 read_clk(struct gt215_clk
*clk
, int idx
, bool ignore_en
)
63 struct nvkm_device
*device
= clk
->base
.subdev
.device
;
66 /* refclk for the 0xe8xx plls is a fixed frequency */
68 if (device
->chipset
== 0xaf) {
69 /* no joke.. seriously.. sigh.. */
70 return nvkm_rd32(device
, 0x00471c) * 1000;
73 return device
->crystal
;
76 sctl
= nvkm_rd32(device
, 0x4120 + (idx
* 4));
77 if (!ignore_en
&& !(sctl
& 0x00000100))
81 if (sctl
& 0x00000400)
85 switch (sctl
& 0x00003000) {
87 if (!(sctl
& 0x00000200))
88 return device
->crystal
;
91 if (sctl
& 0x00000040)
96 if (!(sctl
& 0x00000001))
99 sclk
= read_vco(clk
, idx
);
100 sdiv
= ((sctl
& 0x003f0000) >> 16) + 2;
101 return (sclk
* 2) / sdiv
;
108 read_pll(struct gt215_clk
*clk
, int idx
, u32 pll
)
110 struct nvkm_device
*device
= clk
->base
.subdev
.device
;
111 u32 ctrl
= nvkm_rd32(device
, pll
+ 0);
112 u32 sclk
= 0, P
= 1, N
= 1, M
= 1;
114 if (!(ctrl
& 0x00000008)) {
115 if (ctrl
& 0x00000001) {
116 u32 coef
= nvkm_rd32(device
, pll
+ 4);
117 M
= (coef
& 0x000000ff) >> 0;
118 N
= (coef
& 0x0000ff00) >> 8;
119 P
= (coef
& 0x003f0000) >> 16;
121 /* no post-divider on these..
122 * XXX: it looks more like two post-"dividers" that
123 * cross each other out in the default RPLL config */
124 if ((pll
& 0x00ff00) == 0x00e800)
127 sclk
= read_clk(clk
, 0x00 + idx
, false);
130 sclk
= read_clk(clk
, 0x10 + idx
, false);
134 return sclk
* N
/ (M
* P
);
140 gt215_clk_read(struct nvkm_clk
*base
, enum nv_clk_src src
)
142 struct gt215_clk
*clk
= gt215_clk(base
);
143 struct nvkm_subdev
*subdev
= &clk
->base
.subdev
;
144 struct nvkm_device
*device
= subdev
->device
;
148 case nv_clk_src_crystal
:
149 return device
->crystal
;
150 case nv_clk_src_core
:
151 case nv_clk_src_core_intm
:
152 return read_pll(clk
, 0x00, 0x4200);
153 case nv_clk_src_shader
:
154 return read_pll(clk
, 0x01, 0x4220);
156 return read_pll(clk
, 0x02, 0x4000);
157 case nv_clk_src_disp
:
158 return read_clk(clk
, 0x20, false);
159 case nv_clk_src_vdec
:
160 return read_clk(clk
, 0x21, false);
162 return read_clk(clk
, 0x25, false);
163 case nv_clk_src_host
:
164 hsrc
= (nvkm_rd32(device
, 0xc040) & 0x30000000) >> 28;
167 return read_clk(clk
, 0x1d, false);
172 nvkm_error(subdev
, "unknown HOST clock source %d\n", hsrc
);
176 nvkm_error(subdev
, "invalid clock source %d\n", src
);
184 gt215_clk_info(struct nvkm_clk
*base
, int idx
, u32 khz
,
185 struct gt215_clk_info
*info
)
187 struct gt215_clk
*clk
= gt215_clk(base
);
188 u32 oclk
, sclk
, sdiv
;
195 info
->clk
= 0x00000100;
198 info
->clk
= 0x00002100;
201 info
->clk
= 0x00002140;
204 sclk
= read_vco(clk
, idx
);
205 sdiv
= min((sclk
* 2) / khz
, (u32
)65);
206 oclk
= (sclk
* 2) / sdiv
;
207 diff
= ((khz
+ 3000) - oclk
);
209 /* When imprecise, play it safe and aim for a clock lower than
210 * desired rather than higher */
213 oclk
= (sclk
* 2) / sdiv
;
216 /* divider can go as low as 2, limited here because NVIDIA
217 * and the VBIOS on my NVA8 seem to prefer using the PLL
218 * for 810MHz - is there a good reason?
219 * XXX: PLLs with refclk 810MHz? */
221 info
->clk
= (((sdiv
- 2) << 16) | 0x00003100);
232 gt215_pll_info(struct nvkm_clk
*base
, int idx
, u32 pll
, u32 khz
,
233 struct gt215_clk_info
*info
)
235 struct gt215_clk
*clk
= gt215_clk(base
);
236 struct nvkm_subdev
*subdev
= &clk
->base
.subdev
;
237 struct nvbios_pll limits
;
243 /* If we can get a within [-2, 3) MHz of a divider, we'll disable the
244 * PLL and use the divider instead. */
245 ret
= gt215_clk_info(&clk
->base
, idx
, khz
, info
);
247 if (!pll
|| (diff
>= -2000 && diff
< 3000)) {
252 ret
= nvbios_pll_parse(subdev
->device
->bios
, pll
, &limits
);
256 ret
= gt215_clk_info(&clk
->base
, idx
- 0x10, limits
.refclk
, info
);
257 if (ret
!= limits
.refclk
)
260 ret
= gt215_pll_calc(subdev
, &limits
, khz
, &N
, NULL
, &M
, &P
);
262 info
->pll
= (P
<< 16) | (N
<< 8) | M
;
266 info
->fb_delay
= max(((khz
+ 7566) / 15133), (u32
) 18);
267 return ret
? ret
: -ERANGE
;
271 calc_clk(struct gt215_clk
*clk
, struct nvkm_cstate
*cstate
,
272 int idx
, u32 pll
, int dom
)
274 int ret
= gt215_pll_info(&clk
->base
, idx
, pll
, cstate
->domain
[dom
],
282 calc_host(struct gt215_clk
*clk
, struct nvkm_cstate
*cstate
)
285 u32 kHz
= cstate
->domain
[nv_clk_src_host
];
286 struct gt215_clk_info
*info
= &clk
->eng
[nv_clk_src_host
];
290 info
->host_out
= NVA3_HOST_277
;
294 info
->host_out
= NVA3_HOST_CLK
;
296 ret
= gt215_clk_info(&clk
->base
, 0x1d, kHz
, info
);
304 gt215_clk_pre(struct nvkm_clk
*clk
, unsigned long *flags
)
306 struct nvkm_device
*device
= clk
->subdev
.device
;
307 struct nvkm_fifo
*fifo
= device
->fifo
;
309 /* halt and idle execution engines */
310 nvkm_mask(device
, 0x020060, 0x00070000, 0x00000000);
311 nvkm_mask(device
, 0x002504, 0x00000001, 0x00000001);
312 /* Wait until the interrupt handler is finished */
313 if (nvkm_msec(device
, 2000,
314 if (!nvkm_rd32(device
, 0x000100))
320 nvkm_fifo_pause(fifo
, flags
);
322 if (nvkm_msec(device
, 2000,
323 if (nvkm_rd32(device
, 0x002504) & 0x00000010)
328 if (nvkm_msec(device
, 2000,
329 u32 tmp
= nvkm_rd32(device
, 0x00251c) & 0x0000003f;
330 if (tmp
== 0x0000003f)
339 gt215_clk_post(struct nvkm_clk
*clk
, unsigned long *flags
)
341 struct nvkm_device
*device
= clk
->subdev
.device
;
342 struct nvkm_fifo
*fifo
= device
->fifo
;
345 nvkm_fifo_start(fifo
, flags
);
347 nvkm_mask(device
, 0x002504, 0x00000001, 0x00000000);
348 nvkm_mask(device
, 0x020060, 0x00070000, 0x00040000);
352 disable_clk_src(struct gt215_clk
*clk
, u32 src
)
354 struct nvkm_device
*device
= clk
->base
.subdev
.device
;
355 nvkm_mask(device
, src
, 0x00000100, 0x00000000);
356 nvkm_mask(device
, src
, 0x00000001, 0x00000000);
360 prog_pll(struct gt215_clk
*clk
, int idx
, u32 pll
, int dom
)
362 struct gt215_clk_info
*info
= &clk
->eng
[dom
];
363 struct nvkm_device
*device
= clk
->base
.subdev
.device
;
364 const u32 src0
= 0x004120 + (idx
* 4);
365 const u32 src1
= 0x004160 + (idx
* 4);
366 const u32 ctrl
= pll
+ 0;
367 const u32 coef
= pll
+ 4;
371 /* Always start from a non-PLL clock */
372 bypass
= nvkm_rd32(device
, ctrl
) & 0x00000008;
374 nvkm_mask(device
, src1
, 0x00000101, 0x00000101);
375 nvkm_mask(device
, ctrl
, 0x00000008, 0x00000008);
379 nvkm_mask(device
, src0
, 0x003f3141, 0x00000101 | info
->clk
);
380 nvkm_wr32(device
, coef
, info
->pll
);
381 nvkm_mask(device
, ctrl
, 0x00000015, 0x00000015);
382 nvkm_mask(device
, ctrl
, 0x00000010, 0x00000000);
383 if (nvkm_msec(device
, 2000,
384 if (nvkm_rd32(device
, ctrl
) & 0x00020000)
387 nvkm_mask(device
, ctrl
, 0x00000010, 0x00000010);
388 nvkm_mask(device
, src0
, 0x00000101, 0x00000000);
391 nvkm_mask(device
, ctrl
, 0x00000010, 0x00000010);
392 nvkm_mask(device
, ctrl
, 0x00000008, 0x00000000);
393 disable_clk_src(clk
, src1
);
395 nvkm_mask(device
, src1
, 0x003f3141, 0x00000101 | info
->clk
);
396 nvkm_mask(device
, ctrl
, 0x00000018, 0x00000018);
398 nvkm_mask(device
, ctrl
, 0x00000001, 0x00000000);
399 disable_clk_src(clk
, src0
);
404 prog_clk(struct gt215_clk
*clk
, int idx
, int dom
)
406 struct gt215_clk_info
*info
= &clk
->eng
[dom
];
407 struct nvkm_device
*device
= clk
->base
.subdev
.device
;
408 nvkm_mask(device
, 0x004120 + (idx
* 4), 0x003f3141, 0x00000101 | info
->clk
);
412 prog_host(struct gt215_clk
*clk
)
414 struct gt215_clk_info
*info
= &clk
->eng
[nv_clk_src_host
];
415 struct nvkm_device
*device
= clk
->base
.subdev
.device
;
416 u32 hsrc
= (nvkm_rd32(device
, 0xc040));
418 switch (info
->host_out
) {
420 if ((hsrc
& 0x30000000) == 0) {
421 nvkm_wr32(device
, 0xc040, hsrc
| 0x20000000);
422 disable_clk_src(clk
, 0x4194);
426 prog_clk(clk
, 0x1d, nv_clk_src_host
);
427 if ((hsrc
& 0x30000000) >= 0x20000000) {
428 nvkm_wr32(device
, 0xc040, hsrc
& ~0x30000000);
435 /* This seems to be a clock gating factor on idle, always set to 64 */
436 nvkm_wr32(device
, 0xc044, 0x3e);
440 prog_core(struct gt215_clk
*clk
, int dom
)
442 struct gt215_clk_info
*info
= &clk
->eng
[dom
];
443 struct nvkm_device
*device
= clk
->base
.subdev
.device
;
444 u32 fb_delay
= nvkm_rd32(device
, 0x10002c);
446 if (fb_delay
< info
->fb_delay
)
447 nvkm_wr32(device
, 0x10002c, info
->fb_delay
);
449 prog_pll(clk
, 0x00, 0x004200, dom
);
451 if (fb_delay
> info
->fb_delay
)
452 nvkm_wr32(device
, 0x10002c, info
->fb_delay
);
456 gt215_clk_calc(struct nvkm_clk
*base
, struct nvkm_cstate
*cstate
)
458 struct gt215_clk
*clk
= gt215_clk(base
);
459 struct gt215_clk_info
*core
= &clk
->eng
[nv_clk_src_core
];
462 if ((ret
= calc_clk(clk
, cstate
, 0x10, 0x4200, nv_clk_src_core
)) ||
463 (ret
= calc_clk(clk
, cstate
, 0x11, 0x4220, nv_clk_src_shader
)) ||
464 (ret
= calc_clk(clk
, cstate
, 0x20, 0x0000, nv_clk_src_disp
)) ||
465 (ret
= calc_clk(clk
, cstate
, 0x21, 0x0000, nv_clk_src_vdec
)) ||
466 (ret
= calc_host(clk
, cstate
)))
469 /* XXX: Should be reading the highest bit in the VBIOS clock to decide
470 * whether to use a PLL or not... but using a PLL defeats the purpose */
472 ret
= gt215_clk_info(&clk
->base
, 0x10,
473 cstate
->domain
[nv_clk_src_core_intm
],
474 &clk
->eng
[nv_clk_src_core_intm
]);
483 gt215_clk_prog(struct nvkm_clk
*base
)
485 struct gt215_clk
*clk
= gt215_clk(base
);
486 struct gt215_clk_info
*core
= &clk
->eng
[nv_clk_src_core
];
489 unsigned long *f
= &flags
;
491 ret
= gt215_clk_pre(&clk
->base
, f
);
496 prog_core(clk
, nv_clk_src_core_intm
);
498 prog_core(clk
, nv_clk_src_core
);
499 prog_pll(clk
, 0x01, 0x004220, nv_clk_src_shader
);
500 prog_clk(clk
, 0x20, nv_clk_src_disp
);
501 prog_clk(clk
, 0x21, nv_clk_src_vdec
);
508 gt215_clk_post(&clk
->base
, f
);
/* nvkm_clk_func.tidy implementation: nothing to clean up on GT215. */
static void
gt215_clk_tidy(struct nvkm_clk *base)
{
}
517 static const struct nvkm_clk_func
519 .read
= gt215_clk_read
,
520 .calc
= gt215_clk_calc
,
521 .prog
= gt215_clk_prog
,
522 .tidy
= gt215_clk_tidy
,
524 { nv_clk_src_crystal
, 0xff },
525 { nv_clk_src_core
, 0x00, 0, "core", 1000 },
526 { nv_clk_src_shader
, 0x01, 0, "shader", 1000 },
527 { nv_clk_src_mem
, 0x02, 0, "memory", 1000 },
528 { nv_clk_src_vdec
, 0x03 },
529 { nv_clk_src_disp
, 0x04 },
530 { nv_clk_src_host
, 0x05 },
531 { nv_clk_src_core_intm
, 0x06 },
537 gt215_clk_new(struct nvkm_device
*device
, int index
, struct nvkm_clk
**pclk
)
539 struct gt215_clk
*clk
;
541 if (!(clk
= kzalloc(sizeof(*clk
), GFP_KERNEL
)))
545 return nvkm_clk_ctor(>215_clk
, device
, index
, true, &clk
->base
);
/* (web-scrape artifact, not part of the driver source:
 * "This page took 0.041602 seconds and 5 git commands to generate.") */