drm/nouveau/clk: switch to device pri macros
author Ben Skeggs <bskeggs@redhat.com>
Thu, 20 Aug 2015 04:54:08 +0000 (14:54 +1000)
committer Ben Skeggs <bskeggs@redhat.com>
Fri, 28 Aug 2015 02:40:14 +0000 (12:40 +1000)
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
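
Every hunk in the diffs below applies the same mechanical substitution: rather than calling the old subdev-relative accessors (nv_rd32/nv_wr32/nv_mask) and nv_device() on the clk object, each function first resolves the nvkm_device through clk->base.subdev.device and then uses the device-level pri macros (nvkm_rd32/nvkm_wr32/nvkm_mask) and device->crystal. A minimal before/after sketch of the idiom, using a hypothetical helper rather than a function taken from the patch:

	/* Hypothetical helper, for illustration only; not part of the patch. */
	static u32
	example_read_src(struct gf100_clk *clk, u32 dsrc)
	{
		/* old idiom: the accessors took the subdev object itself:
		 *     u32 ssrc = nv_rd32(clk, dsrc);
		 *     u32 xtal = nv_device(clk)->crystal;
		 */
		struct nvkm_device *device = clk->base.subdev.device;
		u32 ssrc = nvkm_rd32(device, dsrc); /* new: device pri macro */

		if (!(ssrc & 0x00000100))
			return device->crystal;    /* was nv_device(clk)->crystal */
		return 108000;
	}

As the mcp77 hunk shows (mast = nvkm_mask(device, 0xc054, ...)), nvkm_mask() keeps the old nv_mask() behaviour of returning the register's previous value, so the conversion is a drop-in argument change.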

drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index 99cfa09b215987c7b1fc0796316282b4b9263afe..f7aac3702c2348f64b777247da1ec0ad1a5641d3 100644
@@ -47,7 +47,8 @@ static u32 read_div(struct gf100_clk *, int, u32, u32);
 static u32
 read_vco(struct gf100_clk *clk, u32 dsrc)
 {
-       u32 ssrc = nv_rd32(clk, dsrc);
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 ssrc = nvkm_rd32(device, dsrc);
        if (!(ssrc & 0x00000100))
                return clk->base.read(&clk->base, nv_clk_src_sppll0);
        return clk->base.read(&clk->base, nv_clk_src_sppll1);
@@ -56,8 +57,9 @@ read_vco(struct gf100_clk *clk, u32 dsrc)
 static u32
 read_pll(struct gf100_clk *clk, u32 pll)
 {
-       u32 ctrl = nv_rd32(clk, pll + 0x00);
-       u32 coef = nv_rd32(clk, pll + 0x04);
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 ctrl = nvkm_rd32(device, pll + 0x00);
+       u32 coef = nvkm_rd32(device, pll + 0x04);
        u32 P = (coef & 0x003f0000) >> 16;
        u32 N = (coef & 0x0000ff00) >> 8;
        u32 M = (coef & 0x000000ff) >> 0;
@@ -69,7 +71,7 @@ read_pll(struct gf100_clk *clk, u32 pll)
        switch (pll) {
        case 0x00e800:
        case 0x00e820:
-               sclk = nv_device(clk)->crystal;
+               sclk = device->crystal;
                P = 1;
                break;
        case 0x132000:
@@ -94,13 +96,14 @@ read_pll(struct gf100_clk *clk, u32 pll)
 static u32
 read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
 {
-       u32 ssrc = nv_rd32(clk, dsrc + (doff * 4));
-       u32 sctl = nv_rd32(clk, dctl + (doff * 4));
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
+       u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
 
        switch (ssrc & 0x00000003) {
        case 0:
                if ((ssrc & 0x00030000) != 0x00030000)
-                       return nv_device(clk)->crystal;
+                       return device->crystal;
                return 108000;
        case 2:
                return 100000;
@@ -120,8 +123,9 @@ read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
 static u32
 read_clk(struct gf100_clk *clk, int idx)
 {
-       u32 sctl = nv_rd32(clk, 0x137250 + (idx * 4));
-       u32 ssel = nv_rd32(clk, 0x137100);
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
+       u32 ssel = nvkm_rd32(device, 0x137100);
        u32 sclk, sdiv;
 
        if (ssel & (1 << idx)) {
@@ -145,7 +149,7 @@ static int
 gf100_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
        struct gf100_clk *clk = container_of(obj, typeof(*clk), base);
-       struct nvkm_device *device = nv_device(clk);
+       struct nvkm_device *device = clk->base.subdev.device;
 
        switch (src) {
        case nv_clk_src_crystal:
@@ -166,7 +170,7 @@ gf100_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
        case nv_clk_src_mdiv:
                return read_div(clk, 0, 0x137300, 0x137310);
        case nv_clk_src_mem:
-               if (nv_rd32(clk, 0x1373f0) & 0x00000002)
+               if (nvkm_rd32(device, 0x1373f0) & 0x00000002)
                        return clk->base.read(&clk->base, nv_clk_src_mpll);
                return clk->base.read(&clk->base, nv_clk_src_mdiv);
 
@@ -329,16 +333,18 @@ static void
 gf100_clk_prog_0(struct gf100_clk *clk, int idx)
 {
        struct gf100_clk_info *info = &clk->eng[idx];
+       struct nvkm_device *device = clk->base.subdev.device;
        if (idx < 7 && !info->ssel) {
-               nv_mask(clk, 0x1371d0 + (idx * 0x04), 0x80003f3f, info->ddiv);
-               nv_wr32(clk, 0x137160 + (idx * 0x04), info->dsrc);
+               nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x80003f3f, info->ddiv);
+               nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
        }
 }
 
 static void
 gf100_clk_prog_1(struct gf100_clk *clk, int idx)
 {
-       nv_mask(clk, 0x137100, (1 << idx), 0x00000000);
+       struct nvkm_device *device = clk->base.subdev.device;
+       nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
        nv_wait(clk, 0x137100, (1 << idx), 0x00000000);
 }
 
@@ -346,15 +352,16 @@ static void
 gf100_clk_prog_2(struct gf100_clk *clk, int idx)
 {
        struct gf100_clk_info *info = &clk->eng[idx];
+       struct nvkm_device *device = clk->base.subdev.device;
        const u32 addr = 0x137000 + (idx * 0x20);
        if (idx <= 7) {
-               nv_mask(clk, addr + 0x00, 0x00000004, 0x00000000);
-               nv_mask(clk, addr + 0x00, 0x00000001, 0x00000000);
+               nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
+               nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
                if (info->coef) {
-                       nv_wr32(clk, addr + 0x04, info->coef);
-                       nv_mask(clk, addr + 0x00, 0x00000001, 0x00000001);
+                       nvkm_wr32(device, addr + 0x04, info->coef);
+                       nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
                        nv_wait(clk, addr + 0x00, 0x00020000, 0x00020000);
-                       nv_mask(clk, addr + 0x00, 0x00020004, 0x00000004);
+                       nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
                }
        }
 }
@@ -363,8 +370,9 @@ static void
 gf100_clk_prog_3(struct gf100_clk *clk, int idx)
 {
        struct gf100_clk_info *info = &clk->eng[idx];
+       struct nvkm_device *device = clk->base.subdev.device;
        if (info->ssel) {
-               nv_mask(clk, 0x137100, (1 << idx), info->ssel);
+               nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
                nv_wait(clk, 0x137100, (1 << idx), info->ssel);
        }
 }
@@ -373,7 +381,8 @@ static void
 gf100_clk_prog_4(struct gf100_clk *clk, int idx)
 {
        struct gf100_clk_info *info = &clk->eng[idx];
-       nv_mask(clk, 0x137250 + (idx * 0x04), 0x00003f3f, info->mdiv);
+       struct nvkm_device *device = clk->base.subdev.device;
+       nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f3f, info->mdiv);
 }
 
 static int
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index 7723e9379ab8d63690f9717421345710069ca630..d2b7d6ec1c2b5455f0ce6cbca783de186b7b1951 100644
@@ -48,7 +48,8 @@ static u32 read_pll(struct gk104_clk *, u32);
 static u32
 read_vco(struct gk104_clk *clk, u32 dsrc)
 {
-       u32 ssrc = nv_rd32(clk, dsrc);
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 ssrc = nvkm_rd32(device, dsrc);
        if (!(ssrc & 0x00000100))
                return read_pll(clk, 0x00e800);
        return read_pll(clk, 0x00e820);
@@ -57,8 +58,9 @@ read_vco(struct gk104_clk *clk, u32 dsrc)
 static u32
 read_pll(struct gk104_clk *clk, u32 pll)
 {
-       u32 ctrl = nv_rd32(clk, pll + 0x00);
-       u32 coef = nv_rd32(clk, pll + 0x04);
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 ctrl = nvkm_rd32(device, pll + 0x00);
+       u32 coef = nvkm_rd32(device, pll + 0x04);
        u32 P = (coef & 0x003f0000) >> 16;
        u32 N = (coef & 0x0000ff00) >> 8;
        u32 M = (coef & 0x000000ff) >> 0;
@@ -71,7 +73,7 @@ read_pll(struct gk104_clk *clk, u32 pll)
        switch (pll) {
        case 0x00e800:
        case 0x00e820:
-               sclk = nv_device(clk)->crystal;
+               sclk = device->crystal;
                P = 1;
                break;
        case 0x132000:
@@ -80,7 +82,7 @@ read_pll(struct gk104_clk *clk, u32 pll)
                break;
        case 0x132020:
                sclk = read_div(clk, 0, 0x137320, 0x137330);
-               fN   = nv_rd32(clk, pll + 0x10) >> 16;
+               fN   = nvkm_rd32(device, pll + 0x10) >> 16;
                break;
        case 0x137000:
        case 0x137020:
@@ -102,13 +104,14 @@ read_pll(struct gk104_clk *clk, u32 pll)
 static u32
 read_div(struct gk104_clk *clk, int doff, u32 dsrc, u32 dctl)
 {
-       u32 ssrc = nv_rd32(clk, dsrc + (doff * 4));
-       u32 sctl = nv_rd32(clk, dctl + (doff * 4));
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
+       u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
 
        switch (ssrc & 0x00000003) {
        case 0:
                if ((ssrc & 0x00030000) != 0x00030000)
-                       return nv_device(clk)->crystal;
+                       return device->crystal;
                return 108000;
        case 2:
                return 100000;
@@ -128,7 +131,8 @@ read_div(struct gk104_clk *clk, int doff, u32 dsrc, u32 dctl)
 static u32
 read_mem(struct gk104_clk *clk)
 {
-       switch (nv_rd32(clk, 0x1373f4) & 0x0000000f) {
+       struct nvkm_device *device = clk->base.subdev.device;
+       switch (nvkm_rd32(device, 0x1373f4) & 0x0000000f) {
        case 1: return read_pll(clk, 0x132020);
        case 2: return read_pll(clk, 0x132000);
        default:
@@ -139,11 +143,12 @@ read_mem(struct gk104_clk *clk)
 static u32
 read_clk(struct gk104_clk *clk, int idx)
 {
-       u32 sctl = nv_rd32(clk, 0x137250 + (idx * 4));
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
        u32 sclk, sdiv;
 
        if (idx < 7) {
-               u32 ssel = nv_rd32(clk, 0x137100);
+               u32 ssel = nvkm_rd32(device, 0x137100);
                if (ssel & (1 << idx)) {
                        sclk = read_pll(clk, 0x137000 + (idx * 0x20));
                        sdiv = 1;
@@ -152,7 +157,7 @@ read_clk(struct gk104_clk *clk, int idx)
                        sdiv = 0;
                }
        } else {
-               u32 ssrc = nv_rd32(clk, 0x137160 + (idx * 0x04));
+               u32 ssrc = nvkm_rd32(device, 0x137160 + (idx * 0x04));
                if ((ssrc & 0x00000003) == 0x00000003) {
                        sclk = read_div(clk, idx, 0x137160, 0x1371d0);
                        if (ssrc & 0x00000100) {
@@ -183,7 +188,7 @@ static int
 gk104_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
        struct gk104_clk *clk = container_of(obj, typeof(*clk), base);
-       struct nvkm_device *device = nv_device(clk);
+       struct nvkm_device *device = clk->base.subdev.device;
 
        switch (src) {
        case nv_clk_src_crystal:
@@ -349,37 +354,41 @@ static void
 gk104_clk_prog_0(struct gk104_clk *clk, int idx)
 {
        struct gk104_clk_info *info = &clk->eng[idx];
+       struct nvkm_device *device = clk->base.subdev.device;
        if (!info->ssel) {
-               nv_mask(clk, 0x1371d0 + (idx * 0x04), 0x8000003f, info->ddiv);
-               nv_wr32(clk, 0x137160 + (idx * 0x04), info->dsrc);
+               nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x8000003f, info->ddiv);
+               nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
        }
 }
 
 static void
 gk104_clk_prog_1_0(struct gk104_clk *clk, int idx)
 {
-       nv_mask(clk, 0x137100, (1 << idx), 0x00000000);
+       struct nvkm_device *device = clk->base.subdev.device;
+       nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
        nv_wait(clk, 0x137100, (1 << idx), 0x00000000);
 }
 
 static void
 gk104_clk_prog_1_1(struct gk104_clk *clk, int idx)
 {
-       nv_mask(clk, 0x137160 + (idx * 0x04), 0x00000100, 0x00000000);
+       struct nvkm_device *device = clk->base.subdev.device;
+       nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000000);
 }
 
 static void
 gk104_clk_prog_2(struct gk104_clk *clk, int idx)
 {
        struct gk104_clk_info *info = &clk->eng[idx];
+       struct nvkm_device *device = clk->base.subdev.device;
        const u32 addr = 0x137000 + (idx * 0x20);
-       nv_mask(clk, addr + 0x00, 0x00000004, 0x00000000);
-       nv_mask(clk, addr + 0x00, 0x00000001, 0x00000000);
+       nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
+       nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
        if (info->coef) {
-               nv_wr32(clk, addr + 0x04, info->coef);
-               nv_mask(clk, addr + 0x00, 0x00000001, 0x00000001);
+               nvkm_wr32(device, addr + 0x04, info->coef);
+               nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
                nv_wait(clk, addr + 0x00, 0x00020000, 0x00020000);
-               nv_mask(clk, addr + 0x00, 0x00020004, 0x00000004);
+               nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
        }
 }
 
@@ -387,18 +396,20 @@ static void
 gk104_clk_prog_3(struct gk104_clk *clk, int idx)
 {
        struct gk104_clk_info *info = &clk->eng[idx];
+       struct nvkm_device *device = clk->base.subdev.device;
        if (info->ssel)
-               nv_mask(clk, 0x137250 + (idx * 0x04), 0x00003f00, info->mdiv);
+               nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f00, info->mdiv);
        else
-               nv_mask(clk, 0x137250 + (idx * 0x04), 0x0000003f, info->mdiv);
+               nvkm_mask(device, 0x137250 + (idx * 0x04), 0x0000003f, info->mdiv);
 }
 
 static void
 gk104_clk_prog_4_0(struct gk104_clk *clk, int idx)
 {
        struct gk104_clk_info *info = &clk->eng[idx];
+       struct nvkm_device *device = clk->base.subdev.device;
        if (info->ssel) {
-               nv_mask(clk, 0x137100, (1 << idx), info->ssel);
+               nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
                nv_wait(clk, 0x137100, (1 << idx), info->ssel);
        }
 }
@@ -407,9 +418,10 @@ static void
 gk104_clk_prog_4_1(struct gk104_clk *clk, int idx)
 {
        struct gk104_clk_info *info = &clk->eng[idx];
+       struct nvkm_device *device = clk->base.subdev.device;
        if (info->ssel) {
-               nv_mask(clk, 0x137160 + (idx * 0x04), 0x40000000, 0x40000000);
-               nv_mask(clk, 0x137160 + (idx * 0x04), 0x00000100, 0x00000100);
+               nvkm_mask(device, 0x137160 + (idx * 0x04), 0x40000000, 0x40000000);
+               nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000100);
        }
 }
 
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
index 2019368775dac6f3400823f713e133b8a4e7d92b..6d36d094b8f2e6de48b5c0a11f2f747127a5afa5 100644
@@ -126,9 +126,10 @@ struct gk20a_clk {
 static void
 gk20a_pllg_read_mnp(struct gk20a_clk *clk)
 {
+       struct nvkm_device *device = clk->base.subdev.device;
        u32 val;
 
-       val = nv_rd32(clk, GPCPLL_COEFF);
+       val = nvkm_rd32(device, GPCPLL_COEFF);
        clk->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
        clk->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
        clk->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
@@ -265,51 +266,52 @@ found_match:
 static int
 gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
 {
+       struct nvkm_device *device = clk->base.subdev.device;
        u32 val;
        int ramp_timeout;
 
        /* get old coefficients */
-       val = nv_rd32(clk, GPCPLL_COEFF);
+       val = nvkm_rd32(device, GPCPLL_COEFF);
        /* do nothing if NDIV is the same */
        if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
                return 0;
 
        /* setup */
-       nv_mask(clk, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
+       nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
                0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
-       nv_mask(clk, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
+       nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
                0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);
 
        /* pll slowdown mode */
-       nv_mask(clk, GPCPLL_NDIV_SLOWDOWN,
+       nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
                BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
                BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
 
        /* new ndiv ready for ramp */
-       val = nv_rd32(clk, GPCPLL_COEFF);
+       val = nvkm_rd32(device, GPCPLL_COEFF);
        val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
        val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
        udelay(1);
-       nv_wr32(clk, GPCPLL_COEFF, val);
+       nvkm_wr32(device, GPCPLL_COEFF, val);
 
        /* dynamic ramp to new ndiv */
-       val = nv_rd32(clk, GPCPLL_NDIV_SLOWDOWN);
+       val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
        val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
        udelay(1);
-       nv_wr32(clk, GPCPLL_NDIV_SLOWDOWN, val);
+       nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val);
 
        for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
                udelay(1);
-               val = nv_rd32(clk, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
+               val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
                if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
                        break;
        }
 
        /* exit slowdown mode */
-       nv_mask(clk, GPCPLL_NDIV_SLOWDOWN,
+       nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
                BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
                BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
-       nv_rd32(clk, GPCPLL_NDIV_SLOWDOWN);
+       nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
 
        if (ramp_timeout <= 0) {
                nv_error(clk, "gpcpll dynamic ramp timeout\n");
@@ -322,30 +324,33 @@ gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
 static void
 _gk20a_pllg_enable(struct gk20a_clk *clk)
 {
-       nv_mask(clk, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
-       nv_rd32(clk, GPCPLL_CFG);
+       struct nvkm_device *device = clk->base.subdev.device;
+       nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
+       nvkm_rd32(device, GPCPLL_CFG);
 }
 
 static void
 _gk20a_pllg_disable(struct gk20a_clk *clk)
 {
-       nv_mask(clk, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
-       nv_rd32(clk, GPCPLL_CFG);
+       struct nvkm_device *device = clk->base.subdev.device;
+       nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
+       nvkm_rd32(device, GPCPLL_CFG);
 }
 
 static int
 _gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
 {
+       struct nvkm_device *device = clk->base.subdev.device;
        u32 val, cfg;
        u32 m_old, pl_old, n_lo;
 
        /* get old coefficients */
-       val = nv_rd32(clk, GPCPLL_COEFF);
+       val = nvkm_rd32(device, GPCPLL_COEFF);
        m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
        pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
 
        /* do NDIV slide if there is no change in M and PL */
-       cfg = nv_rd32(clk, GPCPLL_CFG);
+       cfg = nvkm_rd32(device, GPCPLL_CFG);
        if (allow_slide && clk->m == m_old && clk->pl == pl_old &&
            (cfg & GPCPLL_CFG_ENABLE)) {
                return gk20a_pllg_slide(clk, clk->n);
@@ -362,21 +367,21 @@ _gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
        }
 
        /* split FO-to-bypass jump in halfs by setting out divider 1:2 */
-       nv_mask(clk, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+       nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
                0x2 << GPC2CLK_OUT_VCODIV_SHIFT);
 
        /* put PLL in bypass before programming it */
-       val = nv_rd32(clk, SEL_VCO);
+       val = nvkm_rd32(device, SEL_VCO);
        val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
        udelay(2);
-       nv_wr32(clk, SEL_VCO, val);
+       nvkm_wr32(device, SEL_VCO, val);
 
        /* get out from IDDQ */
-       val = nv_rd32(clk, GPCPLL_CFG);
+       val = nvkm_rd32(device, GPCPLL_CFG);
        if (val & GPCPLL_CFG_IDDQ) {
                val &= ~GPCPLL_CFG_IDDQ;
-               nv_wr32(clk, GPCPLL_CFG, val);
-               nv_rd32(clk, GPCPLL_CFG);
+               nvkm_wr32(device, GPCPLL_CFG, val);
+               nvkm_rd32(device, GPCPLL_CFG);
                udelay(2);
        }
 
@@ -390,14 +395,14 @@ _gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
        val = clk->m << GPCPLL_COEFF_M_SHIFT;
        val |= (allow_slide ? n_lo : clk->n) << GPCPLL_COEFF_N_SHIFT;
        val |= clk->pl << GPCPLL_COEFF_P_SHIFT;
-       nv_wr32(clk, GPCPLL_COEFF, val);
+       nvkm_wr32(device, GPCPLL_COEFF, val);
 
        _gk20a_pllg_enable(clk);
 
-       val = nv_rd32(clk, GPCPLL_CFG);
+       val = nvkm_rd32(device, GPCPLL_CFG);
        if (val & GPCPLL_CFG_LOCK_DET_OFF) {
                val &= ~GPCPLL_CFG_LOCK_DET_OFF;
-               nv_wr32(clk, GPCPLL_CFG, val);
+               nvkm_wr32(device, GPCPLL_CFG, val);
        }
 
        if (!nvkm_timer_wait_eq(clk, 300000, GPCPLL_CFG, GPCPLL_CFG_LOCK,
@@ -407,13 +412,13 @@ _gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
        }
 
        /* switch to VCO mode */
-       nv_mask(clk, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+       nvkm_mask(device, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
 
        /* restore out divider 1:1 */
-       val = nv_rd32(clk, GPC2CLK_OUT);
+       val = nvkm_rd32(device, GPC2CLK_OUT);
        val &= ~GPC2CLK_OUT_VCODIV_MASK;
        udelay(2);
-       nv_wr32(clk, GPC2CLK_OUT, val);
+       nvkm_wr32(device, GPC2CLK_OUT, val);
 
        /* slide up to new NDIV */
        return allow_slide ? gk20a_pllg_slide(clk, clk->n) : 0;
@@ -434,14 +439,15 @@ gk20a_pllg_program_mnp(struct gk20a_clk *clk)
 static void
 gk20a_pllg_disable(struct gk20a_clk *clk)
 {
+       struct nvkm_device *device = clk->base.subdev.device;
        u32 val;
 
        /* slide to VCO min */
-       val = nv_rd32(clk, GPCPLL_CFG);
+       val = nvkm_rd32(device, GPCPLL_CFG);
        if (val & GPCPLL_CFG_ENABLE) {
                u32 coeff, m, n_lo;
 
-               coeff = nv_rd32(clk, GPCPLL_COEFF);
+               coeff = nvkm_rd32(device, GPCPLL_COEFF);
                m = (coeff >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
                n_lo = DIV_ROUND_UP(m * clk->params->min_vco,
                                    clk->parent_rate / MHZ);
@@ -449,7 +455,7 @@ gk20a_pllg_disable(struct gk20a_clk *clk)
        }
 
        /* put PLL in bypass before disabling it */
-       nv_mask(clk, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+       nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
 
        _gk20a_pllg_disable(clk);
 }
@@ -561,10 +567,11 @@ static int
 gk20a_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
        struct gk20a_clk *clk = container_of(obj, typeof(*clk), base);
+       struct nvkm_device *device = clk->base.subdev.device;
 
        switch (src) {
        case nv_clk_src_crystal:
-               return nv_device(clk)->crystal;
+               return device->crystal;
        case nv_clk_src_gpc:
                gk20a_pllg_read_mnp(clk);
                return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
@@ -613,9 +620,10 @@ static int
 gk20a_clk_init(struct nvkm_object *object)
 {
        struct gk20a_clk *clk = (void *)object;
+       struct nvkm_device *device = clk->base.subdev.device;
        int ret;
 
-       nv_mask(clk, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
+       nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
 
        ret = nvkm_clk_init(&clk->base);
        if (ret)
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index e17135a1ec83971dd7ae2b14c5bfef9482a47d90..364c9b0df728a19678fa101fe6272e1f08bd9aa1 100644
@@ -41,11 +41,12 @@ static u32 read_pll(struct gt215_clk *, int, u32);
 static u32
 read_vco(struct gt215_clk *clk, int idx)
 {
-       u32 sctl = nv_rd32(clk, 0x4120 + (idx * 4));
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
 
        switch (sctl & 0x00000030) {
        case 0x00000000:
-               return nv_device(clk)->crystal;
+               return device->crystal;
        case 0x00000020:
                return read_pll(clk, 0x41, 0x00e820);
        case 0x00000030:
@@ -58,19 +59,20 @@ read_vco(struct gt215_clk *clk, int idx)
 static u32
 read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
 {
+       struct nvkm_device *device = clk->base.subdev.device;
        u32 sctl, sdiv, sclk;
 
        /* refclk for the 0xe8xx plls is a fixed frequency */
        if (idx >= 0x40) {
-               if (nv_device(clk)->chipset == 0xaf) {
+               if (device->chipset == 0xaf) {
                        /* no joke.. seriously.. sigh.. */
-                       return nv_rd32(clk, 0x00471c) * 1000;
+                       return nvkm_rd32(device, 0x00471c) * 1000;
                }
 
-               return nv_device(clk)->crystal;
+               return device->crystal;
        }
 
-       sctl = nv_rd32(clk, 0x4120 + (idx * 4));
+       sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
        if (!ignore_en && !(sctl & 0x00000100))
                return 0;
 
@@ -82,7 +84,7 @@ read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
        switch (sctl & 0x00003000) {
        case 0x00000000:
                if (!(sctl & 0x00000200))
-                       return nv_device(clk)->crystal;
+                       return device->crystal;
                return 0;
        case 0x00002000:
                if (sctl & 0x00000040)
@@ -104,12 +106,13 @@ read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
 static u32
 read_pll(struct gt215_clk *clk, int idx, u32 pll)
 {
-       u32 ctrl = nv_rd32(clk, pll + 0);
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 ctrl = nvkm_rd32(device, pll + 0);
        u32 sclk = 0, P = 1, N = 1, M = 1;
 
        if (!(ctrl & 0x00000008)) {
                if (ctrl & 0x00000001) {
-                       u32 coef = nv_rd32(clk, pll + 4);
+                       u32 coef = nvkm_rd32(device, pll + 4);
                        M = (coef & 0x000000ff) >> 0;
                        N = (coef & 0x0000ff00) >> 8;
                        P = (coef & 0x003f0000) >> 16;
@@ -136,11 +139,12 @@ static int
 gt215_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
        struct gt215_clk *clk = container_of(obj, typeof(*clk), base);
+       struct nvkm_device *device = clk->base.subdev.device;
        u32 hsrc;
 
        switch (src) {
        case nv_clk_src_crystal:
-               return nv_device(clk)->crystal;
+               return device->crystal;
        case nv_clk_src_core:
        case nv_clk_src_core_intm:
                return read_pll(clk, 0x00, 0x4200);
@@ -155,7 +159,7 @@ gt215_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
        case nv_clk_src_daemon:
                return read_clk(clk, 0x25, false);
        case nv_clk_src_host:
-               hsrc = (nv_rd32(clk, 0xc040) & 0x30000000) >> 28;
+               hsrc = (nvkm_rd32(device, 0xc040) & 0x30000000) >> 28;
                switch (hsrc) {
                case 0:
                        return read_clk(clk, 0x1d, false);
@@ -297,11 +301,12 @@ calc_host(struct gt215_clk *clk, struct nvkm_cstate *cstate)
 int
 gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
 {
+       struct nvkm_device *device = clk->subdev.device;
        struct nvkm_fifo *fifo = nvkm_fifo(clk);
 
        /* halt and idle execution engines */
-       nv_mask(clk, 0x020060, 0x00070000, 0x00000000);
-       nv_mask(clk, 0x002504, 0x00000001, 0x00000001);
+       nvkm_mask(device, 0x020060, 0x00070000, 0x00000000);
+       nvkm_mask(device, 0x002504, 0x00000001, 0x00000001);
        /* Wait until the interrupt handler is finished */
        if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000))
                return -EBUSY;
@@ -320,26 +325,29 @@ gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
 void
 gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags)
 {
+       struct nvkm_device *device = clk->subdev.device;
        struct nvkm_fifo *fifo = nvkm_fifo(clk);
 
        if (fifo && flags)
                fifo->start(fifo, flags);
 
-       nv_mask(clk, 0x002504, 0x00000001, 0x00000000);
-       nv_mask(clk, 0x020060, 0x00070000, 0x00040000);
+       nvkm_mask(device, 0x002504, 0x00000001, 0x00000000);
+       nvkm_mask(device, 0x020060, 0x00070000, 0x00040000);
 }
 
 static void
 disable_clk_src(struct gt215_clk *clk, u32 src)
 {
-       nv_mask(clk, src, 0x00000100, 0x00000000);
-       nv_mask(clk, src, 0x00000001, 0x00000000);
+       struct nvkm_device *device = clk->base.subdev.device;
+       nvkm_mask(device, src, 0x00000100, 0x00000000);
+       nvkm_mask(device, src, 0x00000001, 0x00000000);
 }
 
 static void
 prog_pll(struct gt215_clk *clk, int idx, u32 pll, int dom)
 {
        struct gt215_clk_info *info = &clk->eng[dom];
+       struct nvkm_device *device = clk->base.subdev.device;
        const u32 src0 = 0x004120 + (idx * 4);
        const u32 src1 = 0x004160 + (idx * 4);
        const u32 ctrl = pll + 0;
@@ -348,30 +356,30 @@ prog_pll(struct gt215_clk *clk, int idx, u32 pll, int dom)
 
        if (info->pll) {
                /* Always start from a non-PLL clock */
-               bypass = nv_rd32(clk, ctrl)  & 0x00000008;
+               bypass = nvkm_rd32(device, ctrl)  & 0x00000008;
                if (!bypass) {
-                       nv_mask(clk, src1, 0x00000101, 0x00000101);
-                       nv_mask(clk, ctrl, 0x00000008, 0x00000008);
+                       nvkm_mask(device, src1, 0x00000101, 0x00000101);
+                       nvkm_mask(device, ctrl, 0x00000008, 0x00000008);
                        udelay(20);
                }
 
-               nv_mask(clk, src0, 0x003f3141, 0x00000101 | info->clk);
-               nv_wr32(clk, coef, info->pll);
-               nv_mask(clk, ctrl, 0x00000015, 0x00000015);
-               nv_mask(clk, ctrl, 0x00000010, 0x00000000);
+               nvkm_mask(device, src0, 0x003f3141, 0x00000101 | info->clk);
+               nvkm_wr32(device, coef, info->pll);
+               nvkm_mask(device, ctrl, 0x00000015, 0x00000015);
+               nvkm_mask(device, ctrl, 0x00000010, 0x00000000);
                if (!nv_wait(clk, ctrl, 0x00020000, 0x00020000)) {
-                       nv_mask(clk, ctrl, 0x00000010, 0x00000010);
-                       nv_mask(clk, src0, 0x00000101, 0x00000000);
+                       nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
+                       nvkm_mask(device, src0, 0x00000101, 0x00000000);
                        return;
                }
-               nv_mask(clk, ctrl, 0x00000010, 0x00000010);
-               nv_mask(clk, ctrl, 0x00000008, 0x00000000);
+               nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
+               nvkm_mask(device, ctrl, 0x00000008, 0x00000000);
                disable_clk_src(clk, src1);
        } else {
-               nv_mask(clk, src1, 0x003f3141, 0x00000101 | info->clk);
-               nv_mask(clk, ctrl, 0x00000018, 0x00000018);
+               nvkm_mask(device, src1, 0x003f3141, 0x00000101 | info->clk);
+               nvkm_mask(device, ctrl, 0x00000018, 0x00000018);
                udelay(20);
-               nv_mask(clk, ctrl, 0x00000001, 0x00000000);
+               nvkm_mask(device, ctrl, 0x00000001, 0x00000000);
                disable_clk_src(clk, src0);
        }
 }
@@ -380,26 +388,28 @@ static void
 prog_clk(struct gt215_clk *clk, int idx, int dom)
 {
        struct gt215_clk_info *info = &clk->eng[dom];
-       nv_mask(clk, 0x004120 + (idx * 4), 0x003f3141, 0x00000101 | info->clk);
+       struct nvkm_device *device = clk->base.subdev.device;
+       nvkm_mask(device, 0x004120 + (idx * 4), 0x003f3141, 0x00000101 | info->clk);
 }
 
 static void
 prog_host(struct gt215_clk *clk)
 {
        struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
-       u32 hsrc = (nv_rd32(clk, 0xc040));
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 hsrc = (nvkm_rd32(device, 0xc040));
 
        switch (info->host_out) {
        case NVA3_HOST_277:
                if ((hsrc & 0x30000000) == 0) {
-                       nv_wr32(clk, 0xc040, hsrc | 0x20000000);
+                       nvkm_wr32(device, 0xc040, hsrc | 0x20000000);
                        disable_clk_src(clk, 0x4194);
                }
                break;
        case NVA3_HOST_CLK:
                prog_clk(clk, 0x1d, nv_clk_src_host);
                if ((hsrc & 0x30000000) >= 0x20000000) {
-                       nv_wr32(clk, 0xc040, hsrc & ~0x30000000);
+                       nvkm_wr32(device, 0xc040, hsrc & ~0x30000000);
                }
                break;
        default:
@@ -407,22 +417,23 @@ prog_host(struct gt215_clk *clk)
        }
 
        /* This seems to be a clock gating factor on idle, always set to 64 */
-       nv_wr32(clk, 0xc044, 0x3e);
+       nvkm_wr32(device, 0xc044, 0x3e);
 }
 
 static void
 prog_core(struct gt215_clk *clk, int dom)
 {
        struct gt215_clk_info *info = &clk->eng[dom];
-       u32 fb_delay = nv_rd32(clk, 0x10002c);
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 fb_delay = nvkm_rd32(device, 0x10002c);
 
        if (fb_delay < info->fb_delay)
-               nv_wr32(clk, 0x10002c, info->fb_delay);
+               nvkm_wr32(device, 0x10002c, info->fb_delay);
 
        prog_pll(clk, 0x00, 0x004200, dom);
 
        if (fb_delay > info->fb_delay)
-               nv_wr32(clk, 0x10002c, info->fb_delay);
+               nvkm_wr32(device, 0x10002c, info->fb_delay);
 }
 
 static int
drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
index 7203bb3e0a9a446769ddb6a5acc707a3b607a886..9d7c118e978454180fdb4830b333a24e77940925 100644
@@ -40,14 +40,16 @@ struct mcp77_clk {
 static u32
 read_div(struct mcp77_clk *clk)
 {
-       return nv_rd32(clk, 0x004600);
+       struct nvkm_device *device = clk->base.subdev.device;
+       return nvkm_rd32(device, 0x004600);
 }
 
 static u32
 read_pll(struct mcp77_clk *clk, u32 base)
 {
-       u32 ctrl = nv_rd32(clk, base + 0);
-       u32 coef = nv_rd32(clk, base + 4);
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 ctrl = nvkm_rd32(device, base + 0);
+       u32 coef = nvkm_rd32(device, base + 4);
        u32 ref = clk->base.read(&clk->base, nv_clk_src_href);
        u32 post_div = 0;
        u32 clock = 0;
@@ -55,10 +57,10 @@ read_pll(struct mcp77_clk *clk, u32 base)
 
        switch (base){
        case 0x4020:
-               post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16);
+               post_div = 1 << ((nvkm_rd32(device, 0x4070) & 0x000f0000) >> 16);
                break;
        case 0x4028:
-               post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16;
+               post_div = (nvkm_rd32(device, 0x4040) & 0x000f0000) >> 16;
                break;
        default:
                break;
@@ -78,12 +80,13 @@ static int
 mcp77_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
        struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
-       u32 mast = nv_rd32(clk, 0x00c054);
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 mast = nvkm_rd32(device, 0x00c054);
        u32 P = 0;
 
        switch (src) {
        case nv_clk_src_crystal:
-               return nv_device(clk)->crystal;
+               return device->crystal;
        case nv_clk_src_href:
                return 100000; /* PCIE reference clock */
        case nv_clk_src_hclkm4:
@@ -99,7 +102,7 @@ mcp77_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
                }
                break;
        case nv_clk_src_core:
-               P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
+               P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
 
                switch (mast & 0x00000003) {
                case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
@@ -122,7 +125,7 @@ mcp77_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
                default: return 0;
                }
        case nv_clk_src_shader:
-               P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
+               P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
                switch (mast & 0x00000030) {
                case 0x00000000:
                        if (mast & 0x00000040)
@@ -293,6 +296,7 @@ static int
 mcp77_clk_prog(struct nvkm_clk *obj)
 {
        struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
+       struct nvkm_device *device = clk->base.subdev.device;
        u32 pllmask = 0, mast;
        unsigned long flags;
        unsigned long *f = &flags;
@@ -303,19 +307,19 @@ mcp77_clk_prog(struct nvkm_clk *obj)
                goto out;
 
        /* First switch to safe clocks: href */
-       mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640);
+       mast = nvkm_mask(device, 0xc054, 0x03400e70, 0x03400640);
        mast &= ~0x00400e73;
        mast |= 0x03000000;
 
        switch (clk->csrc) {
        case nv_clk_src_hclkm4:
-               nv_mask(clk, 0x4028, 0x00070000, clk->cctrl);
+               nvkm_mask(device, 0x4028, 0x00070000, clk->cctrl);
                mast |= 0x00000002;
                break;
        case nv_clk_src_core:
-               nv_wr32(clk, 0x402c, clk->ccoef);
-               nv_wr32(clk, 0x4028, 0x80000000 | clk->cctrl);
-               nv_wr32(clk, 0x4040, clk->cpost);
+               nvkm_wr32(device, 0x402c, clk->ccoef);
+               nvkm_wr32(device, 0x4028, 0x80000000 | clk->cctrl);
+               nvkm_wr32(device, 0x4040, clk->cpost);
                pllmask |= (0x3 << 8);
                mast |= 0x00000003;
                break;
@@ -326,17 +330,17 @@ mcp77_clk_prog(struct nvkm_clk *obj)
 
        switch (clk->ssrc) {
        case nv_clk_src_href:
-               nv_mask(clk, 0x4020, 0x00070000, 0x00000000);
+               nvkm_mask(device, 0x4020, 0x00070000, 0x00000000);
                /* mast |= 0x00000000; */
                break;
        case nv_clk_src_core:
-               nv_mask(clk, 0x4020, 0x00070000, clk->sctrl);
+               nvkm_mask(device, 0x4020, 0x00070000, clk->sctrl);
                mast |= 0x00000020;
                break;
        case nv_clk_src_shader:
-               nv_wr32(clk, 0x4024, clk->scoef);
-               nv_wr32(clk, 0x4020, 0x80000000 | clk->sctrl);
-               nv_wr32(clk, 0x4070, clk->spost);
+               nvkm_wr32(device, 0x4024, clk->scoef);
+               nvkm_wr32(device, 0x4020, 0x80000000 | clk->sctrl);
+               nvkm_wr32(device, 0x4070, clk->spost);
                pllmask |= (0x3 << 12);
                mast |= 0x00000030;
                break;
@@ -354,21 +358,21 @@ mcp77_clk_prog(struct nvkm_clk *obj)
        case nv_clk_src_cclk:
                mast |= 0x00400000;
        default:
-               nv_wr32(clk, 0x4600, clk->vdiv);
+               nvkm_wr32(device, 0x4600, clk->vdiv);
        }
 
-       nv_wr32(clk, 0xc054, mast);
+       nvkm_wr32(device, 0xc054, mast);
 
 resume:
        /* Disable some PLLs and dividers when unused */
        if (clk->csrc != nv_clk_src_core) {
-               nv_wr32(clk, 0x4040, 0x00000000);
-               nv_mask(clk, 0x4028, 0x80000000, 0x00000000);
+               nvkm_wr32(device, 0x4040, 0x00000000);
+               nvkm_mask(device, 0x4028, 0x80000000, 0x00000000);
        }
 
        if (clk->ssrc != nv_clk_src_shader) {
-               nv_wr32(clk, 0x4070, 0x00000000);
-               nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
+               nvkm_wr32(device, 0x4070, 0x00000000);
+               nvkm_mask(device, 0x4020, 0x80000000, 0x00000000);
        }
 
 out:
drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
index eb4a8affd0ecc24a1d4215d5d4b52f7788c7f1eb..32d59f40f5d389ed442b15648d1fdf7e832e687b 100644
@@ -48,7 +48,8 @@ nv40_domain[] = {
 static u32
 read_pll_1(struct nv40_clk *clk, u32 reg)
 {
-       u32 ctrl = nv_rd32(clk, reg + 0x00);
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 ctrl = nvkm_rd32(device, reg + 0x00);
        int P = (ctrl & 0x00070000) >> 16;
        int N = (ctrl & 0x0000ff00) >> 8;
        int M = (ctrl & 0x000000ff) >> 0;
@@ -63,8 +64,9 @@ read_pll_1(struct nv40_clk *clk, u32 reg)
 static u32
 read_pll_2(struct nv40_clk *clk, u32 reg)
 {
-       u32 ctrl = nv_rd32(clk, reg + 0x00);
-       u32 coef = nv_rd32(clk, reg + 0x04);
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 ctrl = nvkm_rd32(device, reg + 0x00);
+       u32 coef = nvkm_rd32(device, reg + 0x04);
        int N2 = (coef & 0xff000000) >> 24;
        int M2 = (coef & 0x00ff0000) >> 16;
        int N1 = (coef & 0x0000ff00) >> 8;
@@ -104,11 +106,12 @@ static int
 nv40_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
        struct nv40_clk *clk = container_of(obj, typeof(*clk), base);
-       u32 mast = nv_rd32(clk, 0x00c040);
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 mast = nvkm_rd32(device, 0x00c040);
 
        switch (src) {
        case nv_clk_src_crystal:
-               return nv_device(clk)->crystal;
+               return device->crystal;
        case nv_clk_src_href:
                return 100000; /*XXX: PCIE/AGP differ*/
        case nv_clk_src_core:
@@ -191,12 +194,13 @@ static int
 nv40_clk_prog(struct nvkm_clk *obj)
 {
        struct nv40_clk *clk = container_of(obj, typeof(*clk), base);
-       nv_mask(clk, 0x00c040, 0x00000333, 0x00000000);
-       nv_wr32(clk, 0x004004, clk->npll_coef);
-       nv_mask(clk, 0x004000, 0xc0070100, clk->npll_ctrl);
-       nv_mask(clk, 0x004008, 0xc007ffff, clk->spll);
+       struct nvkm_device *device = clk->base.subdev.device;
+       nvkm_mask(device, 0x00c040, 0x00000333, 0x00000000);
+       nvkm_wr32(device, 0x004004, clk->npll_coef);
+       nvkm_mask(device, 0x004000, 0xc0070100, clk->npll_ctrl);
+       nvkm_mask(device, 0x004008, 0xc007ffff, clk->spll);
        mdelay(5);
-       nv_mask(clk, 0x00c040, 0x00000333, clk->ctrl);
+       nvkm_mask(device, 0x00c040, 0x00000333, clk->ctrl);
        return 0;
 }
 
drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
index 2b44ff4449094e8f8bf1ff41d6a29c2de708bba8..0d17de6f90e8197bc6bba55c8fa9f61b030c96c3 100644
 static u32
 read_div(struct nv50_clk *clk)
 {
-       switch (nv_device(clk)->chipset) {
+       struct nvkm_device *device = clk->base.subdev.device;
+       switch (device->chipset) {
        case 0x50: /* it exists, but only has bit 31, not the dividers.. */
        case 0x84:
        case 0x86:
        case 0x98:
        case 0xa0:
-               return nv_rd32(clk, 0x004700);
+               return nvkm_rd32(device, 0x004700);
        case 0x92:
        case 0x94:
        case 0x96:
-               return nv_rd32(clk, 0x004800);
+               return nvkm_rd32(device, 0x004800);
        default:
                return 0x00000000;
        }
@@ -50,11 +51,12 @@ read_div(struct nv50_clk *clk)
 static u32
 read_pll_src(struct nv50_clk *clk, u32 base)
 {
+       struct nvkm_device *device = clk->base.subdev.device;
        u32 coef, ref = clk->base.read(&clk->base, nv_clk_src_crystal);
-       u32 rsel = nv_rd32(clk, 0x00e18c);
+       u32 rsel = nvkm_rd32(device, 0x00e18c);
        int P, N, M, id;
 
-       switch (nv_device(clk)->chipset) {
+       switch (device->chipset) {
        case 0x50:
        case 0xa0:
                switch (base) {
@@ -67,7 +69,7 @@ read_pll_src(struct nv50_clk *clk, u32 base)
                        return 0;
                }
 
-               coef = nv_rd32(clk, 0x00e81c + (id * 0x0c));
+               coef = nvkm_rd32(device, 0x00e81c + (id * 0x0c));
                ref *=  (coef & 0x01000000) ? 2 : 4;
                P    =  (coef & 0x00070000) >> 16;
                N    = ((coef & 0x0000ff00) >> 8) + 1;
@@ -76,7 +78,7 @@ read_pll_src(struct nv50_clk *clk, u32 base)
        case 0x84:
        case 0x86:
        case 0x92:
-               coef = nv_rd32(clk, 0x00e81c);
+               coef = nvkm_rd32(device, 0x00e81c);
                P    = (coef & 0x00070000) >> 16;
                N    = (coef & 0x0000ff00) >> 8;
                M    = (coef & 0x000000ff) >> 0;
@@ -84,7 +86,7 @@ read_pll_src(struct nv50_clk *clk, u32 base)
        case 0x94:
        case 0x96:
        case 0x98:
-               rsel = nv_rd32(clk, 0x00c050);
+               rsel = nvkm_rd32(device, 0x00c050);
                switch (base) {
                case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
                case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
@@ -102,8 +104,8 @@ read_pll_src(struct nv50_clk *clk, u32 base)
                case 3: id = 0; break;
                }
 
-               coef =  nv_rd32(clk, 0x00e81c + (id * 0x28));
-               P    = (nv_rd32(clk, 0x00e824 + (id * 0x28)) >> 16) & 7;
+               coef =  nvkm_rd32(device, 0x00e81c + (id * 0x28));
+               P    = (nvkm_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7;
                P   += (coef & 0x00070000) >> 16;
                N    = (coef & 0x0000ff00) >> 8;
                M    = (coef & 0x000000ff) >> 0;
@@ -121,7 +123,8 @@ read_pll_src(struct nv50_clk *clk, u32 base)
 static u32
 read_pll_ref(struct nv50_clk *clk, u32 base)
 {
-       u32 src, mast = nv_rd32(clk, 0x00c040);
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 src, mast = nvkm_rd32(device, 0x00c040);
 
        switch (base) {
        case 0x004028:
@@ -152,16 +155,17 @@ read_pll_ref(struct nv50_clk *clk, u32 base)
 static u32
 read_pll(struct nv50_clk *clk, u32 base)
 {
-       u32 mast = nv_rd32(clk, 0x00c040);
-       u32 ctrl = nv_rd32(clk, base + 0);
-       u32 coef = nv_rd32(clk, base + 4);
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 mast = nvkm_rd32(device, 0x00c040);
+       u32 ctrl = nvkm_rd32(device, base + 0);
+       u32 coef = nvkm_rd32(device, base + 4);
        u32 ref = read_pll_ref(clk, base);
        u32 freq = 0;
        int N1, N2, M1, M2;
 
        if (base == 0x004028 && (mast & 0x00100000)) {
                /* wtf, appears to only disable post-divider on gt200 */
-               if (nv_device(clk)->chipset != 0xa0)
+               if (device->chipset != 0xa0)
                        return clk->base.read(&clk->base, nv_clk_src_dom6);
        }
 
@@ -186,12 +190,13 @@ static int
 nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
        struct nv50_clk *clk = container_of(obj, typeof(*clk), base);
-       u32 mast = nv_rd32(clk, 0x00c040);
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 mast = nvkm_rd32(device, 0x00c040);
        u32 P = 0;
 
        switch (src) {
        case nv_clk_src_crystal:
-               return nv_device(clk)->crystal;
+               return device->crystal;
        case nv_clk_src_href:
                return 100000; /* PCIE reference clock */
        case nv_clk_src_hclk:
@@ -210,7 +215,7 @@ nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
                break;
        case nv_clk_src_core:
                if (!(mast & 0x00100000))
-                       P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
+                       P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
                switch (mast & 0x00000003) {
                case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
                case 0x00000001: return clk->base.read(&clk->base, nv_clk_src_dom6);
@@ -219,7 +224,7 @@ nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
                }
                break;
        case nv_clk_src_shader:
-               P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
+               P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
                switch (mast & 0x00000030) {
                case 0x00000000:
                        if (mast & 0x00000080)
@@ -231,8 +236,8 @@ nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
                }
                break;
        case nv_clk_src_mem:
-               P = (nv_rd32(clk, 0x004008) & 0x00070000) >> 16;
-               if (nv_rd32(clk, 0x004008) & 0x00000200) {
+               P = (nvkm_rd32(device, 0x004008) & 0x00070000) >> 16;
+               if (nvkm_rd32(device, 0x004008) & 0x00000200) {
                        switch (mast & 0x0000c000) {
                        case 0x00000000:
                                return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
@@ -246,7 +251,7 @@ nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
                break;
        case nv_clk_src_vdec:
                P = (read_div(clk) & 0x00000700) >> 8;
-               switch (nv_device(clk)->chipset) {
+               switch (device->chipset) {
                case 0x84:
                case 0x86:
                case 0x92:
@@ -255,7 +260,7 @@ nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
                case 0xa0:
                        switch (mast & 0x00000c00) {
                        case 0x00000000:
-                               if (nv_device(clk)->chipset == 0xa0) /* wtf?? */
+                               if (device->chipset == 0xa0) /* wtf?? */
                                        return clk->base.read(&clk->base, nv_clk_src_core) >> P;
                                return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
                        case 0x00000400:
@@ -283,7 +288,7 @@ nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
                }
                break;
        case nv_clk_src_dom6:
-               switch (nv_device(clk)->chipset) {
+               switch (device->chipset) {
                case 0x50:
                case 0xa0:
                        return read_pll(clk, 0x00e810) >> 2;