1 /*
2 * core.c - DesignWare HS OTG Controller common routines
3 *
4 * Copyright (C) 2004-2013 Synopsys, Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions, and the following disclaimer,
11 * without modification.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The names of the above-listed copyright holders may not be used
16 * to endorse or promote products derived from this software without
17 * specific prior written permission.
18 *
19 * ALTERNATIVELY, this software may be distributed under the terms of the
20 * GNU General Public License ("GPL") as published by the Free Software
21 * Foundation; either version 2 of the License, or (at your option) any
22 * later version.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
25 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
28 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
29 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
30 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
31 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 /*
38 * The Core code provides basic services for accessing and managing the
39 * DWC_otg hardware. These services are used by both the Host Controller
40 * Driver and the Peripheral Controller Driver.
41 */
42 #include <linux/kernel.h>
43 #include <linux/module.h>
44 #include <linux/moduleparam.h>
45 #include <linux/spinlock.h>
46 #include <linux/interrupt.h>
47 #include <linux/dma-mapping.h>
48 #include <linux/delay.h>
49 #include <linux/io.h>
50 #include <linux/slab.h>
51 #include <linux/usb.h>
52
53 #include <linux/usb/hcd.h>
54 #include <linux/usb/ch11.h>
55
56 #include "core.h"
57 #include "hcd.h"
58
59 /**
60 * dwc2_enable_common_interrupts() - Initializes the common interrupts,
61 * used in both device and host modes
62 *
63 * @hsotg: Programming view of the DWC_otg controller
64 */
65 static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
66 {
67 u32 intmsk;
68
69 /* Clear any pending OTG Interrupts */
70 writel(0xffffffff, hsotg->regs + GOTGINT);
71
72 /* Clear any pending interrupts */
73 writel(0xffffffff, hsotg->regs + GINTSTS);
74
75 /* Enable the interrupts in the GINTMSK */
76 intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
77
78 if (hsotg->core_params->dma_enable <= 0)
79 intmsk |= GINTSTS_RXFLVL;
80
81 intmsk |= GINTSTS_CONIDSTSCHNG | GINTSTS_WKUPINT | GINTSTS_USBSUSP |
82 GINTSTS_SESSREQINT;
83
84 writel(intmsk, hsotg->regs + GINTMSK);
85 }
86
87 /*
88 * Initializes the FSLSPClkSel field of the HCFG register depending on the
89 * PHY type
90 */
91 static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
92 {
93 u32 hs_phy_type = hsotg->hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK;
94 u32 fs_phy_type = hsotg->hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK;
95 u32 hcfg, val;
96
97 if ((hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
98 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
99 hsotg->core_params->ulpi_fs_ls > 0) ||
100 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
101 /* Full speed PHY */
102 val = HCFG_FSLSPCLKSEL_48_MHZ;
103 } else {
104 /* High speed PHY running at full speed or high speed */
105 val = HCFG_FSLSPCLKSEL_30_60_MHZ;
106 }
107
108 dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
109 hcfg = readl(hsotg->regs + HCFG);
110 hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
111 hcfg |= val;
112 writel(hcfg, hsotg->regs + HCFG);
113 }
114
115 /*
116 * Do a soft reset of the core. Be careful with this because it
117 * resets all the internal state machines of the core.
118 */
119 static void dwc2_core_reset(struct dwc2_hsotg *hsotg)
120 {
121 u32 greset;
122 int count = 0;
123
124 dev_vdbg(hsotg->dev, "%s()\n", __func__);
125
126 /* Wait for AHB master IDLE state */
127 do {
128 usleep_range(20000, 40000);
129 greset = readl(hsotg->regs + GRSTCTL);
130 if (++count > 50) {
131 dev_warn(hsotg->dev,
132 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
133 __func__, greset);
134 return;
135 }
136 } while (!(greset & GRSTCTL_AHBIDLE));
137
138 /* Core Soft Reset */
139 count = 0;
140 greset |= GRSTCTL_CSFTRST;
141 writel(greset, hsotg->regs + GRSTCTL);
142 do {
143 usleep_range(20000, 40000);
144 greset = readl(hsotg->regs + GRSTCTL);
145 if (++count > 50) {
146 dev_warn(hsotg->dev,
147 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
148 __func__, greset);
149 break;
150 }
151 } while (greset & GRSTCTL_CSFTRST);
152
153 /*
154 * NOTE: This long sleep is _very_ important, otherwise the core will
155 * not stay in host mode after a connector ID change!
156 */
157 usleep_range(150000, 200000);
158 }
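/*
 * Both wait loops above follow the same pattern: poll a GRSTCTL bit with a
 * sleep between reads and give up after 50 tries. A hypothetical helper
 * (illustrative sketch only - dwc2_wait_grstctl_bit() is not part of this
 * driver) could express that pattern once:
 *
 *	static int dwc2_wait_grstctl_bit(struct dwc2_hsotg *hsotg, u32 bit,
 *					 bool set)
 *	{
 *		int count = 0;
 *		u32 greset;
 *
 *		do {
 *			usleep_range(20000, 40000);
 *			greset = readl(hsotg->regs + GRSTCTL);
 *			if (++count > 50)
 *				return -ETIMEDOUT;
 *		} while (set ? !(greset & bit) : (greset & bit));
 *
 *		return 0;
 *	}
 *
 * With it, the AHB idle wait above would read
 * dwc2_wait_grstctl_bit(hsotg, GRSTCTL_AHBIDLE, true) and the soft reset
 * wait would read dwc2_wait_grstctl_bit(hsotg, GRSTCTL_CSFTRST, false).
 */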
159
160 static void dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
161 {
162 u32 usbcfg, i2cctl;
163
164 /*
165 * core_init() is now called on every switch so only call the
166 * following for the first time through
167 */
168 if (select_phy) {
169 dev_dbg(hsotg->dev, "FS PHY selected\n");
170 usbcfg = readl(hsotg->regs + GUSBCFG);
171 usbcfg |= GUSBCFG_PHYSEL;
172 writel(usbcfg, hsotg->regs + GUSBCFG);
173
174 /* Reset after a PHY select */
175 dwc2_core_reset(hsotg);
176 }
177
178 /*
179 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48 MHz in FS. Also
180 * do this on HNP Dev/Host mode switches (done in dev_init and
181 * host_init).
182 */
183 if (dwc2_is_host_mode(hsotg))
184 dwc2_init_fs_ls_pclk_sel(hsotg);
185
186 if (hsotg->core_params->i2c_enable > 0) {
187 dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
188
189 /* Program GUSBCFG.OtgUtmiFsSel to I2C */
190 usbcfg = readl(hsotg->regs + GUSBCFG);
191 usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
192 writel(usbcfg, hsotg->regs + GUSBCFG);
193
194 /* Program GI2CCTL.I2CEn */
195 i2cctl = readl(hsotg->regs + GI2CCTL);
196 i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
197 i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
198 i2cctl &= ~GI2CCTL_I2CEN;
199 writel(i2cctl, hsotg->regs + GI2CCTL);
200 i2cctl |= GI2CCTL_I2CEN;
201 writel(i2cctl, hsotg->regs + GI2CCTL);
202 }
203 }
204
205 static void dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
206 {
207 u32 usbcfg;
208
209 if (!select_phy)
210 return;
211
212 usbcfg = readl(hsotg->regs + GUSBCFG);
213
214 /*
215 * HS PHY parameters. These parameters are preserved during soft reset
216 * so only program the first time. Do a soft reset immediately after
217 * setting phyif.
218 */
219 switch (hsotg->core_params->phy_type) {
220 case DWC2_PHY_TYPE_PARAM_ULPI:
221 /* ULPI interface */
222 dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
223 usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
224 usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
225 if (hsotg->core_params->phy_ulpi_ddr > 0)
226 usbcfg |= GUSBCFG_DDRSEL;
227 break;
228 case DWC2_PHY_TYPE_PARAM_UTMI:
229 /* UTMI+ interface */
230 dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
231 usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
232 if (hsotg->core_params->phy_utmi_width == 16)
233 usbcfg |= GUSBCFG_PHYIF16;
234 break;
235 default:
236 dev_err(hsotg->dev, "FS PHY selected at HS!\n");
237 break;
238 }
239
240 writel(usbcfg, hsotg->regs + GUSBCFG);
241
242 /* Reset after setting the PHY parameters */
243 dwc2_core_reset(hsotg);
244 }
245
246 static void dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
247 {
248 u32 usbcfg, hs_phy_type, fs_phy_type;
249
250 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
251 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
252 /* If FS mode with FS PHY */
253 dwc2_fs_phy_init(hsotg, select_phy);
254 } else {
255 /* High speed PHY */
256 dwc2_hs_phy_init(hsotg, select_phy);
257 }
258
259 hs_phy_type = hsotg->hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK;
260 fs_phy_type = hsotg->hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK;
261
262 if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
263 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
264 hsotg->core_params->ulpi_fs_ls > 0) {
265 dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
266 usbcfg = readl(hsotg->regs + GUSBCFG);
267 usbcfg |= GUSBCFG_ULPI_FS_LS;
268 usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
269 writel(usbcfg, hsotg->regs + GUSBCFG);
270 } else {
271 usbcfg = readl(hsotg->regs + GUSBCFG);
272 usbcfg &= ~GUSBCFG_ULPI_FS_LS;
273 usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
274 writel(usbcfg, hsotg->regs + GUSBCFG);
275 }
276 }
277
278 static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
279 {
280 u32 ahbcfg = 0;
281
282 switch (hsotg->hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) {
283 case GHWCFG2_EXT_DMA_ARCH:
284 dev_err(hsotg->dev, "External DMA Mode not supported\n");
285 return -EINVAL;
286
287 case GHWCFG2_INT_DMA_ARCH:
288 dev_dbg(hsotg->dev, "Internal DMA Mode\n");
289 /*
290 * Old value was GAHBCFG_HBSTLEN_INCR - changed to INCR4 as
291 * part of a Host mode ISOC issue fix - vahrama
292 */
293 ahbcfg |= GAHBCFG_HBSTLEN_INCR4;
294 break;
295
296 case GHWCFG2_SLAVE_ONLY_ARCH:
297 default:
298 dev_dbg(hsotg->dev, "Slave Only Mode\n");
299 break;
300 }
301
302 dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
303 hsotg->core_params->dma_enable,
304 hsotg->core_params->dma_desc_enable);
305
306 if (hsotg->core_params->dma_enable > 0) {
307 if (hsotg->core_params->dma_desc_enable > 0)
308 dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
309 else
310 dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
311 } else {
312 dev_dbg(hsotg->dev, "Using Slave mode\n");
313 hsotg->core_params->dma_desc_enable = 0;
314 }
315
316 if (hsotg->core_params->ahb_single > 0)
317 ahbcfg |= GAHBCFG_AHB_SINGLE;
318
319 if (hsotg->core_params->dma_enable > 0)
320 ahbcfg |= GAHBCFG_DMA_EN;
321
322 writel(ahbcfg, hsotg->regs + GAHBCFG);
323
324 return 0;
325 }
326
327 static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
328 {
329 u32 usbcfg;
330
331 usbcfg = readl(hsotg->regs + GUSBCFG);
332 usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
333
334 switch (hsotg->hwcfg2 & GHWCFG2_OP_MODE_MASK) {
335 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
336 if (hsotg->core_params->otg_cap ==
337 DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
338 usbcfg |= GUSBCFG_HNPCAP;
339 if (hsotg->core_params->otg_cap !=
340 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
341 usbcfg |= GUSBCFG_SRPCAP;
342 break;
343
344 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
345 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
346 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
347 if (hsotg->core_params->otg_cap !=
348 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
349 usbcfg |= GUSBCFG_SRPCAP;
350 break;
351
352 case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
353 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
354 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
355 default:
356 break;
357 }
358
359 writel(usbcfg, hsotg->regs + GUSBCFG);
360 }
361
362 /**
363 * dwc2_core_init() - Initializes the DWC_otg controller registers and
364 * prepares the core for device mode or host mode operation
365 *
366 * @hsotg: Programming view of the DWC_otg controller
367 * @select_phy: If true then also set the Phy type
368 * @irq: If >= 0, the irq to register
369 */
370 int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq)
371 {
372 u32 usbcfg, otgctl;
373 int retval;
374
375 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
376
377 usbcfg = readl(hsotg->regs + GUSBCFG);
378
379 /* Set ULPI External VBUS bit if needed */
380 usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
381 if (hsotg->core_params->phy_ulpi_ext_vbus ==
382 DWC2_PHY_ULPI_EXTERNAL_VBUS)
383 usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
384
385 /* Set external TS Dline pulsing bit if needed */
386 usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
387 if (hsotg->core_params->ts_dline > 0)
388 usbcfg |= GUSBCFG_TERMSELDLPULSE;
389
390 writel(usbcfg, hsotg->regs + GUSBCFG);
391
392 /* Reset the Controller */
393 dwc2_core_reset(hsotg);
394
395 dev_dbg(hsotg->dev, "num_dev_perio_in_ep=%d\n",
396 hsotg->hwcfg4 >> GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT &
397 GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK >>
398 GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT);
399
400 hsotg->total_fifo_size = hsotg->hwcfg3 >> GHWCFG3_DFIFO_DEPTH_SHIFT &
401 GHWCFG3_DFIFO_DEPTH_MASK >> GHWCFG3_DFIFO_DEPTH_SHIFT;
402 hsotg->rx_fifo_size = readl(hsotg->regs + GRXFSIZ);
403 hsotg->nperio_tx_fifo_size =
404 readl(hsotg->regs + GNPTXFSIZ) >> 16 & 0xffff;
405
406 dev_dbg(hsotg->dev, "Total FIFO SZ=%d\n", hsotg->total_fifo_size);
407 dev_dbg(hsotg->dev, "RxFIFO SZ=%d\n", hsotg->rx_fifo_size);
408 dev_dbg(hsotg->dev, "NP TxFIFO SZ=%d\n", hsotg->nperio_tx_fifo_size);
409
410 /*
411 * This needs to happen in FS mode before any other programming occurs
412 */
413 dwc2_phy_init(hsotg, select_phy);
414
415 /* Program the GAHBCFG Register */
416 retval = dwc2_gahbcfg_init(hsotg);
417 if (retval)
418 return retval;
419
420 /* Program the GUSBCFG register */
421 dwc2_gusbcfg_init(hsotg);
422
423 /* Program the GOTGCTL register */
424 otgctl = readl(hsotg->regs + GOTGCTL);
425 otgctl &= ~GOTGCTL_OTGVER;
426 if (hsotg->core_params->otg_ver > 0)
427 otgctl |= GOTGCTL_OTGVER;
428 writel(otgctl, hsotg->regs + GOTGCTL);
429 dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
430
431 /* Clear the SRP success bit for FS-I2c */
432 hsotg->srp_success = 0;
433
434 if (irq >= 0) {
435 dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
436 irq);
437 retval = devm_request_irq(hsotg->dev, irq,
438 dwc2_handle_common_intr, IRQF_SHARED,
439 dev_name(hsotg->dev), hsotg);
440 if (retval)
441 return retval;
442 }
443
444 /* Enable common interrupts */
445 dwc2_enable_common_interrupts(hsotg);
446
447 /*
448 * Do device or host initialization based on mode during PCD and
449 * HCD initialization
450 */
451 if (dwc2_is_host_mode(hsotg)) {
452 dev_dbg(hsotg->dev, "Host Mode\n");
453 hsotg->op_state = OTG_STATE_A_HOST;
454 } else {
455 dev_dbg(hsotg->dev, "Device Mode\n");
456 hsotg->op_state = OTG_STATE_B_PERIPHERAL;
457 }
458
459 return 0;
460 }
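/*
 * Illustrative call from an HCD/platform probe path (sketch only - the
 * surrounding code and the "retval"/"irq" variables are not from this
 * file, just the dwc2_core_init() signature documented above):
 *
 *	retval = dwc2_core_init(hsotg, true, irq);
 *	if (retval) {
 *		dev_err(hsotg->dev, "core initialization failed: %d\n",
 *			retval);
 *		return retval;
 *	}
 *
 * Passing select_phy = true programs the PHY type on this first pass; a
 * later re-initialization (e.g. after a mode switch) can pass false and a
 * negative irq so the PHY re-selection and IRQ registration are skipped.
 */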
461
462 /**
463 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
464 *
465 * @hsotg: Programming view of DWC_otg controller
466 */
467 void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
468 {
469 u32 intmsk;
470
471 dev_dbg(hsotg->dev, "%s()\n", __func__);
472
473 /* Disable all interrupts */
474 writel(0, hsotg->regs + GINTMSK);
475 writel(0, hsotg->regs + HAINTMSK);
476
477 /* Clear any pending interrupts */
478 writel(0xffffffff, hsotg->regs + GINTSTS);
479
480 /* Enable the common interrupts */
481 dwc2_enable_common_interrupts(hsotg);
482
483 /* Enable host mode interrupts without disturbing common interrupts */
484 intmsk = readl(hsotg->regs + GINTMSK);
485 intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
486 writel(intmsk, hsotg->regs + GINTMSK);
487 }
488
489 /**
490 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
491 *
492 * @hsotg: Programming view of DWC_otg controller
493 */
494 void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
495 {
496 u32 intmsk = readl(hsotg->regs + GINTMSK);
497
498 /* Disable host mode interrupts without disturbing common interrupts */
499 intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
500 GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP);
501 writel(intmsk, hsotg->regs + GINTMSK);
502 }
503
504 static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
505 {
506 struct dwc2_core_params *params = hsotg->core_params;
507 u32 rxfsiz, nptxfsiz, ptxfsiz, hptxfsiz, dfifocfg;
508
509 if (!params->enable_dynamic_fifo)
510 return;
511
512 dev_dbg(hsotg->dev, "Total FIFO Size=%d\n", hsotg->total_fifo_size);
513 dev_dbg(hsotg->dev, "Rx FIFO Size=%d\n", params->host_rx_fifo_size);
514 dev_dbg(hsotg->dev, "NP Tx FIFO Size=%d\n",
515 params->host_nperio_tx_fifo_size);
516 dev_dbg(hsotg->dev, "P Tx FIFO Size=%d\n",
517 params->host_perio_tx_fifo_size);
518
519 /* Rx FIFO */
520 dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n",
521 readl(hsotg->regs + GRXFSIZ));
522 writel(params->host_rx_fifo_size, hsotg->regs + GRXFSIZ);
523 dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", readl(hsotg->regs + GRXFSIZ));
524
525 /* Non-periodic Tx FIFO */
526 dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
527 readl(hsotg->regs + GNPTXFSIZ));
528 nptxfsiz = params->host_nperio_tx_fifo_size <<
529 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
530 nptxfsiz |= params->host_rx_fifo_size <<
531 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
532 writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
533 dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
534 readl(hsotg->regs + GNPTXFSIZ));
535
536 /* Periodic Tx FIFO */
537 dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
538 readl(hsotg->regs + HPTXFSIZ));
539 ptxfsiz = params->host_perio_tx_fifo_size <<
540 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
541 ptxfsiz |= (params->host_rx_fifo_size +
542 params->host_nperio_tx_fifo_size) <<
543 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
544 writel(ptxfsiz, hsotg->regs + HPTXFSIZ);
545 dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
546 readl(hsotg->regs + HPTXFSIZ));
547
548 if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
549 hsotg->snpsid <= DWC2_CORE_REV_2_94a) {
550 /*
551 * Global DFIFOCFG calculation for Host mode -
552 * include RxFIFO, NPTXFIFO and HPTXFIFO
553 */
554 dfifocfg = readl(hsotg->regs + GDFIFOCFG);
555 rxfsiz = readl(hsotg->regs + GRXFSIZ) & 0x0000ffff;
556 nptxfsiz = readl(hsotg->regs + GNPTXFSIZ) >> 16 & 0xffff;
557 hptxfsiz = readl(hsotg->regs + HPTXFSIZ) >> 16 & 0xffff;
558 dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
559 dfifocfg |= (rxfsiz + nptxfsiz + hptxfsiz) <<
560 GDFIFOCFG_EPINFOBASE_SHIFT &
561 GDFIFOCFG_EPINFOBASE_MASK;
562 writel(dfifocfg, hsotg->regs + GDFIFOCFG);
563 }
564 }
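/*
 * Worked example of the FIFO layout programmed above (sizes are in 32-bit
 * words; the numbers are illustrative, not defaults from this driver).
 * With host_rx_fifo_size = 1024, host_nperio_tx_fifo_size = 512 and
 * host_perio_tx_fifo_size = 256:
 *
 *	GRXFSIZ               = 1024
 *	GNPTXFSIZ depth/start = 512 / 1024
 *	HPTXFSIZ  depth/start = 256 / 1536   (1024 + 512)
 *	GDFIFOCFG EPInfoBase  = 1792         (1024 + 512 + 256)
 *
 * i.e. the non-periodic Tx FIFO starts right after the Rx FIFO, the
 * periodic Tx FIFO starts right after that, and the endpoint info base
 * points past all three.
 */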
565
566 /**
567 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
568 * Host mode
569 *
570 * @hsotg: Programming view of DWC_otg controller
571 *
572 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
573 * request queues. Host channels are reset to ensure that they are ready for
574 * performing transfers.
575 */
576 void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
577 {
578 u32 hcfg, hfir, otgctl;
579
580 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
581
582 /* Restart the Phy Clock */
583 writel(0, hsotg->regs + PCGCTL);
584
585 /* Initialize Host Configuration Register */
586 dwc2_init_fs_ls_pclk_sel(hsotg);
587 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
588 hcfg = readl(hsotg->regs + HCFG);
589 hcfg |= HCFG_FSLSSUPP;
590 writel(hcfg, hsotg->regs + HCFG);
591 }
592
593 /*
594 * This bit allows dynamic reloading of the HFIR register during
595 * runtime. This bit needs to be programmed during initial configuration
596 * and its value must not be changed during runtime.
597 */
598 if (hsotg->core_params->reload_ctl > 0) {
599 hfir = readl(hsotg->regs + HFIR);
600 hfir |= HFIR_RLDCTRL;
601 writel(hfir, hsotg->regs + HFIR);
602 }
603
604 if (hsotg->core_params->dma_desc_enable > 0) {
605 u32 op_mode = hsotg->hwcfg2 & GHWCFG2_OP_MODE_MASK;
606
607 if (hsotg->snpsid < DWC2_CORE_REV_2_90a ||
608 !(hsotg->hwcfg4 & GHWCFG4_DESC_DMA) ||
609 op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
610 op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
611 op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
612 dev_err(hsotg->dev,
613 "Hardware does not support descriptor DMA mode -\n");
614 dev_err(hsotg->dev,
615 "falling back to buffer DMA mode.\n");
616 hsotg->core_params->dma_desc_enable = 0;
617 } else {
618 hcfg = readl(hsotg->regs + HCFG);
619 hcfg |= HCFG_DESCDMA;
620 writel(hcfg, hsotg->regs + HCFG);
621 }
622 }
623
624 /* Configure data FIFO sizes */
625 dwc2_config_fifos(hsotg);
626
627 /* TODO - check this */
628 /* Clear Host Set HNP Enable in the OTG Control Register */
629 otgctl = readl(hsotg->regs + GOTGCTL);
630 otgctl &= ~GOTGCTL_HSTSETHNPEN;
631 writel(otgctl, hsotg->regs + GOTGCTL);
632
633 /* Make sure the FIFOs are flushed */
634 dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
635 dwc2_flush_rx_fifo(hsotg);
636
637 /* Clear Host Set HNP Enable in the OTG Control Register */
638 otgctl = readl(hsotg->regs + GOTGCTL);
639 otgctl &= ~GOTGCTL_HSTSETHNPEN;
640 writel(otgctl, hsotg->regs + GOTGCTL);
641
642 if (hsotg->core_params->dma_desc_enable <= 0) {
643 int num_channels, i;
644 u32 hcchar;
645
646 /* Flush out any leftover queued requests */
647 num_channels = hsotg->core_params->host_channels;
648 for (i = 0; i < num_channels; i++) {
649 hcchar = readl(hsotg->regs + HCCHAR(i));
650 hcchar &= ~HCCHAR_CHENA;
651 hcchar |= HCCHAR_CHDIS;
652 hcchar &= ~HCCHAR_EPDIR;
653 writel(hcchar, hsotg->regs + HCCHAR(i));
654 }
655
656 /* Halt all channels to put them into a known state */
657 for (i = 0; i < num_channels; i++) {
658 int count = 0;
659
660 hcchar = readl(hsotg->regs + HCCHAR(i));
661 hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
662 hcchar &= ~HCCHAR_EPDIR;
663 writel(hcchar, hsotg->regs + HCCHAR(i));
664 dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
665 __func__, i);
666 do {
667 hcchar = readl(hsotg->regs + HCCHAR(i));
668 if (++count > 1000) {
669 dev_err(hsotg->dev,
670 "Unable to clear enable on channel %d\n",
671 i);
672 break;
673 }
674 udelay(1);
675 } while (hcchar & HCCHAR_CHENA);
676 }
677 }
678
679 /* Turn on the vbus power */
680 dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
681 if (hsotg->op_state == OTG_STATE_A_HOST) {
682 u32 hprt0 = dwc2_read_hprt0(hsotg);
683
684 dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
685 !!(hprt0 & HPRT0_PWR));
686 if (!(hprt0 & HPRT0_PWR)) {
687 hprt0 |= HPRT0_PWR;
688 writel(hprt0, hsotg->regs + HPRT0);
689 }
690 }
691
692 dwc2_enable_host_interrupts(hsotg);
693 }
694
695 static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
696 struct dwc2_host_chan *chan)
697 {
698 u32 hcintmsk = HCINTMSK_CHHLTD;
699
700 switch (chan->ep_type) {
701 case USB_ENDPOINT_XFER_CONTROL:
702 case USB_ENDPOINT_XFER_BULK:
703 dev_vdbg(hsotg->dev, "control/bulk\n");
704 hcintmsk |= HCINTMSK_XFERCOMPL;
705 hcintmsk |= HCINTMSK_STALL;
706 hcintmsk |= HCINTMSK_XACTERR;
707 hcintmsk |= HCINTMSK_DATATGLERR;
708 if (chan->ep_is_in) {
709 hcintmsk |= HCINTMSK_BBLERR;
710 } else {
711 hcintmsk |= HCINTMSK_NAK;
712 hcintmsk |= HCINTMSK_NYET;
713 if (chan->do_ping)
714 hcintmsk |= HCINTMSK_ACK;
715 }
716
717 if (chan->do_split) {
718 hcintmsk |= HCINTMSK_NAK;
719 if (chan->complete_split)
720 hcintmsk |= HCINTMSK_NYET;
721 else
722 hcintmsk |= HCINTMSK_ACK;
723 }
724
725 if (chan->error_state)
726 hcintmsk |= HCINTMSK_ACK;
727 break;
728
729 case USB_ENDPOINT_XFER_INT:
730 if (dbg_perio())
731 dev_vdbg(hsotg->dev, "intr\n");
732 hcintmsk |= HCINTMSK_XFERCOMPL;
733 hcintmsk |= HCINTMSK_NAK;
734 hcintmsk |= HCINTMSK_STALL;
735 hcintmsk |= HCINTMSK_XACTERR;
736 hcintmsk |= HCINTMSK_DATATGLERR;
737 hcintmsk |= HCINTMSK_FRMOVRUN;
738
739 if (chan->ep_is_in)
740 hcintmsk |= HCINTMSK_BBLERR;
741 if (chan->error_state)
742 hcintmsk |= HCINTMSK_ACK;
743 if (chan->do_split) {
744 if (chan->complete_split)
745 hcintmsk |= HCINTMSK_NYET;
746 else
747 hcintmsk |= HCINTMSK_ACK;
748 }
749 break;
750
751 case USB_ENDPOINT_XFER_ISOC:
752 if (dbg_perio())
753 dev_vdbg(hsotg->dev, "isoc\n");
754 hcintmsk |= HCINTMSK_XFERCOMPL;
755 hcintmsk |= HCINTMSK_FRMOVRUN;
756 hcintmsk |= HCINTMSK_ACK;
757
758 if (chan->ep_is_in) {
759 hcintmsk |= HCINTMSK_XACTERR;
760 hcintmsk |= HCINTMSK_BBLERR;
761 }
762 break;
763 default:
764 dev_err(hsotg->dev, "## Unknown EP type ##\n");
765 break;
766 }
767
768 writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
769 if (dbg_hc(chan))
770 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
771 }
772
773 static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
774 struct dwc2_host_chan *chan)
775 {
776 u32 hcintmsk = HCINTMSK_CHHLTD;
777
778 /*
779 * For Descriptor DMA mode core halts the channel on AHB error.
780 * Interrupt is not required.
781 */
782 if (hsotg->core_params->dma_desc_enable <= 0) {
783 if (dbg_hc(chan))
784 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
785 hcintmsk |= HCINTMSK_AHBERR;
786 } else {
787 if (dbg_hc(chan))
788 dev_vdbg(hsotg->dev, "desc DMA enabled\n");
789 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
790 hcintmsk |= HCINTMSK_XFERCOMPL;
791 }
792
793 if (chan->error_state && !chan->do_split &&
794 chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
795 if (dbg_hc(chan))
796 dev_vdbg(hsotg->dev, "setting ACK\n");
797 hcintmsk |= HCINTMSK_ACK;
798 if (chan->ep_is_in) {
799 hcintmsk |= HCINTMSK_DATATGLERR;
800 if (chan->ep_type != USB_ENDPOINT_XFER_INT)
801 hcintmsk |= HCINTMSK_NAK;
802 }
803 }
804
805 writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
806 if (dbg_hc(chan))
807 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
808 }
809
810 static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
811 struct dwc2_host_chan *chan)
812 {
813 u32 intmsk;
814
815 if (hsotg->core_params->dma_enable > 0) {
816 if (dbg_hc(chan))
817 dev_vdbg(hsotg->dev, "DMA enabled\n");
818 dwc2_hc_enable_dma_ints(hsotg, chan);
819 } else {
820 if (dbg_hc(chan))
821 dev_vdbg(hsotg->dev, "DMA disabled\n");
822 dwc2_hc_enable_slave_ints(hsotg, chan);
823 }
824
825 /* Enable the top level host channel interrupt */
826 intmsk = readl(hsotg->regs + HAINTMSK);
827 intmsk |= 1 << chan->hc_num;
828 writel(intmsk, hsotg->regs + HAINTMSK);
829 if (dbg_hc(chan))
830 dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
831
832 /* Make sure host channel interrupts are enabled */
833 intmsk = readl(hsotg->regs + GINTMSK);
834 intmsk |= GINTSTS_HCHINT;
835 writel(intmsk, hsotg->regs + GINTMSK);
836 if (dbg_hc(chan))
837 dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
838 }
839
840 /**
841 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
842 * a specific endpoint
843 *
844 * @hsotg: Programming view of DWC_otg controller
845 * @chan: Information needed to initialize the host channel
846 *
847 * The HCCHARn register is set up with the characteristics specified in chan.
848 * Host channel interrupts that may need to be serviced while this transfer is
849 * in progress are enabled.
850 */
851 void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
852 {
853 u8 hc_num = chan->hc_num;
854 u32 hcintmsk;
855 u32 hcchar;
856 u32 hcsplt = 0;
857
858 if (dbg_hc(chan))
859 dev_vdbg(hsotg->dev, "%s()\n", __func__);
860
861 /* Clear old interrupt conditions for this host channel */
862 hcintmsk = 0xffffffff;
863 hcintmsk &= ~HCINTMSK_RESERVED14_31;
864 writel(hcintmsk, hsotg->regs + HCINT(hc_num));
865
866 /* Enable channel interrupts required for this transfer */
867 dwc2_hc_enable_ints(hsotg, chan);
868
869 /*
870 * Program the HCCHARn register with the endpoint characteristics for
871 * the current transfer
872 */
873 hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
874 hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
875 if (chan->ep_is_in)
876 hcchar |= HCCHAR_EPDIR;
877 if (chan->speed == USB_SPEED_LOW)
878 hcchar |= HCCHAR_LSPDDEV;
879 hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
880 hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
881 writel(hcchar, hsotg->regs + HCCHAR(hc_num));
882 if (dbg_hc(chan)) {
883 dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
884 hc_num, hcchar);
885
886 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, hc_num);
887 dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
888 hcchar >> HCCHAR_DEVADDR_SHIFT &
889 HCCHAR_DEVADDR_MASK >> HCCHAR_DEVADDR_SHIFT);
890 dev_vdbg(hsotg->dev, " Ep Num: %d\n",
891 hcchar >> HCCHAR_EPNUM_SHIFT &
892 HCCHAR_EPNUM_MASK >> HCCHAR_EPNUM_SHIFT);
893 dev_vdbg(hsotg->dev, " Is In: %d\n",
894 !!(hcchar & HCCHAR_EPDIR));
895 dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
896 !!(hcchar & HCCHAR_LSPDDEV));
897 dev_vdbg(hsotg->dev, " Ep Type: %d\n",
898 hcchar >> HCCHAR_EPTYPE_SHIFT &
899 HCCHAR_EPTYPE_MASK >> HCCHAR_EPTYPE_SHIFT);
900 dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
901 hcchar >> HCCHAR_MPS_SHIFT &
902 HCCHAR_MPS_MASK >> HCCHAR_MPS_SHIFT);
903 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
904 hcchar >> HCCHAR_MULTICNT_SHIFT &
905 HCCHAR_MULTICNT_MASK >> HCCHAR_MULTICNT_SHIFT);
906 }
907
908 /* Program the HCSPLT register for SPLITs */
909 if (chan->do_split) {
910 if (dbg_hc(chan))
911 dev_vdbg(hsotg->dev,
912 "Programming HC %d with split --> %s\n",
913 hc_num,
914 chan->complete_split ? "CSPLIT" : "SSPLIT");
915 if (chan->complete_split)
916 hcsplt |= HCSPLT_COMPSPLT;
917 hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
918 HCSPLT_XACTPOS_MASK;
919 hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
920 HCSPLT_HUBADDR_MASK;
921 hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
922 HCSPLT_PRTADDR_MASK;
923 if (dbg_hc(chan)) {
924 dev_vdbg(hsotg->dev, " comp split %d\n",
925 chan->complete_split);
926 dev_vdbg(hsotg->dev, " xact pos %d\n",
927 chan->xact_pos);
928 dev_vdbg(hsotg->dev, " hub addr %d\n",
929 chan->hub_addr);
930 dev_vdbg(hsotg->dev, " hub port %d\n",
931 chan->hub_port);
932 dev_vdbg(hsotg->dev, " is_in %d\n",
933 chan->ep_is_in);
934 dev_vdbg(hsotg->dev, " Max Pkt %d\n",
935 hcchar >> HCCHAR_MPS_SHIFT &
936 HCCHAR_MPS_MASK >> HCCHAR_MPS_SHIFT);
937 dev_vdbg(hsotg->dev, " xferlen %d\n",
938 chan->xfer_len);
939 }
940 }
941
942 writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
943 }
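/*
 * Minimal caller-side sketch for dwc2_hc_init() (illustrative only - the
 * real channel setup lives in the HCD code, and a chan would normally come
 * from the free channel list with many more fields filled in):
 *
 *	chan->hc_num     = 0;
 *	chan->dev_addr   = 1;
 *	chan->ep_num     = 2;
 *	chan->ep_is_in   = 1;
 *	chan->speed      = USB_SPEED_HIGH;
 *	chan->ep_type    = USB_ENDPOINT_XFER_BULK;
 *	chan->max_packet = 512;
 *	chan->do_split   = 0;
 *	dwc2_hc_init(hsotg, chan);
 *
 * Only the fields read by dwc2_hc_init() above are shown.
 */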
944
945 /**
946 * dwc2_hc_halt() - Attempts to halt a host channel
947 *
948 * @hsotg: Controller register interface
949 * @chan: Host channel to halt
950 * @halt_status: Reason for halting the channel
951 *
952 * This function should only be called in Slave mode or to abort a transfer in
953 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
954 * controller halts the channel when the transfer is complete or a condition
955 * occurs that requires application intervention.
956 *
957 * In slave mode, checks for a free request queue entry, then sets the Channel
958 * Enable and Channel Disable bits of the Host Channel Characteristics
959 * register of the specified channel to initiate the halt. If there is no free
960 * request queue entry, sets only the Channel Disable bit of the HCCHARn
961 * register to flush requests for this channel. In the latter case, sets a
962 * flag to indicate that the host channel needs to be halted when a request
963 * queue slot is open.
964 *
965 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
966 * HCCHARn register. The controller ensures there is space in the request
967 * queue before submitting the halt request.
968 *
969 * Some time may elapse before the core flushes any posted requests for this
970 * host channel and halts. The Channel Halted interrupt handler completes the
971 * deactivation of the host channel.
972 */
973 void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
974 enum dwc2_halt_status halt_status)
975 {
976 u32 nptxsts, hptxsts, hcchar;
977
978 if (dbg_hc(chan))
979 dev_vdbg(hsotg->dev, "%s()\n", __func__);
980 if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
981 dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
982
983 if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
984 halt_status == DWC2_HC_XFER_AHB_ERR) {
985 /*
986 * Disable all channel interrupts except Ch Halted. The QTD
987 * and QH state associated with this transfer has been cleared
988 * (in the case of URB_DEQUEUE), so the channel needs to be
989 * shut down carefully to prevent crashes.
990 */
991 u32 hcintmsk = HCINTMSK_CHHLTD;
992
993 dev_vdbg(hsotg->dev, "dequeue/error\n");
994 writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
995
996 /*
997 * Make sure no other interrupts besides halt are currently
998 * pending. Handling another interrupt could cause a crash due
999 * to the QTD and QH state.
1000 */
1001 writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));
1002
1003 /*
1004 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
1005 * even if the channel was already halted for some other
1006 * reason
1007 */
1008 chan->halt_status = halt_status;
1009
1010 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1011 if (!(hcchar & HCCHAR_CHENA)) {
1012 /*
1013 * The channel is either already halted or it hasn't
1014 * started yet. In DMA mode, the transfer may halt if
1015 * it finishes normally or a condition occurs that
1016 * requires driver intervention. Don't want to halt
1017 * the channel again. In either Slave or DMA mode,
1018 * it's possible that the transfer has been assigned
1019 * to a channel, but not started yet when an URB is
1020 * dequeued. Don't want to halt a channel that hasn't
1021 * started yet.
1022 */
1023 return;
1024 }
1025 }
1026 if (chan->halt_pending) {
1027 /*
1028 * A halt has already been issued for this channel. This might
1029 * happen when a transfer is aborted by a higher level in
1030 * the stack.
1031 */
1032 dev_vdbg(hsotg->dev,
1033 "*** %s: Channel %d, chan->halt_pending already set ***\n",
1034 __func__, chan->hc_num);
1035 return;
1036 }
1037
1038 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1039
1040 /* No need to set the bit in DDMA for disabling the channel */
1041 /* TODO check it everywhere channel is disabled */
1042 if (hsotg->core_params->dma_desc_enable <= 0) {
1043 if (dbg_hc(chan))
1044 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
1045 hcchar |= HCCHAR_CHENA;
1046 } else {
1047 if (dbg_hc(chan))
1048 dev_dbg(hsotg->dev, "desc DMA enabled\n");
1049 }
1050 hcchar |= HCCHAR_CHDIS;
1051
1052 if (hsotg->core_params->dma_enable <= 0) {
1053 if (dbg_hc(chan))
1054 dev_vdbg(hsotg->dev, "DMA not enabled\n");
1055 hcchar |= HCCHAR_CHENA;
1056
1057 /* Check for space in the request queue to issue the halt */
1058 if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1059 chan->ep_type == USB_ENDPOINT_XFER_BULK) {
1060 dev_vdbg(hsotg->dev, "control/bulk\n");
1061 nptxsts = readl(hsotg->regs + GNPTXSTS);
1062 if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
1063 dev_vdbg(hsotg->dev, "Disabling channel\n");
1064 hcchar &= ~HCCHAR_CHENA;
1065 }
1066 } else {
1067 if (dbg_perio())
1068 dev_vdbg(hsotg->dev, "isoc/intr\n");
1069 hptxsts = readl(hsotg->regs + HPTXSTS);
1070 if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
1071 hsotg->queuing_high_bandwidth) {
1072 if (dbg_perio())
1073 dev_vdbg(hsotg->dev, "Disabling channel\n");
1074 hcchar &= ~HCCHAR_CHENA;
1075 }
1076 }
1077 } else {
1078 if (dbg_hc(chan))
1079 dev_vdbg(hsotg->dev, "DMA enabled\n");
1080 }
1081
1082 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1083 chan->halt_status = halt_status;
1084
1085 if (hcchar & HCCHAR_CHENA) {
1086 if (dbg_hc(chan))
1087 dev_vdbg(hsotg->dev, "Channel enabled\n");
1088 chan->halt_pending = 1;
1089 chan->halt_on_queue = 0;
1090 } else {
1091 if (dbg_hc(chan))
1092 dev_vdbg(hsotg->dev, "Channel disabled\n");
1093 chan->halt_on_queue = 1;
1094 }
1095
1096 if (dbg_hc(chan)) {
1097 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1098 chan->hc_num);
1099 dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n",
1100 hcchar);
1101 dev_vdbg(hsotg->dev, " halt_pending: %d\n",
1102 chan->halt_pending);
1103 dev_vdbg(hsotg->dev, " halt_on_queue: %d\n",
1104 chan->halt_on_queue);
1105 dev_vdbg(hsotg->dev, " halt_status: %d\n",
1106 chan->halt_status);
1107 }
1108 }
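/*
 * Example call (illustrative): when the HCD dequeues a URB whose transfer
 * is still assigned to a channel, it aborts the channel with
 *
 *	dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_URB_DEQUEUE);
 *
 * which takes the "dequeue/error" path above and leaves the final cleanup
 * to the Channel Halted interrupt handler.
 */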
1109
1110 /**
1111 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1112 *
1113 * @hsotg: Programming view of DWC_otg controller
1114 * @chan: Identifies the host channel to clean up
1115 *
1116 * This function is normally called after a transfer is done and the host
1117 * channel is being released
1118 */
1119 void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1120 {
1121 u32 hcintmsk;
1122
1123 chan->xfer_started = 0;
1124
1125 /*
1126 * Clear channel interrupt enables and any unhandled channel interrupt
1127 * conditions
1128 */
1129 writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
1130 hcintmsk = 0xffffffff;
1131 hcintmsk &= ~HCINTMSK_RESERVED14_31;
1132 writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
1133 }
1134
1135 /**
1136 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1137 * which frame a periodic transfer should occur
1138 *
1139 * @hsotg: Programming view of DWC_otg controller
1140 * @chan: Identifies the host channel to set up and its properties
1141 * @hcchar: Current value of the HCCHAR register for the specified host channel
1142 *
1143 * This function has no effect on non-periodic transfers
1144 */
1145 static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
1146 struct dwc2_host_chan *chan, u32 *hcchar)
1147 {
1148 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1149 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1150 /* 1 if _next_ frame is odd, 0 if it's even */
1151 if (dwc2_hcd_get_frame_number(hsotg) & 0x1)
1152 *hcchar |= HCCHAR_ODDFRM;
1153 }
1154 }
1155
1156 static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1157 {
1158 /* Set up the initial PID for the transfer */
1159 if (chan->speed == USB_SPEED_HIGH) {
1160 if (chan->ep_is_in) {
1161 if (chan->multi_count == 1)
1162 chan->data_pid_start = DWC2_HC_PID_DATA0;
1163 else if (chan->multi_count == 2)
1164 chan->data_pid_start = DWC2_HC_PID_DATA1;
1165 else
1166 chan->data_pid_start = DWC2_HC_PID_DATA2;
1167 } else {
1168 if (chan->multi_count == 1)
1169 chan->data_pid_start = DWC2_HC_PID_DATA0;
1170 else
1171 chan->data_pid_start = DWC2_HC_PID_MDATA;
1172 }
1173 } else {
1174 chan->data_pid_start = DWC2_HC_PID_DATA0;
1175 }
1176 }
1177
1178 /**
1179 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1180 * the Host Channel
1181 *
1182 * @hsotg: Programming view of DWC_otg controller
1183 * @chan: Information needed to initialize the host channel
1184 *
1185 * This function should only be called in Slave mode. For a channel associated
1186 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1187 * associated with a periodic EP, the periodic Tx FIFO is written.
1188 *
1189 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1190 * the number of bytes written to the Tx FIFO.
1191 */
1192 static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1193 struct dwc2_host_chan *chan)
1194 {
1195 u32 i;
1196 u32 remaining_count;
1197 u32 byte_count;
1198 u32 dword_count;
1199 u32 __iomem *data_fifo;
1200 u32 *data_buf = (u32 *)chan->xfer_buf;
1201
1202 if (dbg_hc(chan))
1203 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1204
1205 data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
1206
1207 remaining_count = chan->xfer_len - chan->xfer_count;
1208 if (remaining_count > chan->max_packet)
1209 byte_count = chan->max_packet;
1210 else
1211 byte_count = remaining_count;
1212
1213 dword_count = (byte_count + 3) / 4;
1214
1215 if (((unsigned long)data_buf & 0x3) == 0) {
1216 /* xfer_buf is DWORD aligned */
1217 for (i = 0; i < dword_count; i++, data_buf++)
1218 writel(*data_buf, data_fifo);
1219 } else {
1220 /* xfer_buf is not DWORD aligned */
1221 for (i = 0; i < dword_count; i++, data_buf++) {
1222 u32 data = data_buf[0] | data_buf[1] << 8 |
1223 data_buf[2] << 16 | data_buf[3] << 24;
1224 writel(data, data_fifo);
1225 }
1226 }
1227
1228 chan->xfer_count += byte_count;
1229 chan->xfer_buf += byte_count;
1230 }
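/*
 * Worked example (illustrative): with max_packet = 64 and 13 bytes left in
 * the transfer, byte_count = 13 and dword_count = (13 + 3) / 4 = 4, so four
 * 32-bit words are written to the FIFO even though only 13 bytes are valid;
 * the extra bytes in the last word are padding, and the transfer size
 * programmed in HCTSIZ determines how many bytes are actually sent.
 */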
1231
1232 /**
1233 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1234 * channel and starts the transfer
1235 *
1236 * @hsotg: Programming view of DWC_otg controller
1237 * @chan: Information needed to initialize the host channel. The xfer_len value
1238 * may be reduced to accommodate the max widths of the XferSize and
1239 * PktCnt fields in the HCTSIZn register. The multi_count value may be
1240 * changed to reflect the final xfer_len value.
1241 *
1242 * This function may be called in either Slave mode or DMA mode. In Slave mode,
1243 * the caller must ensure that there is sufficient space in the request queue
1244 * and Tx Data FIFO.
1245 *
1246 * For an OUT transfer in Slave mode, it loads a data packet into the
1247 * appropriate FIFO. If necessary, additional data packets are loaded in the
1248 * Host ISR.
1249 *
1250 * For an IN transfer in Slave mode, a data packet is requested. The data
1251 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1252 * additional data packets are requested in the Host ISR.
1253 *
1254 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1255 * register along with a packet count of 1 and the channel is enabled. This
1256 * causes a single PING transaction to occur. Other fields in HCTSIZ are
1257 * simply set to 0 since no data transfer occurs in this case.
1258 *
1259 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1260 * all the information required to perform the subsequent data transfer. In
1261 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1262 * controller performs the entire PING protocol, then starts the data
1263 * transfer.
1264 */
1265 void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
1266 struct dwc2_host_chan *chan)
1267 {
1268 u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
1269 u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
1270 u32 hcchar;
1271 u32 hctsiz = 0;
1272 u16 num_packets;
1273
1274 if (dbg_hc(chan))
1275 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1276
1277 if (chan->do_ping) {
1278 if (hsotg->core_params->dma_enable <= 0) {
1279 if (dbg_hc(chan))
1280 dev_vdbg(hsotg->dev, "ping, no DMA\n");
1281 dwc2_hc_do_ping(hsotg, chan);
1282 chan->xfer_started = 1;
1283 return;
1284 } else {
1285 if (dbg_hc(chan))
1286 dev_vdbg(hsotg->dev, "ping, DMA\n");
1287 hctsiz |= TSIZ_DOPNG;
1288 }
1289 }
1290
1291 if (chan->do_split) {
1292 if (dbg_hc(chan))
1293 dev_vdbg(hsotg->dev, "split\n");
1294 num_packets = 1;
1295
1296 if (chan->complete_split && !chan->ep_is_in)
1297 /*
1298 * For CSPLIT OUT Transfer, set the size to 0 so the
1299 * core doesn't expect any data written to the FIFO
1300 */
1301 chan->xfer_len = 0;
1302 else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
1303 chan->xfer_len = chan->max_packet;
1304 else if (!chan->ep_is_in && chan->xfer_len > 188)
1305 chan->xfer_len = 188;
1306
1307 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1308 TSIZ_XFERSIZE_MASK;
1309 } else {
1310 if (dbg_hc(chan))
1311 dev_vdbg(hsotg->dev, "no split\n");
1312 /*
1313 * Ensure that the transfer length and packet count will fit
1314 * in the widths allocated for them in the HCTSIZn register
1315 */
1316 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1317 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1318 /*
1319 * Make sure the transfer size is no larger than one
1320 * (micro)frame's worth of data. (A check was done
1321 * when the periodic transfer was accepted to ensure
1322 * that a (micro)frame's worth of data can be
1323 * programmed into a channel.)
1324 */
1325 u32 max_periodic_len =
1326 chan->multi_count * chan->max_packet;
1327
1328 if (chan->xfer_len > max_periodic_len)
1329 chan->xfer_len = max_periodic_len;
1330 } else if (chan->xfer_len > max_hc_xfer_size) {
1331 /*
1332 * Make sure that xfer_len is a multiple of max packet
1333 * size
1334 */
1335 chan->xfer_len =
1336 max_hc_xfer_size - chan->max_packet + 1;
1337 }
1338
1339 if (chan->xfer_len > 0) {
1340 num_packets = (chan->xfer_len + chan->max_packet - 1) /
1341 chan->max_packet;
1342 if (num_packets > max_hc_pkt_count) {
1343 num_packets = max_hc_pkt_count;
1344 chan->xfer_len = num_packets * chan->max_packet;
1345 }
1346 } else {
1347 /* Need 1 packet for transfer length of 0 */
1348 num_packets = 1;
1349 }
1350
1351 if (chan->ep_is_in)
1352 /*
1353 * Always program an integral # of max packets for IN
1354 * transfers
1355 */
1356 chan->xfer_len = num_packets * chan->max_packet;
1357
1358 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1359 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1360 /*
1361 * Make sure that the multi_count field matches the
1362 * actual transfer length
1363 */
1364 chan->multi_count = num_packets;
1365
1366 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1367 dwc2_set_pid_isoc(chan);
1368
1369 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1370 TSIZ_XFERSIZE_MASK;
1371 }
1372
1373 chan->start_pkt_count = num_packets;
1374 hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
1375 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1376 TSIZ_SC_MC_PID_MASK;
1377 writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1378 if (dbg_hc(chan)) {
1379 dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
1380 hctsiz, chan->hc_num);
1381
1382 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1383 chan->hc_num);
1384 dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
1385 hctsiz >> TSIZ_XFERSIZE_SHIFT &
1386 TSIZ_XFERSIZE_MASK >> TSIZ_XFERSIZE_SHIFT);
1387 dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
1388 hctsiz >> TSIZ_PKTCNT_SHIFT &
1389 TSIZ_PKTCNT_MASK >> TSIZ_PKTCNT_SHIFT);
1390 dev_vdbg(hsotg->dev, " Start PID: %d\n",
1391 hctsiz >> TSIZ_SC_MC_PID_SHIFT &
1392 TSIZ_SC_MC_PID_MASK >> TSIZ_SC_MC_PID_SHIFT);
1393 }
1394
1395 if (hsotg->core_params->dma_enable > 0) {
1396 dma_addr_t dma_addr;
1397
1398 if (chan->align_buf) {
1399 if (dbg_hc(chan))
1400 dev_vdbg(hsotg->dev, "align_buf\n");
1401 dma_addr = chan->align_buf;
1402 } else {
1403 dma_addr = chan->xfer_dma;
1404 }
1405 writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
1406 if (dbg_hc(chan))
1407 dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
1408 (unsigned long)dma_addr, chan->hc_num);
1409 }
1410
1411 /* Start the split */
1412 if (chan->do_split) {
1413 u32 hcsplt = readl(hsotg->regs + HCSPLT(chan->hc_num));
1414
1415 hcsplt |= HCSPLT_SPLTENA;
1416 writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
1417 }
1418
1419 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1420 hcchar &= ~HCCHAR_MULTICNT_MASK;
1421 hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1422 HCCHAR_MULTICNT_MASK;
1423 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1424
1425 if (hcchar & HCCHAR_CHDIS)
1426 dev_warn(hsotg->dev,
1427 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1428 __func__, chan->hc_num, hcchar);
1429
1430 /* Set host channel enable after all other setup is complete */
1431 hcchar |= HCCHAR_CHENA;
1432 hcchar &= ~HCCHAR_CHDIS;
1433
1434 if (dbg_hc(chan))
1435 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
1436 hcchar >> HCCHAR_MULTICNT_SHIFT &
1437 HCCHAR_MULTICNT_MASK >> HCCHAR_MULTICNT_SHIFT);
1438
1439 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1440 if (dbg_hc(chan))
1441 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1442 chan->hc_num);
1443
1444 chan->xfer_started = 1;
1445 chan->requests++;
1446
1447 if (hsotg->core_params->dma_enable <= 0 &&
1448 !chan->ep_is_in && chan->xfer_len > 0)
1449 /* Load OUT packet into the appropriate Tx FIFO */
1450 dwc2_hc_write_packet(hsotg, chan);
1451 }
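/*
 * Worked example of the HCTSIZ programming above (numbers are illustrative).
 * For a non-split bulk OUT with xfer_len = 1200 and max_packet = 512:
 *
 *	num_packets = (1200 + 511) / 512 = 3
 *	XferSize    = 1200
 *	PktCnt      = 3
 *	PID         = chan->data_pid_start
 *
 * For the same endpoint as an IN transfer, xfer_len is rounded up to an
 * integral number of max packets, so XferSize becomes 3 * 512 = 1536.
 */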
1452
1453 /**
1454 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
1455 * host channel and starts the transfer in Descriptor DMA mode
1456 *
1457 * @hsotg: Programming view of DWC_otg controller
1458 * @chan: Information needed to initialize the host channel
1459 *
1460 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
1461 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
1462 * with micro-frame bitmap.
1463 *
1464 * Initializes HCDMA register with descriptor list address and CTD value then
1465 * starts the transfer via enabling the channel.
1466 */
1467 void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
1468 struct dwc2_host_chan *chan)
1469 {
1470 u32 hcchar;
1471 u32 hc_dma;
1472 u32 hctsiz = 0;
1473
1474 if (chan->do_ping)
1475 hctsiz |= TSIZ_DOPNG;
1476
1477 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1478 dwc2_set_pid_isoc(chan);
1479
1480 /* Packet Count and Xfer Size are not used in Descriptor DMA mode */
1481 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1482 TSIZ_SC_MC_PID_MASK;
1483
1484 /* 0 - 1 descriptor, 1 - 2 descriptors, etc */
1485 hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
1486
1487 /* Non-zero only for high-speed interrupt endpoints */
1488 hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
1489
1490 if (dbg_hc(chan)) {
1491 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1492 chan->hc_num);
1493 dev_vdbg(hsotg->dev, " Start PID: %d\n",
1494 chan->data_pid_start);
1495 dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
1496 }
1497
1498 writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1499
1500 hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;
1501
1502 /* Always start from first descriptor */
1503 hc_dma &= ~HCDMA_CTD_MASK;
1504 writel(hc_dma, hsotg->regs + HCDMA(chan->hc_num));
1505 if (dbg_hc(chan))
1506 dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n",
1507 hc_dma, chan->hc_num);
1508
1509 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1510 hcchar &= ~HCCHAR_MULTICNT_MASK;
1511 hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1512 HCCHAR_MULTICNT_MASK;
1513
1514 if (hcchar & HCCHAR_CHDIS)
1515 dev_warn(hsotg->dev,
1516 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1517 __func__, chan->hc_num, hcchar);
1518
1519 /* Set host channel enable after all other setup is complete */
1520 hcchar |= HCCHAR_CHENA;
1521 hcchar &= ~HCCHAR_CHDIS;
1522
1523 if (dbg_hc(chan))
1524 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
1525 hcchar >> HCCHAR_MULTICNT_SHIFT &
1526 HCCHAR_MULTICNT_MASK >> HCCHAR_MULTICNT_SHIFT);
1527
1528 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1529 if (dbg_hc(chan))
1530 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1531 chan->hc_num);
1532
1533 chan->xfer_started = 1;
1534 chan->requests++;
1535 }
1536
1537 /**
1538 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
1539 * a previous call to dwc2_hc_start_transfer()
1540 *
1541 * @hsotg: Programming view of DWC_otg controller
1542 * @chan: Information needed to initialize the host channel
1543 *
1544 * The caller must ensure there is sufficient space in the request queue and Tx
1545 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
1546 * the controller acts autonomously to complete transfers programmed to a host
1547 * channel.
1548 *
1549 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
1550 * if there is any data remaining to be queued. For an IN transfer, another
1551 * data packet is always requested. For the SETUP phase of a control transfer,
1552 * this function does nothing.
1553 *
1554 * Return: 1 if a new request is queued, 0 if no more requests are required
1555 * for this transfer
1556 */
1557 int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
1558 struct dwc2_host_chan *chan)
1559 {
1560 if (dbg_hc(chan))
1561 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1562 chan->hc_num);
1563
1564 if (chan->do_split)
1565 /* SPLITs always queue just once per channel */
1566 return 0;
1567
1568 if (chan->data_pid_start == DWC2_HC_PID_SETUP)
1569 /* SETUPs are queued only once since they can't be NAK'd */
1570 return 0;
1571
1572 if (chan->ep_is_in) {
1573 /*
1574 * Always queue another request for other IN transfers. If
1575 * back-to-back INs are issued and NAKs are received for both,
1576 * the driver may still be processing the first NAK when the
1577 * second NAK is received. When the interrupt handler clears
1578 * the NAK interrupt for the first NAK, the second NAK will
1579 * not be seen. So we can't depend on the NAK interrupt
1580 * handler to requeue a NAK'd request. Instead, IN requests
1581 * are issued each time this function is called. When the
1582 * transfer completes, the extra requests for the channel will
1583 * be flushed.
1584 */
1585 u32 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1586
1587 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1588 hcchar |= HCCHAR_CHENA;
1589 hcchar &= ~HCCHAR_CHDIS;
1590 if (dbg_hc(chan))
1591 dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n",
1592 hcchar);
1593 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1594 chan->requests++;
1595 return 1;
1596 }
1597
1598 /* OUT transfers */
1599
1600 if (chan->xfer_count < chan->xfer_len) {
1601 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1602 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1603 u32 hcchar = readl(hsotg->regs +
1604 HCCHAR(chan->hc_num));
1605
1606 dwc2_hc_set_even_odd_frame(hsotg, chan,
1607 &hcchar);
1608 }
1609
1610 /* Load OUT packet into the appropriate Tx FIFO */
1611 dwc2_hc_write_packet(hsotg, chan);
1612 chan->requests++;
1613 return 1;
1614 }
1615
1616 return 0;
1617 }
1618
1619 /**
1620 * dwc2_hc_do_ping() - Starts a PING transfer
1621 *
1622 * @hsotg: Programming view of DWC_otg controller
1623 * @chan: Information needed to initialize the host channel
1624 *
1625 * This function should only be called in Slave mode. The Do Ping bit is set in
1626 * the HCTSIZ register, then the channel is enabled.
1627 */
1628 void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1629 {
1630 u32 hcchar;
1631 u32 hctsiz;
1632
1633 if (dbg_hc(chan))
1634 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1635 chan->hc_num);
1636
1637
1638 hctsiz = TSIZ_DOPNG;
1639 hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
1640 writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1641
1642 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1643 hcchar |= HCCHAR_CHENA;
1644 hcchar &= ~HCCHAR_CHDIS;
1645 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1646 }
1647
1648 /**
1649 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
1650 * the HFIR register according to PHY type and speed
1651 *
1652 * @hsotg: Programming view of DWC_otg controller
1653 *
1654 * NOTE: The caller can modify the value of the HFIR register only after the
1655 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
1656 * has been set
1657 */
1658 u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
1659 {
1660 u32 usbcfg;
1661 u32 hwcfg2;
1662 u32 hprt0;
1663 int clock = 60; /* default value */
1664
1665 usbcfg = readl(hsotg->regs + GUSBCFG);
1666 hwcfg2 = readl(hsotg->regs + GHWCFG2);
1667 hprt0 = readl(hsotg->regs + HPRT0);
1668
1669 if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
1670 !(usbcfg & GUSBCFG_PHYIF16))
1671 clock = 60;
1672 if ((usbcfg & GUSBCFG_PHYSEL) && (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) ==
1673 GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
1674 clock = 48;
1675 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1676 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
1677 clock = 30;
1678 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1679 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
1680 clock = 60;
1681 if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1682 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
1683 clock = 48;
1684 if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
1685 (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) ==
1686 GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
1687 clock = 48;
1688 if ((usbcfg & GUSBCFG_PHYSEL) && (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) ==
1689 GHWCFG2_FS_PHY_TYPE_DEDICATED)
1690 clock = 48;
1691
1692 if ((hprt0 & HPRT0_SPD_MASK) == HPRT0_SPD_HIGH_SPEED)
1693 /* High speed case */
1694 return 125 * clock;
1695 else
1696 /* FS/LS case */
1697 return 1000 * clock;
1698 }
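/*
 * Worked example (illustrative): with a 16-bit UTMI+ PHY and no low-power
 * clock select, the function above picks clock = 30, so a high-speed port
 * gets an HFIR interval of 125 * 30 = 3750 PHY clocks per microframe. A
 * full-/low-speed port on a dedicated 48 MHz FS PHY picks clock = 48 and
 * gets 1000 * 48 = 48000 clocks per frame.
 */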
1699
1700 /**
1701 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
1702 * buffer
1703 *
1704 * @hsotg: Programming view of DWC_otg controller
1705 * @dest: Destination buffer for the packet
1706 * @bytes: Number of bytes to copy to the destination
1707 */
1708 void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
1709 {
1710 u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
1711 u32 *data_buf = (u32 *)dest;
1712 int word_count = (bytes + 3) / 4;
1713 int i;
1714
1715 /*
1716 * Todo: Account for the case where dest is not dword aligned. This
1717 * requires reading data from the FIFO into a u32 temp buffer, then
1718 * moving it into the data buffer.
1719 */
1720
1721 dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
1722
1723 for (i = 0; i < word_count; i++, data_buf++)
1724 *data_buf = readl(fifo);
1725 }
1726
1727 /**
1728 * dwc2_dump_host_registers() - Prints the host registers
1729 *
1730 * @hsotg: Programming view of DWC_otg controller
1731 *
1732 * NOTE: This function will be removed once the peripheral controller code
1733 * is integrated and the driver is stable
1734 */
1735 void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
1736 {
1737 #ifdef DEBUG
1738 u32 __iomem *addr;
1739 int i;
1740
1741 dev_dbg(hsotg->dev, "Host Global Registers\n");
1742 addr = hsotg->regs + HCFG;
1743 dev_dbg(hsotg->dev, "HCFG @0x%08lX : 0x%08X\n",
1744 (unsigned long)addr, readl(addr));
1745 addr = hsotg->regs + HFIR;
1746 dev_dbg(hsotg->dev, "HFIR @0x%08lX : 0x%08X\n",
1747 (unsigned long)addr, readl(addr));
1748 addr = hsotg->regs + HFNUM;
1749 dev_dbg(hsotg->dev, "HFNUM @0x%08lX : 0x%08X\n",
1750 (unsigned long)addr, readl(addr));
1751 addr = hsotg->regs + HPTXSTS;
1752 dev_dbg(hsotg->dev, "HPTXSTS @0x%08lX : 0x%08X\n",
1753 (unsigned long)addr, readl(addr));
1754 addr = hsotg->regs + HAINT;
1755 dev_dbg(hsotg->dev, "HAINT @0x%08lX : 0x%08X\n",
1756 (unsigned long)addr, readl(addr));
1757 addr = hsotg->regs + HAINTMSK;
1758 dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
1759 (unsigned long)addr, readl(addr));
1760 if (hsotg->core_params->dma_desc_enable > 0) {
1761 addr = hsotg->regs + HFLBADDR;
1762 dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
1763 (unsigned long)addr, readl(addr));
1764 }
1765
1766 addr = hsotg->regs + HPRT0;
1767 dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n",
1768 (unsigned long)addr, readl(addr));
1769
1770 for (i = 0; i < hsotg->core_params->host_channels; i++) {
1771 dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
1772 addr = hsotg->regs + HCCHAR(i);
1773 dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n",
1774 (unsigned long)addr, readl(addr));
1775 addr = hsotg->regs + HCSPLT(i);
1776 dev_dbg(hsotg->dev, "HCSPLT @0x%08lX : 0x%08X\n",
1777 (unsigned long)addr, readl(addr));
1778 addr = hsotg->regs + HCINT(i);
1779 dev_dbg(hsotg->dev, "HCINT @0x%08lX : 0x%08X\n",
1780 (unsigned long)addr, readl(addr));
1781 addr = hsotg->regs + HCINTMSK(i);
1782 dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n",
1783 (unsigned long)addr, readl(addr));
1784 addr = hsotg->regs + HCTSIZ(i);
1785 dev_dbg(hsotg->dev, "HCTSIZ @0x%08lX : 0x%08X\n",
1786 (unsigned long)addr, readl(addr));
1787 addr = hsotg->regs + HCDMA(i);
1788 dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n",
1789 (unsigned long)addr, readl(addr));
1790 if (hsotg->core_params->dma_desc_enable > 0) {
1791 addr = hsotg->regs + HCDMAB(i);
1792 dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n",
1793 (unsigned long)addr, readl(addr));
1794 }
1795 }
1796 #endif
1797 }
1798
1799 /**
1800 * dwc2_dump_global_registers() - Prints the core global registers
1801 *
1802 * @hsotg: Programming view of DWC_otg controller
1803 *
1804 * NOTE: This function will be removed once the peripheral controller code
1805 * is integrated and the driver is stable
1806 */
1807 void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
1808 {
1809 #ifdef DEBUG
1810 u32 __iomem *addr;
1811
1812 dev_dbg(hsotg->dev, "Core Global Registers\n");
1813 addr = hsotg->regs + GOTGCTL;
1814 dev_dbg(hsotg->dev, "GOTGCTL @0x%08lX : 0x%08X\n",
1815 (unsigned long)addr, readl(addr));
1816 addr = hsotg->regs + GOTGINT;
1817 dev_dbg(hsotg->dev, "GOTGINT @0x%08lX : 0x%08X\n",
1818 (unsigned long)addr, readl(addr));
1819 addr = hsotg->regs + GAHBCFG;
1820 dev_dbg(hsotg->dev, "GAHBCFG @0x%08lX : 0x%08X\n",
1821 (unsigned long)addr, readl(addr));
1822 addr = hsotg->regs + GUSBCFG;
1823 dev_dbg(hsotg->dev, "GUSBCFG @0x%08lX : 0x%08X\n",
1824 (unsigned long)addr, readl(addr));
1825 addr = hsotg->regs + GRSTCTL;
1826 dev_dbg(hsotg->dev, "GRSTCTL @0x%08lX : 0x%08X\n",
1827 (unsigned long)addr, readl(addr));
1828 addr = hsotg->regs + GINTSTS;
1829 dev_dbg(hsotg->dev, "GINTSTS @0x%08lX : 0x%08X\n",
1830 (unsigned long)addr, readl(addr));
1831 addr = hsotg->regs + GINTMSK;
1832 dev_dbg(hsotg->dev, "GINTMSK @0x%08lX : 0x%08X\n",
1833 (unsigned long)addr, readl(addr));
1834 addr = hsotg->regs + GRXSTSR;
1835 dev_dbg(hsotg->dev, "GRXSTSR @0x%08lX : 0x%08X\n",
1836 (unsigned long)addr, readl(addr));
1837 addr = hsotg->regs + GRXFSIZ;
1838 dev_dbg(hsotg->dev, "GRXFSIZ @0x%08lX : 0x%08X\n",
1839 (unsigned long)addr, readl(addr));
1840 addr = hsotg->regs + GNPTXFSIZ;
1841 dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n",
1842 (unsigned long)addr, readl(addr));
1843 addr = hsotg->regs + GNPTXSTS;
1844 dev_dbg(hsotg->dev, "GNPTXSTS @0x%08lX : 0x%08X\n",
1845 (unsigned long)addr, readl(addr));
1846 addr = hsotg->regs + GI2CCTL;
1847 dev_dbg(hsotg->dev, "GI2CCTL @0x%08lX : 0x%08X\n",
1848 (unsigned long)addr, readl(addr));
1849 addr = hsotg->regs + GPVNDCTL;
1850 dev_dbg(hsotg->dev, "GPVNDCTL @0x%08lX : 0x%08X\n",
1851 (unsigned long)addr, readl(addr));
1852 addr = hsotg->regs + GGPIO;
1853 dev_dbg(hsotg->dev, "GGPIO @0x%08lX : 0x%08X\n",
1854 (unsigned long)addr, readl(addr));
1855 addr = hsotg->regs + GUID;
1856 dev_dbg(hsotg->dev, "GUID @0x%08lX : 0x%08X\n",
1857 (unsigned long)addr, readl(addr));
1858 addr = hsotg->regs + GSNPSID;
1859 dev_dbg(hsotg->dev, "GSNPSID @0x%08lX : 0x%08X\n",
1860 (unsigned long)addr, readl(addr));
1861 addr = hsotg->regs + GHWCFG1;
1862 dev_dbg(hsotg->dev, "GHWCFG1 @0x%08lX : 0x%08X\n",
1863 (unsigned long)addr, readl(addr));
1864 addr = hsotg->regs + GHWCFG2;
1865 dev_dbg(hsotg->dev, "GHWCFG2 @0x%08lX : 0x%08X\n",
1866 (unsigned long)addr, readl(addr));
1867 addr = hsotg->regs + GHWCFG3;
1868 dev_dbg(hsotg->dev, "GHWCFG3 @0x%08lX : 0x%08X\n",
1869 (unsigned long)addr, readl(addr));
1870 addr = hsotg->regs + GHWCFG4;
1871 dev_dbg(hsotg->dev, "GHWCFG4 @0x%08lX : 0x%08X\n",
1872 (unsigned long)addr, readl(addr));
1873 addr = hsotg->regs + GLPMCFG;
1874 dev_dbg(hsotg->dev, "GLPMCFG @0x%08lX : 0x%08X\n",
1875 (unsigned long)addr, readl(addr));
1876 addr = hsotg->regs + GPWRDN;
1877 dev_dbg(hsotg->dev, "GPWRDN @0x%08lX : 0x%08X\n",
1878 (unsigned long)addr, readl(addr));
1879 addr = hsotg->regs + GDFIFOCFG;
1880 dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n",
1881 (unsigned long)addr, readl(addr));
1882 addr = hsotg->regs + HPTXFSIZ;
1883 dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n",
1884 (unsigned long)addr, readl(addr));
1885
1886 addr = hsotg->regs + PCGCTL;
1887 dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n",
1888 (unsigned long)addr, readl(addr));
1889 #endif
1890 }
1891
1892 /**
1893 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
1894 *
1895 * @hsotg: Programming view of DWC_otg controller
1896 * @num: Tx FIFO to flush
1897 */
1898 void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
1899 {
1900 u32 greset;
1901 int count = 0;
1902
1903 dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
1904
1905 greset = GRSTCTL_TXFFLSH;
1906 greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
1907 writel(greset, hsotg->regs + GRSTCTL);
1908
1909 do {
1910 greset = readl(hsotg->regs + GRSTCTL);
1911 if (++count > 10000) {
1912 dev_warn(hsotg->dev,
1913 "%s() HANG! GRSTCTL=0x%08x GNPTXSTS=0x%08x\n",
1914 __func__, greset,
1915 readl(hsotg->regs + GNPTXSTS));
1916 break;
1917 }
1918 udelay(1);
1919 } while (greset & GRSTCTL_TXFFLSH);
1920
1921 /* Wait for at least 3 PHY Clocks */
1922 udelay(1);
1923 }
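
/*
 * Usage note (hedged, mirroring how this driver's own initialization
 * calls the helper): @num selects a single Tx FIFO, while the value
 * 0x10 in the TxFNum field requests a flush of all transmit FIFOs:
 */
#if 0
	dwc2_flush_tx_fifo(hsotg, 0x10);	/* flush all Tx FIFOs */
	dwc2_flush_rx_fifo(hsotg);
#endif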
1924
1925 /**
1926 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
1927 *
1928 * @hsotg: Programming view of DWC_otg controller
1929 */
1930 void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
1931 {
1932 u32 greset;
1933 int count = 0;
1934
1935 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1936
1937 greset = GRSTCTL_RXFFLSH;
1938 writel(greset, hsotg->regs + GRSTCTL);
1939
1940 do {
1941 greset = readl(hsotg->regs + GRSTCTL);
1942 if (++count > 10000) {
1943 dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=0x%08x\n",
1944 __func__, greset);
1945 break;
1946 }
1947 udelay(1);
1948 } while (greset & GRSTCTL_RXFFLSH);
1949
1950 /* Wait for at least 3 PHY Clocks */
1951 udelay(1);
1952 }
1953
1954 #define DWC2_PARAM_TEST(a, b, c) ((a) < (b) || (a) > (c))
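
/*
 * Example: DWC2_PARAM_TEST(val, 0, 1) is true (out of range) for any
 * value other than 0 or 1, which is how the boolean parameter setters
 * below reject bad values. A negative value also fails the test, but
 * the "if (val >= 0)" checks suppress the error message and a default
 * is chosen instead.
 */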
1955
1956 /* Parameter access functions */
1957 int dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
1958 {
1959 int valid = 1;
1960 int retval = 0;
1961 u32 op_mode;
1962
1963 op_mode = hsotg->hwcfg2 & GHWCFG2_OP_MODE_MASK;
1964
1965 switch (val) {
1966 case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
1967 if (op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
1968 valid = 0;
1969 break;
1970 case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
1971 switch (op_mode) {
1972 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
1973 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
1974 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
1975 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
1976 break;
1977 default:
1978 valid = 0;
1979 break;
1980 }
1981 break;
1982 case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
1983 /* always valid */
1984 break;
1985 default:
1986 valid = 0;
1987 break;
1988 }
1989
1990 if (!valid) {
1991 if (val >= 0)
1992 dev_err(hsotg->dev,
1993 "%d invalid for otg_cap parameter. Check HW configuration.\n",
1994 val);
1995 switch (op_mode) {
1996 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
1997 val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
1998 break;
1999 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2000 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2001 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2002 val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
2003 break;
2004 default:
2005 val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
2006 break;
2007 }
2008 dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
2009 retval = -EINVAL;
2010 }
2011
2012 hsotg->core_params->otg_cap = val;
2013 return retval;
2014 }
2015
2016 int dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
2017 {
2018 int valid = 1;
2019 int retval = 0;
2020
2021 if (val > 0 && (hsotg->hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) ==
2022 GHWCFG2_SLAVE_ONLY_ARCH)
2023 valid = 0;
2024 if (val < 0)
2025 valid = 0;
2026
2027 if (!valid) {
2028 if (val >= 0)
2029 dev_err(hsotg->dev,
2030 "%d invalid for dma_enable parameter. Check HW configuration.\n",
2031 val);
2032 val = (hsotg->hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) !=
2033 GHWCFG2_SLAVE_ONLY_ARCH;
2034 dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
2035 retval = -EINVAL;
2036 }
2037
2038 hsotg->core_params->dma_enable = val;
2039 return retval;
2040 }
2041
2042 int dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
2043 {
2044 int valid = 1;
2045 int retval = 0;
2046
2047 if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
2048 !(hsotg->hwcfg4 & GHWCFG4_DESC_DMA)))
2049 valid = 0;
2050 if (val < 0)
2051 valid = 0;
2052
2053 if (!valid) {
2054 if (val >= 0)
2055 dev_err(hsotg->dev,
2056 "%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
2057 val);
2058 val = (hsotg->core_params->dma_enable > 0 &&
2059 (hsotg->hwcfg4 & GHWCFG4_DESC_DMA));
2060 dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
2061 retval = -EINVAL;
2062 }
2063
2064 hsotg->core_params->dma_desc_enable = val;
2065 return retval;
2066 }
2067
2068 int dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
2069 int val)
2070 {
2071 int retval = 0;
2072
2073 if (DWC2_PARAM_TEST(val, 0, 1)) {
2074 if (val >= 0) {
2075 dev_err(hsotg->dev,
2076 "Wrong value for host_support_fs_ls_low_power\n");
2077 dev_err(hsotg->dev,
2078 "host_support_fs_ls_low_power must be 0 or 1\n");
2079 }
2080 val = 0;
2081 dev_dbg(hsotg->dev,
2082 "Setting host_support_fs_ls_low_power to %d\n", val);
2083 retval = -EINVAL;
2084 }
2085
2086 hsotg->core_params->host_support_fs_ls_low_power = val;
2087 return retval;
2088 }
2089
2090 int dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
2091 {
2092 int valid = 1;
2093 int retval = 0;
2094
2095 if (val > 0 && !(hsotg->hwcfg2 & GHWCFG2_DYNAMIC_FIFO))
2096 valid = 0;
2097 if (val < 0)
2098 valid = 0;
2099
2100 if (!valid) {
2101 if (val >= 0)
2102 dev_err(hsotg->dev,
2103 "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
2104 val);
2105 val = !!(hsotg->hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
2106 dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
2107 retval = -EINVAL;
2108 }
2109
2110 hsotg->core_params->enable_dynamic_fifo = val;
2111 return retval;
2112 }
2113
2114 int dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2115 {
2116 int valid = 1;
2117 int retval = 0;
2118
2119 if (val < 16 || val > readl(hsotg->regs + GRXFSIZ))
2120 valid = 0;
2121
2122 if (!valid) {
2123 if (val >= 0)
2124 dev_err(hsotg->dev,
2125 "%d invalid for host_rx_fifo_size. Check HW configuration.\n",
2126 val);
2127 val = readl(hsotg->regs + GRXFSIZ);
2128 dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
2129 retval = -EINVAL;
2130 }
2131
2132 hsotg->core_params->host_rx_fifo_size = val;
2133 return retval;
2134 }
2135
2136 int dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2137 {
2138 int valid = 1;
2139 int retval = 0;
2140
2141 if (val < 16 || val > (readl(hsotg->regs + GNPTXFSIZ) >> 16 & 0xffff))
2142 valid = 0;
2143
2144 if (!valid) {
2145 if (val >= 0)
2146 dev_err(hsotg->dev,
2147 "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
2148 val);
2149 val = readl(hsotg->regs + GNPTXFSIZ) >> 16 & 0xffff;
2150 dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
2151 val);
2152 retval = -EINVAL;
2153 }
2154
2155 hsotg->core_params->host_nperio_tx_fifo_size = val;
2156 return retval;
2157 }
2158
2159 int dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2160 {
2161 int valid = 1;
2162 int retval = 0;
2163
2164 if (val < 16 || val > (hsotg->hptxfsiz >> 16))
2165 valid = 0;
2166
2167 if (!valid) {
2168 if (val >= 0)
2169 dev_err(hsotg->dev,
2170 "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
2171 val);
2172 val = hsotg->hptxfsiz >> 16;
2173 dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
2174 val);
2175 retval = -EINVAL;
2176 }
2177
2178 hsotg->core_params->host_perio_tx_fifo_size = val;
2179 return retval;
2180 }
2181
2182 int dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
2183 {
2184 int valid = 1;
2185 int retval = 0;
2186 int width = hsotg->hwcfg3 >> GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT &
2187 GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK >>
2188 GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
2189
2190 if (val < 2047 || val >= (1 << (width + 11)))
2191 valid = 0;
2192
2193 if (!valid) {
2194 if (val >= 0)
2195 dev_err(hsotg->dev,
2196 "%d invalid for max_transfer_size. Check HW configuration.\n",
2197 val);
2198 val = (1 << (width + 11)) - 1;
2199 dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
2200 retval = -EINVAL;
2201 }
2202
2203 hsotg->core_params->max_transfer_size = val;
2204 return retval;
2205 }
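
/*
 * Worked example: if the GHWCFG3 transfer size counter width field
 * reads 8, the largest programmable transfer is (1 << (8 + 11)) - 1 =
 * 524287 bytes, which is also the fallback used when the requested
 * max_transfer_size is out of range.
 */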
2206
2207 int dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
2208 {
2209 int valid = 1;
2210 int retval = 0;
2211 int width = hsotg->hwcfg3 >> GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT &
2212 GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK >>
2213 GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
2214
2215 if (val < 15 || val > (1 << (width + 4)))
2216 valid = 0;
2217
2218 if (!valid) {
2219 if (val >= 0)
2220 dev_err(hsotg->dev,
2221 "%d invalid for max_packet_count. Check HW configuration.\n",
2222 val);
2223 val = (1 << (width + 4)) - 1;
2224 dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
2225 retval = -EINVAL;
2226 }
2227
2228 hsotg->core_params->max_packet_count = val;
2229 return retval;
2230 }
2231
2232 int dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
2233 {
2234 int valid = 1;
2235 int retval = 0;
2236 int num_chan = hsotg->hwcfg2 >> GHWCFG2_NUM_HOST_CHAN_SHIFT &
2237 GHWCFG2_NUM_HOST_CHAN_MASK >> GHWCFG2_NUM_HOST_CHAN_SHIFT;
2238
2239 if (val < 1 || val > num_chan + 1)
2240 valid = 0;
2241
2242 if (!valid) {
2243 if (val >= 0)
2244 dev_err(hsotg->dev,
2245 "%d invalid for host_channels. Check HW configuration.\n",
2246 val);
2247 val = num_chan + 1;
2248 dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
2249 retval = -EINVAL;
2250 }
2251
2252 hsotg->core_params->host_channels = val;
2253 return retval;
2254 }
2255
2256 int dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
2257 {
2258 #ifndef NO_FS_PHY_HW_CHECKS
2259 int valid = 0;
2260 u32 hs_phy_type;
2261 u32 fs_phy_type;
2262 #endif
2263 int retval = 0;
2264
2265 if (DWC2_PARAM_TEST(val, DWC2_PHY_TYPE_PARAM_FS,
2266 DWC2_PHY_TYPE_PARAM_ULPI)) {
2267 if (val >= 0) {
2268 dev_err(hsotg->dev, "Wrong value for phy_type\n");
2269 dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
2270 }
2271
2272 #ifndef NO_FS_PHY_HW_CHECKS
2273 valid = 0;
2274 #else
2275 val = DWC2_PHY_TYPE_PARAM_FS;
2276 dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
2277 retval = -EINVAL;
2278 #endif
2279 }
2280
2281 #ifndef NO_FS_PHY_HW_CHECKS
2282 hs_phy_type = hsotg->hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK;
2283 fs_phy_type = hsotg->hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK;
2284
2285 if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
2286 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2287 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2288 valid = 1;
2289 else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
2290 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
2291 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2292 valid = 1;
2293 else if (val == DWC2_PHY_TYPE_PARAM_FS &&
2294 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
2295 valid = 1;
2296
2297 if (!valid) {
2298 if (val >= 0)
2299 dev_err(hsotg->dev,
2300 "%d invalid for phy_type. Check HW configuration.\n",
2301 val);
2302 val = DWC2_PHY_TYPE_PARAM_FS;
2303 if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
2304 if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2305 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
2306 val = DWC2_PHY_TYPE_PARAM_UTMI;
2307 else
2308 val = DWC2_PHY_TYPE_PARAM_ULPI;
2309 }
2310 dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
2311 retval = -EINVAL;
2312 }
2313 #endif
2314
2315 hsotg->core_params->phy_type = val;
2316 return retval;
2317 }
2318
2319 static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
2320 {
2321 return hsotg->core_params->phy_type;
2322 }
2323
2324 int dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
2325 {
2326 int valid = 1;
2327 int retval = 0;
2328
2329 if (DWC2_PARAM_TEST(val, 0, 1)) {
2330 if (val >= 0) {
2331 dev_err(hsotg->dev, "Wrong value for speed parameter\n");
2332 dev_err(hsotg->dev, "speed parameter must be 0 or 1\n");
2333 }
2334 valid = 0;
2335 }
2336
2337 if (val == DWC2_SPEED_PARAM_HIGH &&
2338 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2339 valid = 0;
2340
2341 if (!valid) {
2342 if (val >= 0)
2343 dev_err(hsotg->dev,
2344 "%d invalid for speed parameter. Check HW configuration.\n",
2345 val);
2346 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
2347 DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
2348 dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
2349 retval = -EINVAL;
2350 }
2351
2352 hsotg->core_params->speed = val;
2353 return retval;
2354 }
2355
2356 int dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
2357 {
2358 int valid = 1;
2359 int retval = 0;
2360
2361 if (DWC2_PARAM_TEST(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
2362 DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
2363 if (val >= 0) {
2364 dev_err(hsotg->dev,
2365 "Wrong value for host_ls_low_power_phy_clk parameter\n");
2366 dev_err(hsotg->dev,
2367 "host_ls_low_power_phy_clk must be 0 or 1\n");
2368 }
2369 valid = 0;
2370 }
2371
2372 if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
2373 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2374 valid = 0;
2375
2376 if (!valid) {
2377 if (val >= 0)
2378 dev_err(hsotg->dev,
2379 "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
2380 val);
2381 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
2382 ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
2383 : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
2384 dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
2385 val);
2386 retval = -EINVAL;
2387 }
2388
2389 hsotg->core_params->host_ls_low_power_phy_clk = val;
2390 return retval;
2391 }
2392
2393 int dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
2394 {
2395 int retval = 0;
2396
2397 if (DWC2_PARAM_TEST(val, 0, 1)) {
2398 if (val >= 0) {
2399 dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
2400 dev_err(hsotg->dev, "phy_ulpi_ddr must be 0 or 1\n");
2401 }
2402 val = 0;
2403 dev_dbg(hsotg->dev, "Setting phy_ulpi_ddr to %d\n", val);
2404 retval = -EINVAL;
2405 }
2406
2407 hsotg->core_params->phy_ulpi_ddr = val;
2408 return retval;
2409 }
2410
2411 int dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
2412 {
2413 int retval = 0;
2414
2415 if (DWC2_PARAM_TEST(val, 0, 1)) {
2416 if (val >= 0) {
2417 dev_err(hsotg->dev,
2418 "Wrong value for phy_ulpi_ext_vbus\n");
2419 dev_err(hsotg->dev,
2420 "phy_ulpi_ext_vbus must be 0 or 1\n");
2421 }
2422 val = 0;
2423 dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
2424 retval = -EINVAL;
2425 }
2426
2427 hsotg->core_params->phy_ulpi_ext_vbus = val;
2428 return retval;
2429 }
2430
2431 int dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
2432 {
2433 int retval = 0;
2434
2435 if (DWC2_PARAM_TEST(val, 8, 8) && DWC2_PARAM_TEST(val, 16, 16)) {
2436 if (val >= 0) {
2437 dev_err(hsotg->dev, "Wrong value for phy_utmi_width\n");
2438 dev_err(hsotg->dev, "phy_utmi_width must be 8 or 16\n");
2439 }
2440 val = 8;
2441 dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
2442 retval = -EINVAL;
2443 }
2444
2445 hsotg->core_params->phy_utmi_width = val;
2446 return retval;
2447 }
2448
2449 int dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
2450 {
2451 int retval = 0;
2452
2453 if (DWC2_PARAM_TEST(val, 0, 1)) {
2454 if (val >= 0) {
2455 dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
2456 dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
2457 }
2458 val = 0;
2459 dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
2460 retval = -EINVAL;
2461 }
2462
2463 hsotg->core_params->ulpi_fs_ls = val;
2464 return retval;
2465 }
2466
2467 int dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
2468 {
2469 int retval = 0;
2470
2471 if (DWC2_PARAM_TEST(val, 0, 1)) {
2472 if (val >= 0) {
2473 dev_err(hsotg->dev, "Wrong value for ts_dline\n");
2474 dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
2475 }
2476 val = 0;
2477 dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
2478 retval = -EINVAL;
2479 }
2480
2481 hsotg->core_params->ts_dline = val;
2482 return retval;
2483 }
2484
2485 int dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
2486 {
2487 #ifndef NO_FS_PHY_HW_CHECKS
2488 int valid = 1;
2489 #endif
2490 int retval = 0;
2491
2492 if (DWC2_PARAM_TEST(val, 0, 1)) {
2493 if (val >= 0) {
2494 dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
2495 dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
2496 }
2497
2498 #ifndef NO_FS_PHY_HW_CHECKS
2499 valid = 0;
2500 #else
2501 val = 0;
2502 dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
2503 retval = -EINVAL;
2504 #endif
2505 }
2506
2507 #ifndef NO_FS_PHY_HW_CHECKS
2508 if (val == 1 && !(hsotg->hwcfg3 & GHWCFG3_I2C))
2509 valid = 0;
2510
2511 if (!valid) {
2512 if (val >= 0)
2513 dev_err(hsotg->dev,
2514 "%d invalid for i2c_enable. Check HW configuration.\n",
2515 val);
2516 val = !!(hsotg->hwcfg3 & GHWCFG3_I2C);
2517 dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
2518 retval = -EINVAL;
2519 }
2520 #endif
2521
2522 hsotg->core_params->i2c_enable = val;
2523 return retval;
2524 }
2525
2526 int dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
2527 {
2528 int valid = 1;
2529 int retval = 0;
2530
2531 if (DWC2_PARAM_TEST(val, 0, 1)) {
2532 if (val >= 0) {
2533 dev_err(hsotg->dev,
2534 "Wrong value for en_multiple_tx_fifo\n");
2535 dev_err(hsotg->dev,
2536 "en_multiple_tx_fifo must be 0 or 1\n");
2537 }
2538 valid = 0;
2539 }
2540
2541 if (val == 1 && !(hsotg->hwcfg4 & GHWCFG4_DED_FIFO_EN))
2542 valid = 0;
2543
2544 if (!valid) {
2545 if (val >= 0)
2546 dev_err(hsotg->dev,
2547 "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
2548 val);
2549 val = !!(hsotg->hwcfg4 & GHWCFG4_DED_FIFO_EN);
2550 dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
2551 retval = -EINVAL;
2552 }
2553
2554 hsotg->core_params->en_multiple_tx_fifo = val;
2555 return retval;
2556 }
2557
2558 int dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
2559 {
2560 int valid = 1;
2561 int retval = 0;
2562
2563 if (DWC2_PARAM_TEST(val, 0, 1)) {
2564 if (val >= 0) {
2565 dev_err(hsotg->dev,
2566 "'%d' invalid for parameter reload_ctl\n", val);
2567 dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
2568 }
2569 valid = 0;
2570 }
2571
2572 if (val == 1 && hsotg->snpsid < DWC2_CORE_REV_2_92a)
2573 valid = 0;
2574
2575 if (!valid) {
2576 if (val >= 0)
2577 dev_err(hsotg->dev,
2578 "%d invalid for parameter reload_ctl. Check HW configuration.\n",
2579 val);
2580 val = hsotg->snpsid >= DWC2_CORE_REV_2_92a;
2581 dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
2582 retval = -EINVAL;
2583 }
2584
2585 hsotg->core_params->reload_ctl = val;
2586 return retval;
2587 }
2588
2589 int dwc2_set_param_ahb_single(struct dwc2_hsotg *hsotg, int val)
2590 {
2591 int valid = 1;
2592 int retval = 0;
2593
2594 if (DWC2_PARAM_TEST(val, 0, 1)) {
2595 if (val >= 0) {
2596 dev_err(hsotg->dev,
2597 "'%d' invalid for parameter ahb_single\n", val);
2598 dev_err(hsotg->dev, "ahb_single must be 0 or 1\n");
2599 }
2600 valid = 0;
2601 }
2602
2603 if (val > 0 && hsotg->snpsid < DWC2_CORE_REV_2_94a)
2604 valid = 0;
2605
2606 if (!valid) {
2607 if (val >= 0)
2608 dev_err(hsotg->dev,
2609 "%d invalid for parameter ahb_single. Check HW configuration.\n",
2610 val);
2611 val = 0;
2612 dev_dbg(hsotg->dev, "Setting ahb_single to %d\n", val);
2613 retval = -EINVAL;
2614 }
2615
2616 hsotg->core_params->ahb_single = val;
2617 return retval;
2618 }
2619
2620 int dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
2621 {
2622 int retval = 0;
2623
2624 if (DWC2_PARAM_TEST(val, 0, 1)) {
2625 if (val >= 0) {
2626 dev_err(hsotg->dev,
2627 "'%d' invalid for parameter otg_ver\n", val);
2628 dev_err(hsotg->dev,
2629 "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
2630 }
2631 val = 0;
2632 dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
2633 retval = -EINVAL;
2634 }
2635
2636 hsotg->core_params->otg_ver = val;
2637 return retval;
2638 }
2639
2640 /*
2641 * This function is called during module initialization to pass module parameters
2642 * for the DWC_otg core. It returns non-0 if any parameters are invalid.
2643 */
2644 int dwc2_set_parameters(struct dwc2_hsotg *hsotg,
2645 struct dwc2_core_params *params)
2646 {
2647 int retval = 0;
2648
2649 dev_dbg(hsotg->dev, "%s()\n", __func__);
2650
2651 retval |= dwc2_set_param_otg_cap(hsotg, params->otg_cap);
2652 retval |= dwc2_set_param_dma_enable(hsotg, params->dma_enable);
2653 retval |= dwc2_set_param_dma_desc_enable(hsotg,
2654 params->dma_desc_enable);
2655 retval |= dwc2_set_param_host_support_fs_ls_low_power(hsotg,
2656 params->host_support_fs_ls_low_power);
2657 retval |= dwc2_set_param_enable_dynamic_fifo(hsotg,
2658 params->enable_dynamic_fifo);
2659 retval |= dwc2_set_param_host_rx_fifo_size(hsotg,
2660 params->host_rx_fifo_size);
2661 retval |= dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
2662 params->host_nperio_tx_fifo_size);
2663 retval |= dwc2_set_param_host_perio_tx_fifo_size(hsotg,
2664 params->host_perio_tx_fifo_size);
2665 retval |= dwc2_set_param_max_transfer_size(hsotg,
2666 params->max_transfer_size);
2667 retval |= dwc2_set_param_max_packet_count(hsotg,
2668 params->max_packet_count);
2669 retval |= dwc2_set_param_host_channels(hsotg, params->host_channels);
2670 retval |= dwc2_set_param_phy_type(hsotg, params->phy_type);
2671 retval |= dwc2_set_param_speed(hsotg, params->speed);
2672 retval |= dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
2673 params->host_ls_low_power_phy_clk);
2674 retval |= dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
2675 retval |= dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
2676 params->phy_ulpi_ext_vbus);
2677 retval |= dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
2678 retval |= dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
2679 retval |= dwc2_set_param_ts_dline(hsotg, params->ts_dline);
2680 retval |= dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
2681 retval |= dwc2_set_param_en_multiple_tx_fifo(hsotg,
2682 params->en_multiple_tx_fifo);
2683 retval |= dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
2684 retval |= dwc2_set_param_ahb_single(hsotg, params->ahb_single);
2685 retval |= dwc2_set_param_otg_ver(hsotg, params->otg_ver);
2686
2687 return retval;
2688 }
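
/*
 * Illustrative sketch of how platform glue might drive this function
 * (hedged: the structure fields mirror the dwc2_core_params members
 * used above, but the values and example_probe() are hypothetical).
 * A value of -1 asks the individual setters to fall back to whatever
 * the hardware configuration registers report:
 */
#if 0
static const struct dwc2_core_params example_params = {
	.otg_cap		= DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE,
	.dma_enable		= -1,	/* auto-detect from GHWCFG2 */
	.dma_desc_enable	= 0,
	.speed			= -1,	/* auto-detect */
	.host_channels		= -1,	/* auto-detect */
	.phy_type		= -1,	/* auto-detect */
	/* remaining members would need explicit values or -1 as well */
};

static int example_probe(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params params = example_params;

	return dwc2_set_parameters(hsotg, &params);
}
#endif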
2689
2690 u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
2691 {
2692 return (u16)(hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103);
2693 }
2694
2695 int dwc2_check_core_status(struct dwc2_hsotg *hsotg)
2696 {
2697 if (readl(hsotg->regs + GSNPSID) == 0xffffffff)
2698 return -1;
2699 else
2700 return 0;
2701 }
2702
2703 /**
2704 * dwc2_enable_global_interrupts() - Enables the controller's Global
2705 * Interrupt in the AHB Config register
2706 *
2707 * @hsotg: Programming view of DWC_otg controller
2708 */
2709 void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
2710 {
2711 u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
2712
2713 ahbcfg |= GAHBCFG_GLBL_INTR_EN;
2714 writel(ahbcfg, hsotg->regs + GAHBCFG);
2715 }
2716
2717 /**
2718 * dwc2_disable_global_interrupts() - Disables the controller's Global
2719 * Interrupt in the AHB Config register
2720 *
2721 * @hsotg: Programming view of DWC_otg controller
2722 */
2723 void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
2724 {
2725 u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
2726
2727 ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
2728 writel(ahbcfg, hsotg->regs + GAHBCFG);
2729 }
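
/*
 * Hedged usage sketch: callers typically keep the global interrupt bit
 * masked while (re)initializing the core and enable it only after the
 * interrupt handler has been installed. Everything here other than the
 * two helpers above is hypothetical:
 */
#if 0
	dwc2_disable_global_interrupts(hsotg);
	example_core_and_hcd_setup(hsotg);	/* hypothetical init/IRQ setup */
	dwc2_enable_global_interrupts(hsotg);
#endif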
2730
2731 MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
2732 MODULE_AUTHOR("Synopsys, Inc.");
2733 MODULE_LICENSE("Dual BSD/GPL");