usb: gadget: pch_udc: enable MSI if hardware supports
drivers/usb/gadget/udc/pch_udc.c
1 /*
2 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 */
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/delay.h>
13 #include <linux/errno.h>
14 #include <linux/list.h>
15 #include <linux/interrupt.h>
16 #include <linux/usb/ch9.h>
17 #include <linux/usb/gadget.h>
18 #include <linux/gpio.h>
19 #include <linux/irq.h>
20
21 /* GPIO port for VBUS detecting */
22 static int vbus_gpio_port = -1; /* GPIO port number (-1:Not used) */
23
24 #define PCH_VBUS_PERIOD 3000 /* VBUS polling period (msec) */
25 #define PCH_VBUS_INTERVAL 10 /* VBUS polling interval (msec) */
26
27 /* Address offset of Registers */
28 #define UDC_EP_REG_SHIFT 0x20 /* Offset to next EP */
29
30 #define UDC_EPCTL_ADDR 0x00 /* Endpoint control */
31 #define UDC_EPSTS_ADDR 0x04 /* Endpoint status */
32 #define UDC_BUFIN_FRAMENUM_ADDR 0x08 /* buffer size in / frame number out */
33 #define UDC_BUFOUT_MAXPKT_ADDR 0x0C /* buffer size out / maxpkt in */
34 #define UDC_SUBPTR_ADDR 0x10 /* setup buffer pointer */
35 #define UDC_DESPTR_ADDR 0x14 /* Data descriptor pointer */
36 #define UDC_CONFIRM_ADDR 0x18 /* Write/Read confirmation */
37
38 #define UDC_DEVCFG_ADDR 0x400 /* Device configuration */
39 #define UDC_DEVCTL_ADDR 0x404 /* Device control */
40 #define UDC_DEVSTS_ADDR 0x408 /* Device status */
41 #define UDC_DEVIRQSTS_ADDR 0x40C /* Device irq status */
42 #define UDC_DEVIRQMSK_ADDR 0x410 /* Device irq mask */
43 #define UDC_EPIRQSTS_ADDR 0x414 /* Endpoint irq status */
44 #define UDC_EPIRQMSK_ADDR 0x418 /* Endpoint irq mask */
45 #define UDC_DEVLPM_ADDR 0x41C /* LPM control / status */
46 #define UDC_CSR_BUSY_ADDR 0x4f0 /* UDC_CSR_BUSY Status register */
47 #define UDC_SRST_ADDR 0x4fc /* SOFT RESET register */
48 #define UDC_CSR_ADDR 0x500 /* USB_DEVICE endpoint register */
49
50 /* Endpoint control register */
51 /* Bit position */
52 #define UDC_EPCTL_MRXFLUSH (1 << 12)
53 #define UDC_EPCTL_RRDY (1 << 9)
54 #define UDC_EPCTL_CNAK (1 << 8)
55 #define UDC_EPCTL_SNAK (1 << 7)
56 #define UDC_EPCTL_NAK (1 << 6)
57 #define UDC_EPCTL_P (1 << 3)
58 #define UDC_EPCTL_F (1 << 1)
59 #define UDC_EPCTL_S (1 << 0)
60 #define UDC_EPCTL_ET_SHIFT 4
61 /* Mask pattern */
62 #define UDC_EPCTL_ET_MASK 0x00000030
63 /* Value for ET field */
64 #define UDC_EPCTL_ET_CONTROL 0
65 #define UDC_EPCTL_ET_ISO 1
66 #define UDC_EPCTL_ET_BULK 2
67 #define UDC_EPCTL_ET_INTERRUPT 3
68
69 /* Endpoint status register */
70 /* Bit position */
71 #define UDC_EPSTS_XFERDONE (1 << 27)
72 #define UDC_EPSTS_RSS (1 << 26)
73 #define UDC_EPSTS_RCS (1 << 25)
74 #define UDC_EPSTS_TXEMPTY (1 << 24)
75 #define UDC_EPSTS_TDC (1 << 10)
76 #define UDC_EPSTS_HE (1 << 9)
77 #define UDC_EPSTS_MRXFIFO_EMP (1 << 8)
78 #define UDC_EPSTS_BNA (1 << 7)
79 #define UDC_EPSTS_IN (1 << 6)
80 #define UDC_EPSTS_OUT_SHIFT 4
81 /* Mask pattern */
82 #define UDC_EPSTS_OUT_MASK 0x00000030
83 #define UDC_EPSTS_ALL_CLR_MASK 0x1F0006F0
84 /* Value for OUT field */
85 #define UDC_EPSTS_OUT_SETUP 2
86 #define UDC_EPSTS_OUT_DATA 1
87
88 /* Device configuration register */
89 /* Bit position */
90 #define UDC_DEVCFG_CSR_PRG (1 << 17)
91 #define UDC_DEVCFG_SP (1 << 3)
92 /* SPD Value */
93 #define UDC_DEVCFG_SPD_HS 0x0
94 #define UDC_DEVCFG_SPD_FS 0x1
95 #define UDC_DEVCFG_SPD_LS 0x2
96
97 /* Device control register */
98 /* Bit position */
99 #define UDC_DEVCTL_THLEN_SHIFT 24
100 #define UDC_DEVCTL_BRLEN_SHIFT 16
101 #define UDC_DEVCTL_CSR_DONE (1 << 13)
102 #define UDC_DEVCTL_SD (1 << 10)
103 #define UDC_DEVCTL_MODE (1 << 9)
104 #define UDC_DEVCTL_BREN (1 << 8)
105 #define UDC_DEVCTL_THE (1 << 7)
106 #define UDC_DEVCTL_DU (1 << 4)
107 #define UDC_DEVCTL_TDE (1 << 3)
108 #define UDC_DEVCTL_RDE (1 << 2)
109 #define UDC_DEVCTL_RES (1 << 0)
110
111 /* Device status register */
112 /* Bit position */
113 #define UDC_DEVSTS_TS_SHIFT 18
114 #define UDC_DEVSTS_ENUM_SPEED_SHIFT 13
115 #define UDC_DEVSTS_ALT_SHIFT 8
116 #define UDC_DEVSTS_INTF_SHIFT 4
117 #define UDC_DEVSTS_CFG_SHIFT 0
118 /* Mask pattern */
119 #define UDC_DEVSTS_TS_MASK 0xfffc0000
120 #define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000
121 #define UDC_DEVSTS_ALT_MASK 0x00000f00
122 #define UDC_DEVSTS_INTF_MASK 0x000000f0
123 #define UDC_DEVSTS_CFG_MASK 0x0000000f
124 /* Values of speed reported in the ENUM_SPEED field */
125 #define UDC_DEVSTS_ENUM_SPEED_FULL 1
126 #define UDC_DEVSTS_ENUM_SPEED_HIGH 0
127 #define UDC_DEVSTS_ENUM_SPEED_LOW 2
128 #define UDC_DEVSTS_ENUM_SPEED_FULLX 3
129
130 /* Device irq register */
131 /* Bit position */
132 #define UDC_DEVINT_RWKP (1 << 7)
133 #define UDC_DEVINT_ENUM (1 << 6)
134 #define UDC_DEVINT_SOF (1 << 5)
135 #define UDC_DEVINT_US (1 << 4)
136 #define UDC_DEVINT_UR (1 << 3)
137 #define UDC_DEVINT_ES (1 << 2)
138 #define UDC_DEVINT_SI (1 << 1)
139 #define UDC_DEVINT_SC (1 << 0)
140 /* Mask pattern */
141 #define UDC_DEVINT_MSK 0x7f
142
143 /* Endpoint irq register */
144 /* Bit position */
145 #define UDC_EPINT_IN_SHIFT 0
146 #define UDC_EPINT_OUT_SHIFT 16
147 #define UDC_EPINT_IN_EP0 (1 << 0)
148 #define UDC_EPINT_OUT_EP0 (1 << 16)
149 /* Mask pattern */
150 #define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff
151
152 /* UDC_CSR_BUSY Status register */
153 /* Bit position */
154 #define UDC_CSR_BUSY (1 << 0)
155
156 /* SOFT RESET register */
157 /* Bit position */
158 #define UDC_PSRST (1 << 1)
159 #define UDC_SRST (1 << 0)
160
161 /* USB_DEVICE endpoint register */
162 /* Bit position */
163 #define UDC_CSR_NE_NUM_SHIFT 0
164 #define UDC_CSR_NE_DIR_SHIFT 4
165 #define UDC_CSR_NE_TYPE_SHIFT 5
166 #define UDC_CSR_NE_CFG_SHIFT 7
167 #define UDC_CSR_NE_INTF_SHIFT 11
168 #define UDC_CSR_NE_ALT_SHIFT 15
169 #define UDC_CSR_NE_MAX_PKT_SHIFT 19
170 /* Mask pattern */
171 #define UDC_CSR_NE_NUM_MASK 0x0000000f
172 #define UDC_CSR_NE_DIR_MASK 0x00000010
173 #define UDC_CSR_NE_TYPE_MASK 0x00000060
174 #define UDC_CSR_NE_CFG_MASK 0x00000780
175 #define UDC_CSR_NE_INTF_MASK 0x00007800
176 #define UDC_CSR_NE_ALT_MASK 0x00078000
177 #define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000
178
179 #define PCH_UDC_CSR(ep) (UDC_CSR_ADDR + ep*4)
180 #define PCH_UDC_EPINT(in, num)\
181 (1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
182
183 /* Index of endpoint */
184 #define UDC_EP0IN_IDX 0
185 #define UDC_EP0OUT_IDX 1
186 #define UDC_EPIN_IDX(ep) (ep * 2)
187 #define UDC_EPOUT_IDX(ep) (ep * 2 + 1)
188 #define PCH_UDC_EP0 0
189 #define PCH_UDC_EP1 1
190 #define PCH_UDC_EP2 2
191 #define PCH_UDC_EP3 3
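
/*
 * Worked example of the addressing macros above: each endpoint owns a
 * 0x20-byte register block (UDC_EP_REG_SHIFT), the per-endpoint CSR
 * words start at UDC_CSR_ADDR, and the endpoint interrupt registers keep
 * IN endpoints in bits 15:0 and OUT endpoints in bits 31:16:
 *
 *   PCH_UDC_CSR(3)      == 0x500 + 3 * 4 == 0x50C
 *   PCH_UDC_EPINT(1, 2) == 1 << (2 + 0)  == bit 2  (EP2 IN)
 *   PCH_UDC_EPINT(0, 2) == 1 << (2 + 16) == bit 18 (EP2 OUT)
 *   UDC_EPIN_IDX(1) == 2, UDC_EPOUT_IDX(1) == 3    (indices interleave)
 */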
192
193 /* Number of endpoint */
194 #define PCH_UDC_EP_NUM 32 /* Total number of EPs (16 IN,16 OUT) */
195 #define PCH_UDC_USED_EP_NUM 4 /* Number of EPs actually used */
196 /* Length Value */
197 #define PCH_UDC_BRLEN 0x0F /* Burst length */
198 #define PCH_UDC_THLEN 0x1F /* Threshold length */
199 /* Value of EP Buffer Size */
200 #define UDC_EP0IN_BUFF_SIZE 16
201 #define UDC_EPIN_BUFF_SIZE 256
202 #define UDC_EP0OUT_BUFF_SIZE 16
203 #define UDC_EPOUT_BUFF_SIZE 256
204 /* Value of EP maximum packet size */
205 #define UDC_EP0IN_MAX_PKT_SIZE 64
206 #define UDC_EP0OUT_MAX_PKT_SIZE 64
207 #define UDC_BULK_MAX_PKT_SIZE 512
208
209 /* DMA */
210 #define DMA_DIR_RX 1 /* DMA for data receive */
211 #define DMA_DIR_TX 2 /* DMA for data transmit */
212 #define DMA_ADDR_INVALID (~(dma_addr_t)0)
213 #define UDC_DMA_MAXPACKET 65536 /* maximum packet size for DMA */
214
215 /**
216 * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
217 * for data
218 * @status: Status quadlet
219 * @reserved: Reserved
220 * @dataptr: Buffer descriptor
221 * @next: Next descriptor
222 */
223 struct pch_udc_data_dma_desc {
224 u32 status;
225 u32 reserved;
226 u32 dataptr;
227 u32 next;
228 };
229
230 /**
231 * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information
232 * for control data
233 * @status: Status
234 * @reserved: Reserved
235 * @request: embedded usb_ctrlrequest holding the two setup words
236 * received from the host
237 */
238 struct pch_udc_stp_dma_desc {
239 u32 status;
240 u32 reserved;
241 struct usb_ctrlrequest request;
242 } __attribute((packed));
243
244 /* DMA status definitions */
245 /* Buffer status */
246 #define PCH_UDC_BUFF_STS 0xC0000000
247 #define PCH_UDC_BS_HST_RDY 0x00000000
248 #define PCH_UDC_BS_DMA_BSY 0x40000000
249 #define PCH_UDC_BS_DMA_DONE 0x80000000
250 #define PCH_UDC_BS_HST_BSY 0xC0000000
251 /* Rx/Tx Status */
252 #define PCH_UDC_RXTX_STS 0x30000000
253 #define PCH_UDC_RTS_SUCC 0x00000000
254 #define PCH_UDC_RTS_DESERR 0x10000000
255 #define PCH_UDC_RTS_BUFERR 0x30000000
256 /* Last Descriptor Indication */
257 #define PCH_UDC_DMA_LAST 0x08000000
258 /* Number of Rx/Tx Bytes Mask */
259 #define PCH_UDC_RXTX_BYTES 0x0000ffff
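
/*
 * Decoding a completed descriptor uses the masks above; for instance a
 * hypothetical helper (illustration only, not part of the driver):
 *
 *   static inline bool pch_udc_td_done(struct pch_udc_data_dma_desc *td)
 *   {
 *       return (td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE &&
 *              (td->status & PCH_UDC_RXTX_STS) == PCH_UDC_RTS_SUCC;
 *   }
 *
 * and the byte count for that descriptor is td->status & PCH_UDC_RXTX_BYTES.
 */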
260
261 /**
262 * struct pch_udc_cfg_data - Structure to hold current configuration
263 * and interface information
264 * @cur_cfg: current configuration in use
265 * @cur_intf: current interface in use
266 * @cur_alt: current alt interface in use
267 */
268 struct pch_udc_cfg_data {
269 u16 cur_cfg;
270 u16 cur_intf;
271 u16 cur_alt;
272 };
273
274 /**
275 * struct pch_udc_ep - Structure holding a PCH USB device Endpoint information
276 * @ep: embedded usb endpoint structure
277 * @td_stp_phys: for setup request
278 * @td_data_phys: for data request
279 * @td_stp: for setup request
280 * @td_data: for data request
281 * @dev: reference to device struct
282 * @offset_addr: offset address of ep register
283 * @desc: for this ep
284 * @queue: queue for requests
285 * @num: endpoint number
286 * @in: endpoint is IN
287 * @halted: endpoint halted?
288 * @epsts: Endpoint status
289 */
290 struct pch_udc_ep {
291 struct usb_ep ep;
292 dma_addr_t td_stp_phys;
293 dma_addr_t td_data_phys;
294 struct pch_udc_stp_dma_desc *td_stp;
295 struct pch_udc_data_dma_desc *td_data;
296 struct pch_udc_dev *dev;
297 unsigned long offset_addr;
298 struct list_head queue;
299 unsigned num:5,
300 in:1,
301 halted:1;
302 unsigned long epsts;
303 };
304
305 /**
306 * struct pch_vbus_gpio_data - Structure holding GPIO information
307 * for detecting VBUS
308 * @port: gpio port number
309 * @intr: gpio interrupt number
310 * @irq_work_fall: Structure for WorkQueue
311 * @irq_work_rise: Structure for WorkQueue
312 */
313 struct pch_vbus_gpio_data {
314 int port;
315 int intr;
316 struct work_struct irq_work_fall;
317 struct work_struct irq_work_rise;
318 };
319
320 /**
321 * struct pch_udc_dev - Structure holding complete information
322 * of the PCH USB device
323 * @gadget: gadget driver data
324 * @driver: reference to gadget driver bound
325 * @pdev: reference to the PCI device
326 * @ep: array of endpoints
327 * @lock: protects all state
328 * @stall: stall requested
329 * @prot_stall: protocol stall requested
330 * @registered: driver registered with system
331 * @suspended: driver in suspended state
332 * @connected: gadget driver associated
333 * @vbus_session: required vbus_session state
334 * @set_cfg_not_acked: pending acknowledgement for setup
335 * @waiting_zlp_ack: pending acknowledgement for ZLP
336 * @data_requests: DMA pool for data requests
337 * @stp_requests: DMA pool for setup requests
338 * @dma_addr: DMA address for received data
339 * @setup_data: Received setup data
340 * @base_addr: for mapped device memory
341 * @cfg_data: current cfg, intf, and alt in use
342 * @vbus_gpio: GPIO information for detecting VBUS
343 */
344 struct pch_udc_dev {
345 struct usb_gadget gadget;
346 struct usb_gadget_driver *driver;
347 struct pci_dev *pdev;
348 struct pch_udc_ep ep[PCH_UDC_EP_NUM];
349 spinlock_t lock; /* protects all state */
350 unsigned
351 stall:1,
352 prot_stall:1,
353 suspended:1,
354 connected:1,
355 vbus_session:1,
356 set_cfg_not_acked:1,
357 waiting_zlp_ack:1;
358 struct pci_pool *data_requests;
359 struct pci_pool *stp_requests;
360 dma_addr_t dma_addr;
361 struct usb_ctrlrequest setup_data;
362 void __iomem *base_addr;
363 struct pch_udc_cfg_data cfg_data;
364 struct pch_vbus_gpio_data vbus_gpio;
365 };
366 #define to_pch_udc(g) (container_of((g), struct pch_udc_dev, gadget))
367
368 #define PCH_UDC_PCI_BAR_QUARK_X1000 0
369 #define PCH_UDC_PCI_BAR 1
370 #define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
371 #define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC 0x0939
372 #define PCI_VENDOR_ID_ROHM 0x10DB
373 #define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
374 #define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
375
376 static const char ep0_string[] = "ep0in";
377 static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */
378 static bool speed_fs;
379 module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
380 MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
381
382 /**
383 * struct pch_udc_request - Structure holding a PCH USB device request packet
384 * @req: embedded ep request
385 * @td_data_phys: phys. address
386 * @td_data: first dma desc. of chain
387 * @td_data_last: last dma desc. of chain
388 * @queue: associated queue
389 * @dma_going: DMA in progress for request
390 * @dma_mapped: DMA memory mapped for request
391 * @dma_done: DMA completed for request
392 * @chain_len: chain length
393 * @buf: Buffer memory for align adjustment
394 * @dma: DMA memory for align adjustment
395 */
396 struct pch_udc_request {
397 struct usb_request req;
398 dma_addr_t td_data_phys;
399 struct pch_udc_data_dma_desc *td_data;
400 struct pch_udc_data_dma_desc *td_data_last;
401 struct list_head queue;
402 unsigned dma_going:1,
403 dma_mapped:1,
404 dma_done:1;
405 unsigned chain_len;
406 void *buf;
407 dma_addr_t dma;
408 };
409
410 static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
411 {
412 return ioread32(dev->base_addr + reg);
413 }
414
415 static inline void pch_udc_writel(struct pch_udc_dev *dev,
416 unsigned long val, unsigned long reg)
417 {
418 iowrite32(val, dev->base_addr + reg);
419 }
420
421 static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
422 unsigned long reg,
423 unsigned long bitmask)
424 {
425 pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
426 }
427
428 static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
429 unsigned long reg,
430 unsigned long bitmask)
431 {
432 pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
433 }
434
435 static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
436 {
437 return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
438 }
439
440 static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
441 unsigned long val, unsigned long reg)
442 {
443 iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
444 }
445
446 static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
447 unsigned long reg,
448 unsigned long bitmask)
449 {
450 pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
451 }
452
453 static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
454 unsigned long reg,
455 unsigned long bitmask)
456 {
457 pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
458 }
459
460 /**
461 * pch_udc_csr_busy() - Wait till idle.
462 * @dev: Reference to pch_udc_dev structure
463 */
464 static void pch_udc_csr_busy(struct pch_udc_dev *dev)
465 {
466 unsigned int count = 200;
467
468 /* Wait till idle */
469 while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
470 && --count)
471 cpu_relax();
472 if (!count)
473 dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
474 }
475
476 /**
477 * pch_udc_write_csr() - Write the command and status registers.
478 * @dev: Reference to pch_udc_dev structure
479 * @val: value to be written to CSR register
480 * @ep: Endpoint number whose CSR register is written
481 */
482 static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
483 unsigned int ep)
484 {
485 unsigned long reg = PCH_UDC_CSR(ep);
486
487 pch_udc_csr_busy(dev); /* Wait till idle */
488 pch_udc_writel(dev, val, reg);
489 pch_udc_csr_busy(dev); /* Wait till idle */
490 }
491
492 /**
493 * pch_udc_read_csr() - Read the command and status registers.
494 * @dev: Reference to pch_udc_dev structure
495 * @ep: Endpoint number whose CSR register is read
496 *
497 * Return codes: content of CSR register
498 */
499 static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
500 {
501 unsigned long reg = PCH_UDC_CSR(ep);
502
503 pch_udc_csr_busy(dev); /* Wait till idle */
504 pch_udc_readl(dev, reg); /* Dummy read */
505 pch_udc_csr_busy(dev); /* Wait till idle */
506 return pch_udc_readl(dev, reg);
507 }
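
/*
 * Both CSR helpers bracket the access with a UDC_CSR_BUSY wait, and
 * pch_udc_read_csr() issues a dummy read before the real one.  An
 * illustrative read-modify-write of an IN endpoint's NE register
 * (num and new_maxpkt are placeholders, not driver variables):
 *
 *   u32 val = pch_udc_read_csr(dev, UDC_EPIN_IDX(num));
 *   val &= ~UDC_CSR_NE_MAX_PKT_MASK;
 *   val |= new_maxpkt << UDC_CSR_NE_MAX_PKT_SHIFT;
 *   pch_udc_write_csr(dev, val, UDC_EPIN_IDX(num));
 */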
508
509 /**
510 * pch_udc_rmt_wakeup() - Initiate for remote wakeup
511 * @dev: Reference to pch_udc_dev structure
512 */
513 static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
514 {
515 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
516 mdelay(1);
517 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
518 }
519
520 /**
521 * pch_udc_get_frame() - Get the current frame from device status register
522 * @dev: Reference to pch_udc_dev structure
523 * Return current frame number
524 */
525 static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
526 {
527 u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
528 return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
529 }
530
531 /**
532 * pch_udc_clear_selfpowered() - Clear the self power control
533 * @dev: Reference to pch_udc_regs structure
534 */
535 static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
536 {
537 pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
538 }
539
540 /**
541 * pch_udc_set_selfpowered() - Set the self power control
542 * @dev: Reference to pch_udc_regs structure
543 */
544 static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
545 {
546 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
547 }
548
549 /**
550 * pch_udc_set_disconnect() - Set the disconnect status.
551 * @dev: Reference to pch_udc_regs structure
552 */
553 static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
554 {
555 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
556 }
557
558 /**
559 * pch_udc_clear_disconnect() - Clear the disconnect status.
560 * @dev: Reference to pch_udc_regs structure
561 */
562 static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
563 {
564 /* Clear the disconnect */
565 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
566 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
567 mdelay(1);
568 /* Resume USB signalling */
569 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
570 }
571
572 /**
573 * pch_udc_reconnect() - This API initializes usb device controller,
574 * and clears the disconnect status.
575 * @dev: Reference to pch_udc_regs structure
576 */
577 static void pch_udc_init(struct pch_udc_dev *dev);
578 static void pch_udc_reconnect(struct pch_udc_dev *dev)
579 {
580 pch_udc_init(dev);
581
582 /* enable device interrupts */
583 /* pch_udc_enable_interrupts() */
584 pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
585 UDC_DEVINT_UR | UDC_DEVINT_ENUM);
586
587 /* Clear the disconnect */
588 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
589 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
590 mdelay(1);
591 /* Resume USB signalling */
592 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
593 }
594
595 /**
596 * pch_udc_vbus_session() - set or clear the disconnect status.
597 * @dev: Reference to pch_udc_regs structure
598 * @is_active: Parameter specifying the action
599 * 0: indicating VBUS power is ending
600 * !0: indicating VBUS power is starting
601 */
602 static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
603 int is_active)
604 {
605 if (is_active) {
606 pch_udc_reconnect(dev);
607 dev->vbus_session = 1;
608 } else {
609 if (dev->driver && dev->driver->disconnect) {
610 spin_lock(&dev->lock);
611 dev->driver->disconnect(&dev->gadget);
612 spin_unlock(&dev->lock);
613 }
614 pch_udc_set_disconnect(dev);
615 dev->vbus_session = 0;
616 }
617 }
618
619 /**
620 * pch_udc_ep_set_stall() - Set the stall of endpoint
621 * @ep: Reference to structure of type pch_udc_ep_regs
622 */
623 static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
624 {
625 if (ep->in) {
626 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
627 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
628 } else {
629 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
630 }
631 }
632
633 /**
634 * pch_udc_ep_clear_stall() - Clear the stall of endpoint
635 * @ep: Reference to structure of type pch_udc_ep_regs
636 */
637 static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
638 {
639 /* Clear the stall */
640 pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
641 /* Clear NAK by writing CNAK */
642 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
643 }
644
645 /**
646 * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint
647 * @ep: Reference to structure of type pch_udc_ep_regs
648 * @type: Type of endpoint
649 */
650 static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
651 u8 type)
652 {
653 pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
654 UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
655 }
656
657 /**
658 * pch_udc_ep_set_bufsz() - Set the buffer size for the endpoint
659 * @ep: Reference to structure of type pch_udc_ep_regs
660 * @buf_size: The buffer word size
661 */
662 static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
663 u32 buf_size, u32 ep_in)
664 {
665 u32 data;
666 if (ep_in) {
667 data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
668 data = (data & 0xffff0000) | (buf_size & 0xffff);
669 pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
670 } else {
671 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
672 data = (buf_size << 16) | (data & 0xffff);
673 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
674 }
675 }
676
677 /**
678 * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
679 * @ep: Reference to structure of type pch_udc_ep_regs
680 * @pkt_size: The packet byte size
681 */
682 static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
683 {
684 u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
685 data = (data & 0xffff0000) | (pkt_size & 0xffff);
686 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
687 }
688
689 /**
690 * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint
691 * @ep: Reference to structure of type pch_udc_ep_regs
692 * @addr: DMA address of the setup buffer
693 */
694 static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
695 {
696 pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
697 }
698
699 /**
700 * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint
701 * @ep: Reference to structure of type pch_udc_ep_regs
702 * @addr: DMA address of the data descriptor
703 */
704 static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
705 {
706 pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
707 }
708
709 /**
710 * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint
711 * @ep: Reference to structure of type pch_udc_ep_regs
712 */
713 static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
714 {
715 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
716 }
717
718 /**
719 * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint
720 * @ep: Reference to structure of type pch_udc_ep_regs
721 */
722 static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
723 {
724 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
725 }
726
727 /**
728 * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint
729 * @ep: Reference to structure of type pch_udc_ep_regs
730 */
731 static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
732 {
733 pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
734 }
735
736 /**
737 * pch_udc_set_dma() - Set the 'TDE' or RDE bit of device control
738 * register depending on the direction specified
739 * @dev: Reference to structure of type pch_udc_regs
740 * @dir: whether Tx or Rx
741 * DMA_DIR_RX: Receive
742 * DMA_DIR_TX: Transmit
743 */
744 static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
745 {
746 if (dir == DMA_DIR_RX)
747 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
748 else if (dir == DMA_DIR_TX)
749 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
750 }
751
752 /**
753 * pch_udc_clear_dma() - Clear the 'TDE' or RDE bit of device control
754 * register depending on the direction specified
755 * @dev: Reference to structure of type pch_udc_regs
756 * @dir: Whether Tx or Rx
757 * DMA_DIR_RX: Receive
758 * DMA_DIR_TX: Transmit
759 */
760 static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
761 {
762 if (dir == DMA_DIR_RX)
763 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
764 else if (dir == DMA_DIR_TX)
765 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
766 }
767
768 /**
769 * pch_udc_set_csr_done() - Set the device control register
770 * CSR done field (bit 13)
771 * @dev: reference to structure of type pch_udc_regs
772 */
773 static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
774 {
775 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
776 }
777
778 /**
779 * pch_udc_disable_interrupts() - Disables the specified interrupts
780 * @dev: Reference to structure of type pch_udc_regs
781 * @mask: Mask to disable interrupts
782 */
783 static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
784 u32 mask)
785 {
786 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
787 }
788
789 /**
790 * pch_udc_enable_interrupts() - Enable the specified interrupts
791 * @dev: Reference to structure of type pch_udc_regs
792 * @mask: Mask to enable interrupts
793 */
794 static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
795 u32 mask)
796 {
797 pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
798 }
799
800 /**
801 * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts
802 * @dev: Reference to structure of type pch_udc_regs
803 * @mask: Mask to disable interrupts
804 */
805 static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
806 u32 mask)
807 {
808 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
809 }
810
811 /**
812 * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts
813 * @dev: Reference to structure of type pch_udc_regs
814 * @mask: Mask to enable interrupts
815 */
816 static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
817 u32 mask)
818 {
819 pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
820 }
821
822 /**
823 * pch_udc_read_device_interrupts() - Read the device interrupts
824 * @dev: Reference to structure of type pch_udc_regs
825 * Return the device interrupt status
826 */
827 static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
828 {
829 return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
830 }
831
832 /**
833 * pch_udc_write_device_interrupts() - Write device interrupts
834 * @dev: Reference to structure of type pch_udc_regs
835 * @val: The value to be written to interrupt register
836 */
837 static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
838 u32 val)
839 {
840 pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
841 }
842
843 /**
844 * pch_udc_read_ep_interrupts() - Read the endpoint interrupts
845 * @dev: Reference to structure of type pch_udc_regs
846 * Return the endpoint interrupt status
847 */
848 static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
849 {
850 return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
851 }
852
853 /**
854 * pch_udc_write_ep_interrupts() - Clear endpoint interrupts
855 * @dev: Reference to structure of type pch_udc_regs
856 * @val: The value to be written to interrupt register
857 */
858 static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
859 u32 val)
860 {
861 pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
862 }
863
864 /**
865 * pch_udc_read_device_status() - Read the device status
866 * @dev: Reference to structure of type pch_udc_regs
867 * Return the device status
868 */
869 static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
870 {
871 return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
872 }
873
874 /**
875 * pch_udc_read_ep_control() - Read the endpoint control
876 * @ep: Reference to structure of type pch_udc_ep_regs
877 * Return the endpoint control register value
878 */
879 static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
880 {
881 return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
882 }
883
884 /**
885 * pch_udc_clear_ep_control() - Clear the endpoint control register
886 * @ep: Reference to structure of type pch_udc_ep_regs
887 * Writes 0 to the endpoint control register
888 */
889 static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
890 {
891 return pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
892 }
893
894 /**
895 * pch_udc_read_ep_status() - Read the endpoint status
896 * @ep: Reference to structure of type pch_udc_ep_regs
897 * Return the endpoint status
898 */
899 static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
900 {
901 return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
902 }
903
904 /**
905 * pch_udc_clear_ep_status() - Clear the endpoint status
906 * @ep: Reference to structure of type pch_udc_ep_regs
907 * @stat: Endpoint status
908 */
909 static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
910 u32 stat)
911 {
912 return pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
913 }
914
915 /**
916 * pch_udc_ep_set_nak() - Set the bit 7 (SNAK field)
917 * of the endpoint control register
918 * @ep: Reference to structure of type pch_udc_ep_regs
919 */
920 static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
921 {
922 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
923 }
924
925 /**
926 * pch_udc_ep_clear_nak() - Set the bit 8 (CNAK field)
927 * of the endpoint control register
928 * @ep: reference to structure of type pch_udc_ep_regs
929 */
930 static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
931 {
932 unsigned int loopcnt = 0;
933 struct pch_udc_dev *dev = ep->dev;
934
935 if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
936 return;
937 if (!ep->in) {
938 loopcnt = 10000;
939 while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
940 --loopcnt)
941 udelay(5);
942 if (!loopcnt)
943 dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
944 __func__);
945 }
946 loopcnt = 10000;
947 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
948 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
949 udelay(5);
950 }
951 if (!loopcnt)
952 dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
953 __func__, ep->num, (ep->in ? "in" : "out"));
954 }
955
956 /**
957 * pch_udc_ep_fifo_flush() - Flush the endpoint fifo
958 * @ep: reference to structure of type pch_udc_ep_regs
959 * @dir: direction of endpoint
960 * 0: endpoint is OUT
961 * !0: endpoint is IN
962 */
963 static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
964 {
965 if (dir) { /* IN ep */
966 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
967 return;
968 }
969 }
970
971 /**
972 * pch_udc_ep_enable() - This API enables the endpoint
973 * @ep: Reference to the endpoint structure
974 * @desc: endpoint descriptor
975 */
976 static void pch_udc_ep_enable(struct pch_udc_ep *ep,
977 struct pch_udc_cfg_data *cfg,
978 const struct usb_endpoint_descriptor *desc)
979 {
980 u32 val = 0;
981 u32 buff_size = 0;
982
983 pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
984 if (ep->in)
985 buff_size = UDC_EPIN_BUFF_SIZE;
986 else
987 buff_size = UDC_EPOUT_BUFF_SIZE;
988 pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
989 pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
990 pch_udc_ep_set_nak(ep);
991 pch_udc_ep_fifo_flush(ep, ep->in);
992 /* Configure the endpoint */
993 val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
994 ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
995 UDC_CSR_NE_TYPE_SHIFT) |
996 (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
997 (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
998 (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
999 usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
1000
1001 if (ep->in)
1002 pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
1003 else
1004 pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
1005 }
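
/*
 * Worked example of the NE packing done above: a bulk IN endpoint 1
 * with a 512-byte max packet in configuration 1, interface 0,
 * alternate setting 0 is programmed as
 *
 *   val = (1 << UDC_CSR_NE_NUM_SHIFT)                         (num = 1)
 *       | (1 << UDC_CSR_NE_DIR_SHIFT)                         (IN)
 *       | (USB_ENDPOINT_XFER_BULK << UDC_CSR_NE_TYPE_SHIFT)   (bulk = 2)
 *       | (1 << UDC_CSR_NE_CFG_SHIFT)                         (cfg = 1)
 *       | (512 << UDC_CSR_NE_MAX_PKT_SHIFT);
 *
 * and written with pch_udc_write_csr(dev, val, UDC_EPIN_IDX(1)).
 */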
1006
1007 /**
1008 * pch_udc_ep_disable() - This API disables the endpoint
1009 * @ep: Reference to the endpoint structure
1010 */
1011 static void pch_udc_ep_disable(struct pch_udc_ep *ep)
1012 {
1013 if (ep->in) {
1014 /* flush the fifo */
1015 pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
1016 /* set NAK */
1017 pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1018 pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
1019 } else {
1020 /* set NAK */
1021 pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1022 }
1023 /* reset desc pointer */
1024 pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
1025 }
1026
1027 /**
1028 * pch_udc_wait_ep_stall() - Wait until the endpoint stall bit clears.
1029 * @ep: Reference to the endpoint structure
1030 */
1031 static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
1032 {
1033 unsigned int count = 10000;
1034
1035 /* Wait till idle */
1036 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
1037 udelay(5);
1038 if (!count)
1039 dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
1040 }
1041
1042 /**
1043 * pch_udc_init() - This API initializes usb device controller
1044 * @dev: Reference to pch_udc_regs structure
1045 */
1046 static void pch_udc_init(struct pch_udc_dev *dev)
1047 {
1048 if (NULL == dev) {
1049 pr_err("%s: Invalid address\n", __func__);
1050 return;
1051 }
1052 /* Soft Reset and Reset PHY */
1053 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1054 pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
1055 mdelay(1);
1056 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1057 pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
1058 mdelay(1);
1059 /* mask and clear all device interrupts */
1060 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1061 pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
1062
1063 /* mask and clear all ep interrupts */
1064 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1065 pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1066
1067 /* enable dynamic CSR programming, self powered and device speed */
1068 if (speed_fs)
1069 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1070 UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
1071 else /* default high speed */
1072 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1073 UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
1074 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
1075 (PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
1076 (PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
1077 UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
1078 UDC_DEVCTL_THE);
1079 }
1080
1081 /**
1082 * pch_udc_exit() - This API shuts down the usb device controller
1083 * @dev: Reference to pch_udc_regs structure
1084 */
1085 static void pch_udc_exit(struct pch_udc_dev *dev)
1086 {
1087 /* mask all device interrupts */
1088 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1089 /* mask all ep interrupts */
1090 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1091 /* put device in disconnected state */
1092 pch_udc_set_disconnect(dev);
1093 }
1094
1095 /**
1096 * pch_udc_pcd_get_frame() - This API is invoked to get the current frame number
1097 * @gadget: Reference to the gadget driver
1098 *
1099 * Return codes:
1100 * 0: Success
1101 * -EINVAL: If the gadget passed is NULL
1102 */
1103 static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1104 {
1105 struct pch_udc_dev *dev;
1106
1107 if (!gadget)
1108 return -EINVAL;
1109 dev = container_of(gadget, struct pch_udc_dev, gadget);
1110 return pch_udc_get_frame(dev);
1111 }
1112
1113 /**
1114 * pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup
1115 * @gadget: Reference to the gadget driver
1116 *
1117 * Return codes:
1118 * 0: Success
1119 * -EINVAL: If the gadget passed is NULL
1120 */
1121 static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
1122 {
1123 struct pch_udc_dev *dev;
1124 unsigned long flags;
1125
1126 if (!gadget)
1127 return -EINVAL;
1128 dev = container_of(gadget, struct pch_udc_dev, gadget);
1129 spin_lock_irqsave(&dev->lock, flags);
1130 pch_udc_rmt_wakeup(dev);
1131 spin_unlock_irqrestore(&dev->lock, flags);
1132 return 0;
1133 }
1134
1135 /**
1136 * pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device
1137 * is self powered or not
1138 * @gadget: Reference to the gadget driver
1139 * @value: Specifies self powered or not
1140 *
1141 * Return codes:
1142 * 0: Success
1143 * -EINVAL: If the gadget passed is NULL
1144 */
1145 static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
1146 {
1147 struct pch_udc_dev *dev;
1148
1149 if (!gadget)
1150 return -EINVAL;
1151 gadget->is_selfpowered = (value != 0);
1152 dev = container_of(gadget, struct pch_udc_dev, gadget);
1153 if (value)
1154 pch_udc_set_selfpowered(dev);
1155 else
1156 pch_udc_clear_selfpowered(dev);
1157 return 0;
1158 }
1159
1160 /**
1161 * pch_udc_pcd_pullup() - This API is invoked to make the device
1162 * visible/invisible to the host
1163 * @gadget: Reference to the gadget driver
1164 * @is_on: Specifies whether the pull up is made active or inactive
1165 *
1166 * Return codes:
1167 * 0: Success
1168 * -EINVAL: If the gadget passed is NULL
1169 */
1170 static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
1171 {
1172 struct pch_udc_dev *dev;
1173
1174 if (!gadget)
1175 return -EINVAL;
1176 dev = container_of(gadget, struct pch_udc_dev, gadget);
1177 if (is_on) {
1178 pch_udc_reconnect(dev);
1179 } else {
1180 if (dev->driver && dev->driver->disconnect) {
1181 spin_lock(&dev->lock);
1182 dev->driver->disconnect(&dev->gadget);
1183 spin_unlock(&dev->lock);
1184 }
1185 pch_udc_set_disconnect(dev);
1186 }
1187
1188 return 0;
1189 }
1190
1191 /**
1192 * pch_udc_pcd_vbus_session() - This API is used by a driver for an external
1193 * transceiver (or GPIO) that
1194 * detects a VBUS power session starting/ending
1195 * @gadget: Reference to the gadget driver
1196 * @is_active: specifies whether the session is starting or ending
1197 *
1198 * Return codes:
1199 * 0: Success
1200 * -EINVAL: If the gadget passed is NULL
1201 */
1202 static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
1203 {
1204 struct pch_udc_dev *dev;
1205
1206 if (!gadget)
1207 return -EINVAL;
1208 dev = container_of(gadget, struct pch_udc_dev, gadget);
1209 pch_udc_vbus_session(dev, is_active);
1210 return 0;
1211 }
1212
1213 /**
1214 * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during
1215 * SET_CONFIGURATION calls to
1216 * specify how much power the device can consume
1217 * @gadget: Reference to the gadget driver
1218 * @mA: specifies the current limit in 2mA unit
1219 *
1220 * Return codes:
1221 * -EINVAL: If the gadget passed is NULL
1222 * -EOPNOTSUPP:
1223 */
1224 static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
1225 {
1226 return -EOPNOTSUPP;
1227 }
1228
1229 static int pch_udc_start(struct usb_gadget *g,
1230 struct usb_gadget_driver *driver);
1231 static int pch_udc_stop(struct usb_gadget *g);
1232
1233 static const struct usb_gadget_ops pch_udc_ops = {
1234 .get_frame = pch_udc_pcd_get_frame,
1235 .wakeup = pch_udc_pcd_wakeup,
1236 .set_selfpowered = pch_udc_pcd_selfpowered,
1237 .pullup = pch_udc_pcd_pullup,
1238 .vbus_session = pch_udc_pcd_vbus_session,
1239 .vbus_draw = pch_udc_pcd_vbus_draw,
1240 .udc_start = pch_udc_start,
1241 .udc_stop = pch_udc_stop,
1242 };
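
/*
 * These ops are invoked by the gadget/composite core through struct
 * usb_gadget; binding and unbinding a function driver ends up in
 * pch_udc_start()/pch_udc_stop().  The gadget itself is presumably
 * registered with the UDC core (usb_add_gadget_udc()) in the PCI probe
 * path later in this file.
 */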
1243
1244 /**
1245 * pch_vbus_gpio_get_value() - This API gets value of GPIO port as VBUS status.
1246 * @dev: Reference to the driver structure
1247 *
1248 * Return value:
1249 * 1: VBUS is high
1250 * 0: VBUS is low
1251 * -1: VBUS detection using GPIO is not enabled
1252 */
1253 static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
1254 {
1255 int vbus = 0;
1256
1257 if (dev->vbus_gpio.port)
1258 vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0;
1259 else
1260 vbus = -1;
1261
1262 return vbus;
1263 }
1264
1265 /**
1266 * pch_vbus_gpio_work_fall() - This API keeps watch on VBUS becoming Low.
1267 * If VBUS is Low, disconnect is processed
1268 * @irq_work: Structure for WorkQueue
1269 *
1270 */
1271 static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
1272 {
1273 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1274 struct pch_vbus_gpio_data, irq_work_fall);
1275 struct pch_udc_dev *dev =
1276 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1277 int vbus_saved = -1;
1278 int vbus;
1279 int count;
1280
1281 if (!dev->vbus_gpio.port)
1282 return;
1283
1284 for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
1285 count++) {
1286 vbus = pch_vbus_gpio_get_value(dev);
1287
1288 if ((vbus_saved == vbus) && (vbus == 0)) {
1289 dev_dbg(&dev->pdev->dev, "VBUS fell");
1290 if (dev->driver
1291 && dev->driver->disconnect) {
1292 dev->driver->disconnect(
1293 &dev->gadget);
1294 }
1295 if (dev->vbus_gpio.intr)
1296 pch_udc_init(dev);
1297 else
1298 pch_udc_reconnect(dev);
1299 return;
1300 }
1301 vbus_saved = vbus;
1302 mdelay(PCH_VBUS_INTERVAL);
1303 }
1304 }
1305
1306 /**
1307 * pch_vbus_gpio_work_rise() - This API checks VBUS is High.
1308 * If VBUS is High, connect is processed
1309 * @irq_work: Structure for WorkQueue
1310 *
1311 */
1312 static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
1313 {
1314 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1315 struct pch_vbus_gpio_data, irq_work_rise);
1316 struct pch_udc_dev *dev =
1317 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1318 int vbus;
1319
1320 if (!dev->vbus_gpio.port)
1321 return;
1322
1323 mdelay(PCH_VBUS_INTERVAL);
1324 vbus = pch_vbus_gpio_get_value(dev);
1325
1326 if (vbus == 1) {
1327 dev_dbg(&dev->pdev->dev, "VBUS rose");
1328 pch_udc_reconnect(dev);
1329 return;
1330 }
1331 }
1332
1333 /**
1334 * pch_vbus_gpio_irq() - IRQ handler for the GPIO interrupt indicating a VBUS change
1335 * @irq: Interrupt request number
1336 * @dev: Reference to the device structure
1337 *
1338 * Return codes:
1339 * IRQ_HANDLED: VBUS change was handled
1340 * IRQ_NONE: GPIO-based VBUS detection is not enabled
1341 */
1342 static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
1343 {
1344 struct pch_udc_dev *dev = (struct pch_udc_dev *)data;
1345
1346 if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
1347 return IRQ_NONE;
1348
1349 if (pch_vbus_gpio_get_value(dev))
1350 schedule_work(&dev->vbus_gpio.irq_work_rise);
1351 else
1352 schedule_work(&dev->vbus_gpio.irq_work_fall);
1353
1354 return IRQ_HANDLED;
1355 }
1356
1357 /**
1358 * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
1359 * @dev: Reference to the driver structure
1360 * @vbus_gpio_port: GPIO port number used to detect VBUS
1361 *
1362 * Return codes:
1363 * 0: Success
1364 * -EINVAL: GPIO port is invalid or can't be initialized.
1365 */
1366 static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port)
1367 {
1368 int err;
1369 int irq_num = 0;
1370
1371 dev->vbus_gpio.port = 0;
1372 dev->vbus_gpio.intr = 0;
1373
1374 if (vbus_gpio_port <= -1)
1375 return -EINVAL;
1376
1377 err = gpio_is_valid(vbus_gpio_port);
1378 if (!err) {
1379 pr_err("%s: gpio port %d is invalid\n",
1380 __func__, vbus_gpio_port);
1381 return -EINVAL;
1382 }
1383
1384 err = gpio_request(vbus_gpio_port, "pch_vbus");
1385 if (err) {
1386 pr_err("%s: can't request gpio port %d, err: %d\n",
1387 __func__, vbus_gpio_port, err);
1388 return -EINVAL;
1389 }
1390
1391 dev->vbus_gpio.port = vbus_gpio_port;
1392 gpio_direction_input(vbus_gpio_port);
1393 INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);
1394
1395 irq_num = gpio_to_irq(vbus_gpio_port);
1396 if (irq_num > 0) {
1397 irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
1398 err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
1399 "vbus_detect", dev);
1400 if (!err) {
1401 dev->vbus_gpio.intr = irq_num;
1402 INIT_WORK(&dev->vbus_gpio.irq_work_rise,
1403 pch_vbus_gpio_work_rise);
1404 } else {
1405 pr_err("%s: can't request irq %d, err: %d\n",
1406 __func__, irq_num, err);
1407 }
1408 }
1409
1410 return 0;
1411 }
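
/*
 * The code above gives two levels of VBUS detection: irq_work_fall is
 * always set up and confirms a disconnect by polling the pin every
 * PCH_VBUS_INTERVAL ms for up to PCH_VBUS_PERIOD ms, while
 * irq_work_rise is only available when the GPIO can supply an
 * interrupt (requested above for both edges).
 */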
1412
1413 /**
1414 * pch_vbus_gpio_free() - This API frees resources of GPIO port
1415 * @dev: Reference to the driver structure
1416 */
1417 static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
1418 {
1419 if (dev->vbus_gpio.intr)
1420 free_irq(dev->vbus_gpio.intr, dev);
1421
1422 if (dev->vbus_gpio.port)
1423 gpio_free(dev->vbus_gpio.port);
1424 }
1425
1426 /**
1427 * complete_req() - This API is invoked from the driver when processing
1428 * of a request is complete
1429 * @ep: Reference to the endpoint structure
1430 * @req: Reference to the request structure
1431 * @status: Indicates the success/failure of completion
1432 */
1433 static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
1434 int status)
1435 __releases(&dev->lock)
1436 __acquires(&dev->lock)
1437 {
1438 struct pch_udc_dev *dev;
1439 unsigned halted = ep->halted;
1440
1441 list_del_init(&req->queue);
1442
1443 /* set new status if pending */
1444 if (req->req.status == -EINPROGRESS)
1445 req->req.status = status;
1446 else
1447 status = req->req.status;
1448
1449 dev = ep->dev;
1450 if (req->dma_mapped) {
1451 if (req->dma == DMA_ADDR_INVALID) {
1452 if (ep->in)
1453 dma_unmap_single(&dev->pdev->dev, req->req.dma,
1454 req->req.length,
1455 DMA_TO_DEVICE);
1456 else
1457 dma_unmap_single(&dev->pdev->dev, req->req.dma,
1458 req->req.length,
1459 DMA_FROM_DEVICE);
1460 req->req.dma = DMA_ADDR_INVALID;
1461 } else {
1462 if (ep->in)
1463 dma_unmap_single(&dev->pdev->dev, req->dma,
1464 req->req.length,
1465 DMA_TO_DEVICE);
1466 else {
1467 dma_unmap_single(&dev->pdev->dev, req->dma,
1468 req->req.length,
1469 DMA_FROM_DEVICE);
1470 memcpy(req->req.buf, req->buf, req->req.length);
1471 }
1472 kfree(req->buf);
1473 req->dma = DMA_ADDR_INVALID;
1474 }
1475 req->dma_mapped = 0;
1476 }
1477 ep->halted = 1;
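	/* drop the lock while handing the request back to the gadget driver */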
1478 spin_unlock(&dev->lock);
1479 if (!ep->in)
1480 pch_udc_ep_clear_rrdy(ep);
1481 usb_gadget_giveback_request(&ep->ep, &req->req);
1482 spin_lock(&dev->lock);
1483 ep->halted = halted;
1484 }
1485
1486 /**
1487 * empty_req_queue() - This API empties the request queue of an endpoint
1488 * @ep: Reference to the endpoint structure
1489 */
1490 static void empty_req_queue(struct pch_udc_ep *ep)
1491 {
1492 struct pch_udc_request *req;
1493
1494 ep->halted = 1;
1495 while (!list_empty(&ep->queue)) {
1496 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1497 complete_req(ep, req, -ESHUTDOWN); /* Remove from list */
1498 }
1499 }
1500
1501 /**
1502 * pch_udc_free_dma_chain() - This function frees the DMA chain created
1503 * for the request
1504 * @dev: Reference to the driver structure
1505 * @req: Reference to the request to be freed
1506 *
1507 * Return codes:
1508 * 0: Success
1509 */
1510 static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1511 struct pch_udc_request *req)
1512 {
1513 struct pch_udc_data_dma_desc *td = req->td_data;
1514 unsigned i = req->chain_len;
1515
1516 dma_addr_t addr2;
1517 dma_addr_t addr = (dma_addr_t)td->next;
1518 td->next = 0x00;
1519 for (; i > 1; --i) {
1520 /* do not free first desc., will be done by free for request */
1521 td = phys_to_virt(addr);
1522 addr2 = (dma_addr_t)td->next;
1523 pci_pool_free(dev->data_requests, td, addr);
1524 td->next = 0x00;
1525 addr = addr2;
1526 }
1527 req->chain_len = 1;
1528 }
1529
1530 /**
1531 * pch_udc_create_dma_chain() - This function creates or reinitializes
1532 * a DMA chain
1533 * @ep: Reference to the endpoint structure
1534 * @req: Reference to the request
1535 * @buf_len: The buffer length
1536 * @gfp_flags: Flags to be used while mapping the data buffer
1537 *
1538 * Return codes:
1539 * 0: success,
1540 * -ENOMEM: pci_pool_alloc invocation fails
1541 */
1542 static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
1543 struct pch_udc_request *req,
1544 unsigned long buf_len,
1545 gfp_t gfp_flags)
1546 {
1547 struct pch_udc_data_dma_desc *td = req->td_data, *last;
1548 unsigned long bytes = req->req.length, i = 0;
1549 dma_addr_t dma_addr;
1550 unsigned len = 1;
1551
1552 if (req->chain_len > 1)
1553 pch_udc_free_dma_chain(ep->dev, req);
1554
1555 if (req->dma == DMA_ADDR_INVALID)
1556 td->dataptr = req->req.dma;
1557 else
1558 td->dataptr = req->dma;
1559
1560 td->status = PCH_UDC_BS_HST_BSY;
1561 for (; ; bytes -= buf_len, ++len) {
1562 td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
1563 if (bytes <= buf_len)
1564 break;
1565 last = td;
1566 td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
1567 &dma_addr);
1568 if (!td)
1569 goto nomem;
1570 i += buf_len;
1571 td->dataptr = req->td_data->dataptr + i;
1572 last->next = dma_addr;
1573 }
1574
1575 req->td_data_last = td;
1576 td->status |= PCH_UDC_DMA_LAST;
1577 td->next = req->td_data_phys;
1578 req->chain_len = len;
1579 return 0;
1580
1581 nomem:
1582 if (len > 1) {
1583 req->chain_len = len;
1584 pch_udc_free_dma_chain(ep->dev, req);
1585 }
1586 req->chain_len = 1;
1587 return -ENOMEM;
1588 }
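
/*
 * prepare_dma() below always calls the chain builder with
 * buf_len == ep->ep.maxpacket, so a request of length L ends up as
 * roughly L / maxpacket (rounded up) descriptors: each one points
 * maxpacket bytes further into the already-mapped request buffer, the
 * last one is tagged PCH_UDC_DMA_LAST and its next pointer wraps back
 * to req->td_data_phys.
 */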
1589
1590 /**
1591 * prepare_dma() - This function creates and initializes the DMA chain
1592 * for the request
1593 * @ep: Reference to the endpoint structure
1594 * @req: Reference to the request
1595 * @gfp: Flag to be used while mapping the data buffer
1596 *
1597 * Return codes:
1598 * 0: Success
1599 * Other than 0: linux error number on failure
1600 */
1601 static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1602 gfp_t gfp)
1603 {
1604 int retval;
1605
1606 /* Allocate and create a DMA chain */
1607 retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1608 if (retval) {
1609 pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1610 return retval;
1611 }
1612 if (ep->in)
1613 req->td_data->status = (req->td_data->status &
1614 ~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1615 return 0;
1616 }
1617
1618 /**
1619 * process_zlp() - This function processes zero length packets
1620 * from the gadget driver
1621 * @ep: Reference to the endpoint structure
1622 * @req: Reference to the request
1623 */
1624 static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
1625 {
1626 struct pch_udc_dev *dev = ep->dev;
1627
1628 /* IN zlp's are handled by hardware */
1629 complete_req(ep, req, 0);
1630
1631 /* if set_config or set_intf is waiting for ack by zlp
1632 * then set CSR_DONE
1633 */
1634 if (dev->set_cfg_not_acked) {
1635 pch_udc_set_csr_done(dev);
1636 dev->set_cfg_not_acked = 0;
1637 }
1638 /* setup command is ACK'ed now by zlp */
1639 if (!dev->stall && dev->waiting_zlp_ack) {
1640 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
1641 dev->waiting_zlp_ack = 0;
1642 }
1643 }
1644
1645 /**
1646 * pch_udc_start_rxrequest() - This function starts the receive request.
1647 * @ep: Reference to the endpoint structure
1648 * @req: Reference to the request structure
1649 */
1650 static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
1651 struct pch_udc_request *req)
1652 {
1653 struct pch_udc_data_dma_desc *td_data;
1654
1655 pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1656 td_data = req->td_data;
1657 /* Set the status bits for all descriptors */
1658 while (1) {
1659 td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
1660 PCH_UDC_BS_HST_RDY;
1661 if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
1662 break;
1663 td_data = phys_to_virt(td_data->next);
1664 }
1665 /* Write the descriptor pointer */
1666 pch_udc_ep_set_ddptr(ep, req->td_data_phys);
1667 req->dma_going = 1;
1668 pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
1669 pch_udc_set_dma(ep->dev, DMA_DIR_RX);
1670 pch_udc_ep_clear_nak(ep);
1671 pch_udc_ep_set_rrdy(ep);
1672 }
1673
1674 /**
1675 * pch_udc_pcd_ep_enable() - This API enables the endpoint. It is called
1676 * from gadget driver
1677 * @usbep: Reference to the USB endpoint structure
1678 * @desc: Reference to the USB endpoint descriptor structure
1679 *
1680 * Return codes:
1681 * 0: Success
1682 * -EINVAL:
1683 * -ESHUTDOWN:
1684 */
1685 static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
1686 const struct usb_endpoint_descriptor *desc)
1687 {
1688 struct pch_udc_ep *ep;
1689 struct pch_udc_dev *dev;
1690 unsigned long iflags;
1691
1692 if (!usbep || (usbep->name == ep0_string) || !desc ||
1693 (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
1694 return -EINVAL;
1695
1696 ep = container_of(usbep, struct pch_udc_ep, ep);
1697 dev = ep->dev;
1698 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1699 return -ESHUTDOWN;
1700 spin_lock_irqsave(&dev->lock, iflags);
1701 ep->ep.desc = desc;
1702 ep->halted = 0;
1703 pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
1704 ep->ep.maxpacket = usb_endpoint_maxp(desc);
1705 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1706 spin_unlock_irqrestore(&dev->lock, iflags);
1707 return 0;
1708 }
1709
1710 /**
1711 * pch_udc_pcd_ep_disable() - This API disables endpoint and is called
1712 * from gadget driver
1713 * @usbep: Reference to the USB endpoint structure
1714 *
1715 * Return codes:
1716 * 0: Success
1717 * -EINVAL:
1718 */
1719 static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
1720 {
1721 struct pch_udc_ep *ep;
1722 struct pch_udc_dev *dev;
1723 unsigned long iflags;
1724
1725 if (!usbep)
1726 return -EINVAL;
1727
1728 ep = container_of(usbep, struct pch_udc_ep, ep);
1729 dev = ep->dev;
1730 if ((usbep->name == ep0_string) || !ep->ep.desc)
1731 return -EINVAL;
1732
1733 spin_lock_irqsave(&ep->dev->lock, iflags);
1734 empty_req_queue(ep);
1735 ep->halted = 1;
1736 pch_udc_ep_disable(ep);
1737 pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1738 ep->ep.desc = NULL;
1739 INIT_LIST_HEAD(&ep->queue);
1740 spin_unlock_irqrestore(&ep->dev->lock, iflags);
1741 return 0;
1742 }
1743
1744 /**
1745 * pch_udc_alloc_request() - This function allocates request structure.
1746 * It is called by gadget driver
1747 * @usbep: Reference to the USB endpoint structure
1748 * @gfp: Flag to be used while allocating memory
1749 *
1750 * Return codes:
1751 * NULL: Failure
1752 * Allocated address: Success
1753 */
1754 static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1755 gfp_t gfp)
1756 {
1757 struct pch_udc_request *req;
1758 struct pch_udc_ep *ep;
1759 struct pch_udc_data_dma_desc *dma_desc;
1760 struct pch_udc_dev *dev;
1761
1762 if (!usbep)
1763 return NULL;
1764 ep = container_of(usbep, struct pch_udc_ep, ep);
1765 dev = ep->dev;
1766 req = kzalloc(sizeof *req, gfp);
1767 if (!req)
1768 return NULL;
1769 req->req.dma = DMA_ADDR_INVALID;
1770 req->dma = DMA_ADDR_INVALID;
1771 INIT_LIST_HEAD(&req->queue);
1772 if (!ep->dev->dma_addr)
1773 return &req->req;
1774 /* ep0 in requests are allocated from data pool here */
1775 dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
1776 &req->td_data_phys);
1777 if (NULL == dma_desc) {
1778 kfree(req);
1779 return NULL;
1780 }
1781 /* prevent from using desc. - set HOST BUSY */
1782 dma_desc->status |= PCH_UDC_BS_HST_BSY;
1783 dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
1784 req->td_data = dma_desc;
1785 req->td_data_last = dma_desc;
1786 req->chain_len = 1;
1787 return &req->req;
1788 }
1789
1790 /**
1791 * pch_udc_free_request() - This function frees request structure.
1792 * It is called by gadget driver
1793 * @usbep: Reference to the USB endpoint structure
1794 * @usbreq: Reference to the USB request
1795 */
1796 static void pch_udc_free_request(struct usb_ep *usbep,
1797 struct usb_request *usbreq)
1798 {
1799 struct pch_udc_ep *ep;
1800 struct pch_udc_request *req;
1801 struct pch_udc_dev *dev;
1802
1803 if (!usbep || !usbreq)
1804 return;
1805 ep = container_of(usbep, struct pch_udc_ep, ep);
1806 req = container_of(usbreq, struct pch_udc_request, req);
1807 dev = ep->dev;
1808 if (!list_empty(&req->queue))
1809 dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1810 __func__, usbep->name, req);
1811 if (req->td_data != NULL) {
1812 if (req->chain_len > 1)
1813 pch_udc_free_dma_chain(ep->dev, req);
1814 pci_pool_free(ep->dev->data_requests, req->td_data,
1815 req->td_data_phys);
1816 }
1817 kfree(req);
1818 }
1819
1820 /**
1821 * pch_udc_pcd_queue() - This function queues a request packet. It is called
1822 * by gadget driver
1823 * @usbep: Reference to the USB endpoint structure
1824 * @usbreq: Reference to the USB request
1825 * @gfp: Flag to be used while mapping the data buffer
1826 *
1827 * Return codes:
1828 * 0: Success
1829 * linux error number: Failure
1830 */
1831 static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1832 gfp_t gfp)
1833 {
1834 int retval = 0;
1835 struct pch_udc_ep *ep;
1836 struct pch_udc_dev *dev;
1837 struct pch_udc_request *req;
1838 unsigned long iflags;
1839
1840 if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1841 return -EINVAL;
1842 ep = container_of(usbep, struct pch_udc_ep, ep);
1843 dev = ep->dev;
1844 if (!ep->ep.desc && ep->num)
1845 return -EINVAL;
1846 req = container_of(usbreq, struct pch_udc_request, req);
1847 if (!list_empty(&req->queue))
1848 return -EINVAL;
1849 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1850 return -ESHUTDOWN;
1851 spin_lock_irqsave(&dev->lock, iflags);
1852 /* map the buffer for dma */
1853 if (usbreq->length &&
1854 ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
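		/*
		 * Buffers that are not 4-byte aligned are bounced through a
		 * driver-allocated copy (req->buf); aligned buffers are
		 * DMA-mapped in place.
		 */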
1855 if (!((unsigned long)(usbreq->buf) & 0x03)) {
1856 if (ep->in)
1857 usbreq->dma = dma_map_single(&dev->pdev->dev,
1858 usbreq->buf,
1859 usbreq->length,
1860 DMA_TO_DEVICE);
1861 else
1862 usbreq->dma = dma_map_single(&dev->pdev->dev,
1863 usbreq->buf,
1864 usbreq->length,
1865 DMA_FROM_DEVICE);
1866 } else {
1867 req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
1868 if (!req->buf) {
1869 retval = -ENOMEM;
1870 goto probe_end;
1871 }
1872 if (ep->in) {
1873 memcpy(req->buf, usbreq->buf, usbreq->length);
1874 req->dma = dma_map_single(&dev->pdev->dev,
1875 req->buf,
1876 usbreq->length,
1877 DMA_TO_DEVICE);
1878 } else
1879 req->dma = dma_map_single(&dev->pdev->dev,
1880 req->buf,
1881 usbreq->length,
1882 DMA_FROM_DEVICE);
1883 }
1884 req->dma_mapped = 1;
1885 }
1886 if (usbreq->length > 0) {
1887 retval = prepare_dma(ep, req, GFP_ATOMIC);
1888 if (retval)
1889 goto probe_end;
1890 }
1891 usbreq->actual = 0;
1892 usbreq->status = -EINPROGRESS;
1893 req->dma_done = 0;
1894 if (list_empty(&ep->queue) && !ep->halted) {
1895 /* no pending transfer, so start this req */
1896 if (!usbreq->length) {
1897 process_zlp(ep, req);
1898 retval = 0;
1899 goto probe_end;
1900 }
1901 if (!ep->in) {
1902 pch_udc_start_rxrequest(ep, req);
1903 } else {
1904 /*
1905 			 * For IN transfers the descriptors will be programmed and
1906 * P bit will be set when
1907 * we get an IN token
1908 */
1909 pch_udc_wait_ep_stall(ep);
1910 pch_udc_ep_clear_nak(ep);
1911 pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1912 }
1913 }
1914 /* Now add this request to the ep's pending requests */
1915 if (req != NULL)
1916 list_add_tail(&req->queue, &ep->queue);
1917
1918 probe_end:
1919 spin_unlock_irqrestore(&dev->lock, iflags);
1920 return retval;
1921 }
1922
1923 /**
1924 * pch_udc_pcd_dequeue() - This function de-queues a request packet.
1925 * It is called by the gadget driver
1926 * @usbep: Reference to the USB endpoint structure
1927 * @usbreq: Reference to the USB request
1928 *
1929 * Return codes:
1930 * 0: Success
1931 * linux error number: Failure
1932 */
1933 static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1934 struct usb_request *usbreq)
1935 {
1936 struct pch_udc_ep *ep;
1937 struct pch_udc_request *req;
1938 struct pch_udc_dev *dev;
1939 unsigned long flags;
1940 int ret = -EINVAL;
1941
1942 ep = container_of(usbep, struct pch_udc_ep, ep);
1943 dev = ep->dev;
1944 if (!usbep || !usbreq || (!ep->ep.desc && ep->num))
1945 return ret;
1946 req = container_of(usbreq, struct pch_udc_request, req);
1947 spin_lock_irqsave(&ep->dev->lock, flags);
1948 /* make sure it's still queued on this endpoint */
1949 list_for_each_entry(req, &ep->queue, queue) {
1950 if (&req->req == usbreq) {
1951 pch_udc_ep_set_nak(ep);
1952 if (!list_empty(&req->queue))
1953 complete_req(ep, req, -ECONNRESET);
1954 ret = 0;
1955 break;
1956 }
1957 }
1958 spin_unlock_irqrestore(&ep->dev->lock, flags);
1959 return ret;
1960 }
1961
1962 /**
1963 * pch_udc_pcd_set_halt() - This function sets or clears the endpoint halt
1964 * feature
1965 * @usbep: Reference to the USB endpoint structure
1966 * @halt: Specifies whether to set or clear the feature
1967 *
1968 * Return codes:
1969 * 0: Success
1970 * linux error number: Failure
1971 */
1972 static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
1973 {
1974 struct pch_udc_ep *ep;
1975 struct pch_udc_dev *dev;
1976 unsigned long iflags;
1977 int ret;
1978
1979 if (!usbep)
1980 return -EINVAL;
1981 ep = container_of(usbep, struct pch_udc_ep, ep);
1982 dev = ep->dev;
1983 if (!ep->ep.desc && !ep->num)
1984 return -EINVAL;
1985 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1986 return -ESHUTDOWN;
1987 spin_lock_irqsave(&udc_stall_spinlock, iflags);
1988 if (list_empty(&ep->queue)) {
1989 if (halt) {
1990 if (ep->num == PCH_UDC_EP0)
1991 ep->dev->stall = 1;
1992 pch_udc_ep_set_stall(ep);
1993 pch_udc_enable_ep_interrupts(ep->dev,
1994 PCH_UDC_EPINT(ep->in,
1995 ep->num));
1996 } else {
1997 pch_udc_ep_clear_stall(ep);
1998 }
1999 ret = 0;
2000 } else {
2001 ret = -EAGAIN;
2002 }
2003 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2004 return ret;
2005 }
2006
2007 /**
2008 * pch_udc_pcd_set_wedge() - This function sets the endpoint wedge
2009 * (a halt condition that the host cannot clear with ClearFeature(HALT))
2010 * @usbep: Reference to the USB endpoint structure
2012 *
2013 * Return codes:
2014 * 0: Success
2015 * linux error number: Failure
2016 */
2017 static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
2018 {
2019 struct pch_udc_ep *ep;
2020 struct pch_udc_dev *dev;
2021 unsigned long iflags;
2022 int ret;
2023
2024 if (!usbep)
2025 return -EINVAL;
2026 ep = container_of(usbep, struct pch_udc_ep, ep);
2027 dev = ep->dev;
2028 if (!ep->ep.desc && !ep->num)
2029 return -EINVAL;
2030 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
2031 return -ESHUTDOWN;
2032 spin_lock_irqsave(&udc_stall_spinlock, iflags);
2033 if (!list_empty(&ep->queue)) {
2034 ret = -EAGAIN;
2035 } else {
2036 if (ep->num == PCH_UDC_EP0)
2037 ep->dev->stall = 1;
2038 pch_udc_ep_set_stall(ep);
2039 pch_udc_enable_ep_interrupts(ep->dev,
2040 PCH_UDC_EPINT(ep->in, ep->num));
2041 ep->dev->prot_stall = 1;
2042 ret = 0;
2043 }
2044 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2045 return ret;
2046 }
2047
2048 /**
2049 * pch_udc_pcd_fifo_flush() - This function flushes the FIFO of the specified endpoint
2050 * @usbep: Reference to the USB endpoint structure
2051 */
2052 static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
2053 {
2054 struct pch_udc_ep *ep;
2055
2056 if (!usbep)
2057 return;
2058
2059 ep = container_of(usbep, struct pch_udc_ep, ep);
2060 if (ep->ep.desc || !ep->num)
2061 pch_udc_ep_fifo_flush(ep, ep->in);
2062 }
2063
2064 static const struct usb_ep_ops pch_udc_ep_ops = {
2065 .enable = pch_udc_pcd_ep_enable,
2066 .disable = pch_udc_pcd_ep_disable,
2067 .alloc_request = pch_udc_alloc_request,
2068 .free_request = pch_udc_free_request,
2069 .queue = pch_udc_pcd_queue,
2070 .dequeue = pch_udc_pcd_dequeue,
2071 .set_halt = pch_udc_pcd_set_halt,
2072 .set_wedge = pch_udc_pcd_set_wedge,
2073 .fifo_status = NULL,
2074 .fifo_flush = pch_udc_pcd_fifo_flush,
2075 };
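/*
 * These ops are installed on every endpoint in pch_udc_pcd_reinit() and are
 * reached through the usb_ep_*() wrappers of the gadget framework.  Rough
 * gadget-side mapping (an illustrative sketch, not code from this driver):
 *
 *	usb_ep_enable(ep)                    -> pch_udc_pcd_ep_enable()
 *	req = usb_ep_alloc_request(ep, gfp)  -> pch_udc_alloc_request()
 *	usb_ep_queue(ep, req, gfp)           -> pch_udc_pcd_queue()
 *	usb_ep_dequeue(ep, req)              -> pch_udc_pcd_dequeue()
 *	usb_ep_set_halt(ep) / usb_ep_set_wedge(ep)
 *	                                     -> pch_udc_pcd_set_halt() / _set_wedge()
 *	usb_ep_fifo_flush(ep)                -> pch_udc_pcd_fifo_flush()
 */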
2076
2077 /**
2078 * pch_udc_init_setup_buff() - This function initializes the SETUP buffer
2079 * @td_stp: Reference to the SETUP buffer structure
2080 */
2081 static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
2082 {
2083 static u32 pky_marker;
2084
2085 if (!td_stp)
2086 return;
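	/*
	 * Tag the descriptor with an incrementing marker (seemingly a debug
	 * aid), poison the setup request area with 0xFF, and hand the buffer
	 * back to the controller (PCH_UDC_BS_HST_RDY) so it can receive the
	 * next SETUP packet.
	 */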
2087 td_stp->reserved = ++pky_marker;
2088 memset(&td_stp->request, 0xFF, sizeof td_stp->request);
2089 td_stp->status = PCH_UDC_BS_HST_RDY;
2090 }
2091
2092 /**
2093 * pch_udc_start_next_txrequest() - This function starts
2094 * the next queued transmit (IN) request
2095 * @ep: Reference to the endpoint structure
2096 */
2097 static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
2098 {
2099 struct pch_udc_request *req;
2100 struct pch_udc_data_dma_desc *td_data;
2101
2102 if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
2103 return;
2104
2105 if (list_empty(&ep->queue))
2106 return;
2107
2108 /* next request */
2109 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2110 if (req->dma_going)
2111 return;
2112 if (!req->td_data)
2113 return;
2114 pch_udc_wait_ep_stall(ep);
2115 req->dma_going = 1;
2116 pch_udc_ep_set_ddptr(ep, 0);
2117 td_data = req->td_data;
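	/*
	 * Hand the whole descriptor chain back to the controller: mark each
	 * descriptor host-ready and stop after the one flagged as last.
	 */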
2118 while (1) {
2119 td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
2120 PCH_UDC_BS_HST_RDY;
2121 if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
2122 break;
2123 td_data = phys_to_virt(td_data->next);
2124 }
2125 pch_udc_ep_set_ddptr(ep, req->td_data_phys);
2126 pch_udc_set_dma(ep->dev, DMA_DIR_TX);
2127 pch_udc_ep_set_pd(ep);
2128 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2129 pch_udc_ep_clear_nak(ep);
2130 }
2131
2132 /**
2133 * pch_udc_complete_transfer() - This function completes a transfer
2134 * @ep: Reference to the endpoint structure
2135 */
2136 static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
2137 {
2138 struct pch_udc_request *req;
2139 struct pch_udc_dev *dev = ep->dev;
2140
2141 if (list_empty(&ep->queue))
2142 return;
2143 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2144 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2145 PCH_UDC_BS_DMA_DONE)
2146 return;
2147 if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
2148 PCH_UDC_RTS_SUCC) {
2149 dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
2150 "epstatus=0x%08x\n",
2151 (req->td_data_last->status & PCH_UDC_RXTX_STS),
2152 (int)(ep->epsts));
2153 return;
2154 }
2155
2156 req->req.actual = req->req.length;
2157 req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2158 req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2159 complete_req(ep, req, 0);
2160 req->dma_going = 0;
2161 if (!list_empty(&ep->queue)) {
2162 pch_udc_wait_ep_stall(ep);
2163 pch_udc_ep_clear_nak(ep);
2164 pch_udc_enable_ep_interrupts(ep->dev,
2165 PCH_UDC_EPINT(ep->in, ep->num));
2166 } else {
2167 pch_udc_disable_ep_interrupts(ep->dev,
2168 PCH_UDC_EPINT(ep->in, ep->num));
2169 }
2170 }
2171
2172 /**
2173 * pch_udc_complete_receiver() - This function completes a receive (OUT) transfer
2174 * @ep: Reference to the endpoint structure
2175 */
2176 static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
2177 {
2178 struct pch_udc_request *req;
2179 struct pch_udc_dev *dev = ep->dev;
2180 unsigned int count;
2181 struct pch_udc_data_dma_desc *td;
2182 dma_addr_t addr;
2183
2184 if (list_empty(&ep->queue))
2185 return;
2186 /* next request */
2187 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2188 pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
2189 pch_udc_ep_set_ddptr(ep, 0);
2190 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
2191 PCH_UDC_BS_DMA_DONE)
2192 td = req->td_data_last;
2193 else
2194 td = req->td_data;
2195
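	/*
	 * Walk the descriptor chain until the descriptor that is both
	 * DMA-done and flagged as last is found; its status word carries the
	 * received byte count.
	 */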
2196 while (1) {
2197 if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
2198 dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
2199 "epstatus=0x%08x\n",
2200 (req->td_data->status & PCH_UDC_RXTX_STS),
2201 (int)(ep->epsts));
2202 return;
2203 }
2204 if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
2205 if (td->status & PCH_UDC_DMA_LAST) {
2206 count = td->status & PCH_UDC_RXTX_BYTES;
2207 break;
2208 }
2209 if (td == req->td_data_last) {
2210 			dev_err(&dev->pdev->dev, "Incomplete RX descriptor");
2211 return;
2212 }
2213 addr = (dma_addr_t)td->next;
2214 td = phys_to_virt(addr);
2215 }
2216 /* on 64k packets the RXBYTES field is zero */
2217 if (!count && (req->req.length == UDC_DMA_MAXPACKET))
2218 count = UDC_DMA_MAXPACKET;
2219 req->td_data->status |= PCH_UDC_DMA_LAST;
2220 td->status |= PCH_UDC_BS_HST_BSY;
2221
2222 req->dma_going = 0;
2223 req->req.actual = count;
2224 complete_req(ep, req, 0);
2225 	/* If there is a new or not-yet-started request, start it now */
2226 if (!list_empty(&ep->queue)) {
2227 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2228 pch_udc_start_rxrequest(ep, req);
2229 }
2230 }
2231
2232 /**
2233 * pch_udc_svc_data_in() - This function processes endpoint interrupts
2234 * for IN endpoints
2235 * @dev: Reference to the device structure
2236 * @ep_num: Endpoint that generated the interrupt
2237 */
2238 static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
2239 {
2240 u32 epsts;
2241 struct pch_udc_ep *ep;
2242
2243 ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2244 epsts = ep->epsts;
2245 ep->epsts = 0;
2246
2247 if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2248 UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2249 UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
2250 return;
2251 if ((epsts & UDC_EPSTS_BNA))
2252 return;
2253 if (epsts & UDC_EPSTS_HE)
2254 return;
2255 if (epsts & UDC_EPSTS_RSS) {
2256 pch_udc_ep_set_stall(ep);
2257 pch_udc_enable_ep_interrupts(ep->dev,
2258 PCH_UDC_EPINT(ep->in, ep->num));
2259 }
2260 if (epsts & UDC_EPSTS_RCS) {
2261 if (!dev->prot_stall) {
2262 pch_udc_ep_clear_stall(ep);
2263 } else {
2264 pch_udc_ep_set_stall(ep);
2265 pch_udc_enable_ep_interrupts(ep->dev,
2266 PCH_UDC_EPINT(ep->in, ep->num));
2267 }
2268 }
2269 if (epsts & UDC_EPSTS_TDC)
2270 pch_udc_complete_transfer(ep);
2271 /* On IN interrupt, provide data if we have any */
2272 if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
2273 !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
2274 pch_udc_start_next_txrequest(ep);
2275 }
2276
2277 /**
2278 * pch_udc_svc_data_out() - Handles interrupts from OUT endpoint
2279 * @dev: Reference to the device structure
2280 * @ep_num: Endpoint that generated the interrupt
2281 */
2282 static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
2283 {
2284 u32 epsts;
2285 struct pch_udc_ep *ep;
2286 struct pch_udc_request *req = NULL;
2287
2288 ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
2289 epsts = ep->epsts;
2290 ep->epsts = 0;
2291
2292 if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
2293 /* next request */
2294 req = list_entry(ep->queue.next, struct pch_udc_request,
2295 queue);
2296 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2297 PCH_UDC_BS_DMA_DONE) {
2298 if (!req->dma_going)
2299 pch_udc_start_rxrequest(ep, req);
2300 return;
2301 }
2302 }
2303 if (epsts & UDC_EPSTS_HE)
2304 return;
2305 if (epsts & UDC_EPSTS_RSS) {
2306 pch_udc_ep_set_stall(ep);
2307 pch_udc_enable_ep_interrupts(ep->dev,
2308 PCH_UDC_EPINT(ep->in, ep->num));
2309 }
2310 if (epsts & UDC_EPSTS_RCS) {
2311 if (!dev->prot_stall) {
2312 pch_udc_ep_clear_stall(ep);
2313 } else {
2314 pch_udc_ep_set_stall(ep);
2315 pch_udc_enable_ep_interrupts(ep->dev,
2316 PCH_UDC_EPINT(ep->in, ep->num));
2317 }
2318 }
2319 if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2320 UDC_EPSTS_OUT_DATA) {
2321 if (ep->dev->prot_stall == 1) {
2322 pch_udc_ep_set_stall(ep);
2323 pch_udc_enable_ep_interrupts(ep->dev,
2324 PCH_UDC_EPINT(ep->in, ep->num));
2325 } else {
2326 pch_udc_complete_receiver(ep);
2327 }
2328 }
2329 if (list_empty(&ep->queue))
2330 pch_udc_set_dma(dev, DMA_DIR_RX);
2331 }
2332
2333 /**
2334 * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
2335 * @dev: Reference to the device structure
2336 */
2337 static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
2338 {
2339 u32 epsts;
2340 struct pch_udc_ep *ep;
2341 struct pch_udc_ep *ep_out;
2342
2343 ep = &dev->ep[UDC_EP0IN_IDX];
2344 ep_out = &dev->ep[UDC_EP0OUT_IDX];
2345 epsts = ep->epsts;
2346 ep->epsts = 0;
2347
2348 if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2349 UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2350 UDC_EPSTS_XFERDONE)))
2351 return;
2352 if ((epsts & UDC_EPSTS_BNA))
2353 return;
2354 if (epsts & UDC_EPSTS_HE)
2355 return;
2356 if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
2357 pch_udc_complete_transfer(ep);
2358 pch_udc_clear_dma(dev, DMA_DIR_RX);
2359 ep_out->td_data->status = (ep_out->td_data->status &
2360 ~PCH_UDC_BUFF_STS) |
2361 PCH_UDC_BS_HST_RDY;
2362 pch_udc_ep_clear_nak(ep_out);
2363 pch_udc_set_dma(dev, DMA_DIR_RX);
2364 pch_udc_ep_set_rrdy(ep_out);
2365 }
2366 /* On IN interrupt, provide data if we have any */
2367 if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
2368 !(epsts & UDC_EPSTS_TXEMPTY))
2369 pch_udc_start_next_txrequest(ep);
2370 }
2371
2372 /**
2373 * pch_udc_svc_control_out() - Routine that handles Control
2374 * OUT endpoint interrupts
2375 * @dev: Reference to the device structure
2376 */
2377 static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2378 __releases(&dev->lock)
2379 __acquires(&dev->lock)
2380 {
2381 u32 stat;
2382 int setup_supported;
2383 struct pch_udc_ep *ep;
2384
2385 ep = &dev->ep[UDC_EP0OUT_IDX];
2386 stat = ep->epsts;
2387 ep->epsts = 0;
2388
2389 /* If setup data */
2390 if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2391 UDC_EPSTS_OUT_SETUP) {
2392 dev->stall = 0;
2393 dev->ep[UDC_EP0IN_IDX].halted = 0;
2394 dev->ep[UDC_EP0OUT_IDX].halted = 0;
2395 dev->setup_data = ep->td_stp->request;
2396 pch_udc_init_setup_buff(ep->td_stp);
2397 pch_udc_clear_dma(dev, DMA_DIR_RX);
2398 pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
2399 dev->ep[UDC_EP0IN_IDX].in);
2400 if ((dev->setup_data.bRequestType & USB_DIR_IN))
2401 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2402 else /* OUT */
2403 dev->gadget.ep0 = &ep->ep;
2404 spin_lock(&dev->lock);
2405 /* If Mass storage Reset */
2406 if ((dev->setup_data.bRequestType == 0x21) &&
2407 (dev->setup_data.bRequest == 0xFF))
2408 dev->prot_stall = 0;
2409 /* call gadget with setup data received */
2410 setup_supported = dev->driver->setup(&dev->gadget,
2411 &dev->setup_data);
2412 spin_unlock(&dev->lock);
2413
2414 if (dev->setup_data.bRequestType & USB_DIR_IN) {
2415 ep->td_data->status = (ep->td_data->status &
2416 ~PCH_UDC_BUFF_STS) |
2417 PCH_UDC_BS_HST_RDY;
2418 pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2419 }
2420 /* ep0 in returns data on IN phase */
2421 if (setup_supported >= 0 && setup_supported <
2422 UDC_EP0IN_MAX_PKT_SIZE) {
2423 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
2424 /* Gadget would have queued a request when
2425 * we called the setup */
2426 if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
2427 pch_udc_set_dma(dev, DMA_DIR_RX);
2428 pch_udc_ep_clear_nak(ep);
2429 }
2430 } else if (setup_supported < 0) {
2431 /* if unsupported request, then stall */
2432 pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
2433 pch_udc_enable_ep_interrupts(ep->dev,
2434 PCH_UDC_EPINT(ep->in, ep->num));
2435 dev->stall = 0;
2436 pch_udc_set_dma(dev, DMA_DIR_RX);
2437 } else {
2438 dev->waiting_zlp_ack = 1;
2439 }
2440 } else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2441 UDC_EPSTS_OUT_DATA) && !dev->stall) {
2442 pch_udc_clear_dma(dev, DMA_DIR_RX);
2443 pch_udc_ep_set_ddptr(ep, 0);
2444 if (!list_empty(&ep->queue)) {
2445 ep->epsts = stat;
2446 pch_udc_svc_data_out(dev, PCH_UDC_EP0);
2447 }
2448 pch_udc_set_dma(dev, DMA_DIR_RX);
2449 }
2450 pch_udc_ep_set_rrdy(ep);
2451 }
2452
2453
2454 /**
2455 * pch_udc_postsvc_epinters() - This function enables endpoint interrupts
2456 * and clears NAK status
2457 * @dev: Reference to the device structure
2458 * @ep_num: End point number
2459 */
2460 static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2461 {
2462 struct pch_udc_ep *ep;
2463 struct pch_udc_request *req;
2464
2465 ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2466 if (!list_empty(&ep->queue)) {
2467 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2468 pch_udc_enable_ep_interrupts(ep->dev,
2469 PCH_UDC_EPINT(ep->in, ep->num));
2470 pch_udc_ep_clear_nak(ep);
2471 }
2472 }
2473
2474 /**
2475 * pch_udc_read_all_epstatus() - This function reads the status of all endpoints
2476 * @dev: Reference to the device structure
2477 * @ep_intr: Status of endpoint interrupt
2478 */
2479 static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2480 {
2481 int i;
2482 struct pch_udc_ep *ep;
2483
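	/*
	 * Bits 0..15 of the endpoint interrupt status report IN endpoints,
	 * bits 16..31 report the corresponding OUT endpoints.
	 */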
2484 for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2485 /* IN */
2486 if (ep_intr & (0x1 << i)) {
2487 ep = &dev->ep[UDC_EPIN_IDX(i)];
2488 ep->epsts = pch_udc_read_ep_status(ep);
2489 pch_udc_clear_ep_status(ep, ep->epsts);
2490 }
2491 /* OUT */
2492 if (ep_intr & (0x10000 << i)) {
2493 ep = &dev->ep[UDC_EPOUT_IDX(i)];
2494 ep->epsts = pch_udc_read_ep_status(ep);
2495 pch_udc_clear_ep_status(ep, ep->epsts);
2496 }
2497 }
2498 }
2499
2500 /**
2501 * pch_udc_activate_control_ep() - This function enables the control endpoints
2502 * for traffic after a reset
2503 * @dev: Reference to the device structure
2504 */
2505 static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
2506 {
2507 struct pch_udc_ep *ep;
2508 u32 val;
2509
2510 /* Setup the IN endpoint */
2511 ep = &dev->ep[UDC_EP0IN_IDX];
2512 pch_udc_clear_ep_control(ep);
2513 pch_udc_ep_fifo_flush(ep, ep->in);
2514 pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
2515 pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
2516 /* Initialize the IN EP Descriptor */
2517 ep->td_data = NULL;
2518 ep->td_stp = NULL;
2519 ep->td_data_phys = 0;
2520 ep->td_stp_phys = 0;
2521
2522 /* Setup the OUT endpoint */
2523 ep = &dev->ep[UDC_EP0OUT_IDX];
2524 pch_udc_clear_ep_control(ep);
2525 pch_udc_ep_fifo_flush(ep, ep->in);
2526 pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
2527 pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
2528 val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
2529 pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
2530
2531 /* Initialize the SETUP buffer */
2532 pch_udc_init_setup_buff(ep->td_stp);
2533 /* Write the pointer address of dma descriptor */
2534 pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
2535 /* Write the pointer address of Setup descriptor */
2536 pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2537
2538 /* Initialize the dma descriptor */
2539 ep->td_data->status = PCH_UDC_DMA_LAST;
2540 ep->td_data->dataptr = dev->dma_addr;
2541 ep->td_data->next = ep->td_data_phys;
2542
2543 pch_udc_ep_clear_nak(ep);
2544 }
2545
2546
2547 /**
2548 * pch_udc_svc_ur_interrupt() - This function handles a USB reset interrupt
2549 * @dev: Reference to driver structure
2550 */
2551 static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
2552 {
2553 struct pch_udc_ep *ep;
2554 int i;
2555
2556 pch_udc_clear_dma(dev, DMA_DIR_TX);
2557 pch_udc_clear_dma(dev, DMA_DIR_RX);
2558 /* Mask all endpoint interrupts */
2559 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2560 /* clear all endpoint interrupts */
2561 pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2562
2563 for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2564 ep = &dev->ep[i];
2565 pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
2566 pch_udc_clear_ep_control(ep);
2567 pch_udc_ep_set_ddptr(ep, 0);
2568 pch_udc_write_csr(ep->dev, 0x00, i);
2569 }
2570 dev->stall = 0;
2571 dev->prot_stall = 0;
2572 dev->waiting_zlp_ack = 0;
2573 dev->set_cfg_not_acked = 0;
2574
2575 /* disable ep to empty req queue. Skip the control EP's */
2576 for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
2577 ep = &dev->ep[i];
2578 pch_udc_ep_set_nak(ep);
2579 pch_udc_ep_fifo_flush(ep, ep->in);
2580 /* Complete request queue */
2581 empty_req_queue(ep);
2582 }
2583 if (dev->driver) {
2584 spin_lock(&dev->lock);
2585 usb_gadget_udc_reset(&dev->gadget, dev->driver);
2586 spin_unlock(&dev->lock);
2587 }
2588 }
2589
2590 /**
2591 * pch_udc_svc_enum_interrupt() - This function handles a USB speed enumeration
2592 * done interrupt
2593 * @dev: Reference to driver structure
2594 */
2595 static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
2596 {
2597 u32 dev_stat, dev_speed;
2598 u32 speed = USB_SPEED_FULL;
2599
2600 dev_stat = pch_udc_read_device_status(dev);
2601 dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
2602 UDC_DEVSTS_ENUM_SPEED_SHIFT;
2603 switch (dev_speed) {
2604 case UDC_DEVSTS_ENUM_SPEED_HIGH:
2605 speed = USB_SPEED_HIGH;
2606 break;
2607 case UDC_DEVSTS_ENUM_SPEED_FULL:
2608 speed = USB_SPEED_FULL;
2609 break;
2610 case UDC_DEVSTS_ENUM_SPEED_LOW:
2611 speed = USB_SPEED_LOW;
2612 break;
2613 default:
2614 BUG();
2615 }
2616 dev->gadget.speed = speed;
2617 pch_udc_activate_control_ep(dev);
2618 pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
2619 pch_udc_set_dma(dev, DMA_DIR_TX);
2620 pch_udc_set_dma(dev, DMA_DIR_RX);
2621 pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
2622
2623 /* enable device interrupts */
2624 pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2625 UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2626 UDC_DEVINT_SI | UDC_DEVINT_SC);
2627 }
2628
2629 /**
2630 * pch_udc_svc_intf_interrupt() - This function handles a set interface
2631 * interrupt
2632 * @dev: Reference to driver structure
2633 */
2634 static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
2635 {
2636 u32 reg, dev_stat = 0;
2637 int i, ret;
2638
2639 dev_stat = pch_udc_read_device_status(dev);
2640 dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
2641 UDC_DEVSTS_INTF_SHIFT;
2642 dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
2643 UDC_DEVSTS_ALT_SHIFT;
2644 dev->set_cfg_not_acked = 1;
2645 /* Construct the usb request for gadget driver and inform it */
2646 memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2647 dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
2648 dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
2649 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
2650 dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
2651 	/* program the Endpoint Cfg registers */
2652 /* Only one end point cfg register */
2653 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2654 reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
2655 (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
2656 reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
2657 (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
2658 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2659 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2660 /* clear stall bits */
2661 pch_udc_ep_clear_stall(&(dev->ep[i]));
2662 dev->ep[i].halted = 0;
2663 }
2664 dev->stall = 0;
2665 spin_lock(&dev->lock);
2666 ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
2667 spin_unlock(&dev->lock);
2668 }
2669
2670 /**
2671 * pch_udc_svc_cfg_interrupt() - This function handles a set configuration
2672 * interrupt
2673 * @dev: Reference to driver structure
2674 */
2675 static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
2676 {
2677 int i, ret;
2678 u32 reg, dev_stat = 0;
2679
2680 dev_stat = pch_udc_read_device_status(dev);
2681 dev->set_cfg_not_acked = 1;
2682 dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
2683 UDC_DEVSTS_CFG_SHIFT;
2684 /* make usb request for gadget driver */
2685 memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2686 dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
2687 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
2688 /* program the NE registers */
2689 /* Only one end point cfg register */
2690 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2691 reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
2692 (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
2693 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2694 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2695 /* clear stall bits */
2696 pch_udc_ep_clear_stall(&(dev->ep[i]));
2697 dev->ep[i].halted = 0;
2698 }
2699 dev->stall = 0;
2700
2701 /* call gadget zero with setup data received */
2702 spin_lock(&dev->lock);
2703 ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
2704 spin_unlock(&dev->lock);
2705 }
2706
2707 /**
2708 * pch_udc_dev_isr() - This function services device interrupts
2709 * by invoking appropriate routines.
2710 * @dev: Reference to the device structure
2711 * @dev_intr: The Device interrupt status.
2712 */
2713 static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
2714 {
2715 int vbus;
2716
2717 /* USB Reset Interrupt */
2718 if (dev_intr & UDC_DEVINT_UR) {
2719 pch_udc_svc_ur_interrupt(dev);
2720 dev_dbg(&dev->pdev->dev, "USB_RESET\n");
2721 }
2722 /* Enumeration Done Interrupt */
2723 if (dev_intr & UDC_DEVINT_ENUM) {
2724 pch_udc_svc_enum_interrupt(dev);
2725 dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
2726 }
2727 /* Set Interface Interrupt */
2728 if (dev_intr & UDC_DEVINT_SI)
2729 pch_udc_svc_intf_interrupt(dev);
2730 /* Set Config Interrupt */
2731 if (dev_intr & UDC_DEVINT_SC)
2732 pch_udc_svc_cfg_interrupt(dev);
2733 /* USB Suspend interrupt */
2734 if (dev_intr & UDC_DEVINT_US) {
2735 if (dev->driver
2736 && dev->driver->suspend) {
2737 spin_unlock(&dev->lock);
2738 dev->driver->suspend(&dev->gadget);
2739 spin_lock(&dev->lock);
2740 }
2741
2742 vbus = pch_vbus_gpio_get_value(dev);
2743 if ((dev->vbus_session == 0)
2744 && (vbus != 1)) {
2745 if (dev->driver && dev->driver->disconnect) {
2746 spin_unlock(&dev->lock);
2747 dev->driver->disconnect(&dev->gadget);
2748 spin_lock(&dev->lock);
2749 }
2750 pch_udc_reconnect(dev);
2751 } else if ((dev->vbus_session == 0)
2752 && (vbus == 1)
2753 && !dev->vbus_gpio.intr)
2754 schedule_work(&dev->vbus_gpio.irq_work_fall);
2755
2756 dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
2757 }
2758 /* Clear the SOF interrupt, if enabled */
2759 if (dev_intr & UDC_DEVINT_SOF)
2760 dev_dbg(&dev->pdev->dev, "SOF\n");
2761 /* ES interrupt, IDLE > 3ms on the USB */
2762 if (dev_intr & UDC_DEVINT_ES)
2763 dev_dbg(&dev->pdev->dev, "ES\n");
2764 /* RWKP interrupt */
2765 if (dev_intr & UDC_DEVINT_RWKP)
2766 dev_dbg(&dev->pdev->dev, "RWKP\n");
2767 }
2768
2769 /**
2770 * pch_udc_isr() - This function handles interrupts from the PCH USB Device
2771 * @irq: Interrupt request number
2772 * @pdev: Reference to the device structure (struct pch_udc_dev passed as void *)
2773 */
2774 static irqreturn_t pch_udc_isr(int irq, void *pdev)
2775 {
2776 struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
2777 u32 dev_intr, ep_intr;
2778 int i;
2779
2780 dev_intr = pch_udc_read_device_interrupts(dev);
2781 ep_intr = pch_udc_read_ep_interrupts(dev);
2782
2783 	/* On a hot plug, detect whether the controller is hung up. */
2784 if (dev_intr == ep_intr)
2785 if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
2786 dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
2787 /* The controller is reset */
2788 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
2789 return IRQ_HANDLED;
2790 }
2791 if (dev_intr)
2792 /* Clear device interrupts */
2793 pch_udc_write_device_interrupts(dev, dev_intr);
2794 if (ep_intr)
2795 /* Clear ep interrupts */
2796 pch_udc_write_ep_interrupts(dev, ep_intr);
2797 if (!dev_intr && !ep_intr)
2798 return IRQ_NONE;
2799 spin_lock(&dev->lock);
2800 if (dev_intr)
2801 pch_udc_dev_isr(dev, dev_intr);
2802 if (ep_intr) {
2803 pch_udc_read_all_epstatus(dev, ep_intr);
2804 /* Process Control In interrupts, if present */
2805 if (ep_intr & UDC_EPINT_IN_EP0) {
2806 pch_udc_svc_control_in(dev);
2807 pch_udc_postsvc_epinters(dev, 0);
2808 }
2809 /* Process Control Out interrupts, if present */
2810 if (ep_intr & UDC_EPINT_OUT_EP0)
2811 pch_udc_svc_control_out(dev);
2812 /* Process data in end point interrupts */
2813 for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
2814 if (ep_intr & (1 << i)) {
2815 pch_udc_svc_data_in(dev, i);
2816 pch_udc_postsvc_epinters(dev, i);
2817 }
2818 }
2819 /* Process data out end point interrupts */
2820 for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
2821 PCH_UDC_USED_EP_NUM); i++)
2822 if (ep_intr & (1 << i))
2823 pch_udc_svc_data_out(dev, i -
2824 UDC_EPINT_OUT_SHIFT);
2825 }
2826 spin_unlock(&dev->lock);
2827 return IRQ_HANDLED;
2828 }
2829
2830 /**
2831 * pch_udc_setup_ep0() - This function enables control endpoint for traffic
2832 * @dev: Reference to the device structure
2833 */
2834 static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
2835 {
2836 /* enable ep0 interrupts */
2837 pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
2838 UDC_EPINT_OUT_EP0);
2839 /* enable device interrupts */
2840 pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2841 UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2842 UDC_DEVINT_SI | UDC_DEVINT_SC);
2843 }
2844
2845 /**
2846 * gadget_release() - Free the gadget driver private data
2847 * @pdev: reference to the device structure
2848 */
2849 static void gadget_release(struct device *pdev)
2850 {
2851 struct pch_udc_dev *dev = dev_get_drvdata(pdev);
2852
2853 kfree(dev);
2854 }
2855
2856 /**
2857 * pch_udc_pcd_reinit() - This API initializes the endpoint structures
2858 * @dev: Reference to the driver structure
2859 */
2860 static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
2861 {
2862 const char *const ep_string[] = {
2863 ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
2864 "ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
2865 "ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
2866 "ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
2867 "ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
2868 "ep15in", "ep15out",
2869 };
2870 int i;
2871
2872 dev->gadget.speed = USB_SPEED_UNKNOWN;
2873 INIT_LIST_HEAD(&dev->gadget.ep_list);
2874
2875 /* Initialize the endpoints structures */
2876 memset(dev->ep, 0, sizeof dev->ep);
2877 for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2878 struct pch_udc_ep *ep = &dev->ep[i];
2879 ep->dev = dev;
2880 ep->halted = 1;
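		/*
		 * Endpoints alternate IN/OUT in dev->ep[] (matching
		 * ep_string[] above): even index = IN, odd index = OUT;
		 * ep->num is the USB endpoint number.
		 */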
2881 ep->num = i / 2;
2882 ep->in = ~i & 1;
2883 ep->ep.name = ep_string[i];
2884 ep->ep.ops = &pch_udc_ep_ops;
2885 if (ep->in) {
2886 ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
2887 ep->ep.caps.dir_in = true;
2888 } else {
2889 ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
2890 UDC_EP_REG_SHIFT;
2891 ep->ep.caps.dir_out = true;
2892 }
2893 if (i == UDC_EP0IN_IDX || i == UDC_EP0OUT_IDX) {
2894 ep->ep.caps.type_control = true;
2895 } else {
2896 ep->ep.caps.type_iso = true;
2897 ep->ep.caps.type_bulk = true;
2898 ep->ep.caps.type_int = true;
2899 }
2900 /* need to set ep->ep.maxpacket and set Default Configuration?*/
2901 usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
2902 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2903 INIT_LIST_HEAD(&ep->queue);
2904 }
2905 usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep, UDC_EP0IN_MAX_PKT_SIZE);
2906 usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep, UDC_EP0OUT_MAX_PKT_SIZE);
2907
2908 /* remove ep0 in and out from the list. They have own pointer */
2909 list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
2910 list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
2911
2912 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2913 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2914 }
2915
2916 /**
2917 * pch_udc_pcd_init() - This API initializes the driver structure
2918 * @dev: Reference to the driver structure
2919 *
2920 * Return codes:
2921 * 0: Success
2922 */
2923 static int pch_udc_pcd_init(struct pch_udc_dev *dev)
2924 {
2925 pch_udc_init(dev);
2926 pch_udc_pcd_reinit(dev);
2927 pch_vbus_gpio_init(dev, vbus_gpio_port);
2928 return 0;
2929 }
2930
2931 /**
2932 * init_dma_pools() - create dma pools during initialization
2933 * @dev: Reference to the driver structure
2934 */
2935 static int init_dma_pools(struct pch_udc_dev *dev)
2936 {
2937 struct pch_udc_stp_dma_desc *td_stp;
2938 struct pch_udc_data_dma_desc *td_data;
2939 void *ep0out_buf;
2940
2941 /* DMA setup */
2942 dev->data_requests = pci_pool_create("data_requests", dev->pdev,
2943 sizeof(struct pch_udc_data_dma_desc), 0, 0);
2944 if (!dev->data_requests) {
2945 dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2946 __func__);
2947 return -ENOMEM;
2948 }
2949
2950 /* dma desc for setup data */
2951 dev->stp_requests = pci_pool_create("setup requests", dev->pdev,
2952 sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2953 if (!dev->stp_requests) {
2954 dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2955 __func__);
2956 return -ENOMEM;
2957 }
2958 /* setup */
2959 td_stp = pci_pool_alloc(dev->stp_requests, GFP_KERNEL,
2960 &dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2961 if (!td_stp) {
2962 dev_err(&dev->pdev->dev,
2963 "%s: can't allocate setup dma descriptor\n", __func__);
2964 return -ENOMEM;
2965 }
2966 dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
2967
2968 	/* dma desc for ep0-out data */
2969 td_data = pci_pool_alloc(dev->data_requests, GFP_KERNEL,
2970 &dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2971 if (!td_data) {
2972 dev_err(&dev->pdev->dev,
2973 "%s: can't allocate data dma descriptor\n", __func__);
2974 return -ENOMEM;
2975 }
2976 dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2977 dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2978 dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2979 dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2980 dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2981
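	/*
	 * Bounce buffer for ep0 OUT data (4 * UDC_EP0OUT_BUFF_SIZE bytes);
	 * mapped here and unmapped again in pch_udc_remove().
	 */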
2982 ep0out_buf = devm_kzalloc(&dev->pdev->dev, UDC_EP0OUT_BUFF_SIZE * 4,
2983 GFP_KERNEL);
2984 if (!ep0out_buf)
2985 return -ENOMEM;
2986 dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
2987 UDC_EP0OUT_BUFF_SIZE * 4,
2988 DMA_FROM_DEVICE);
2989 return 0;
2990 }
2991
2992 static int pch_udc_start(struct usb_gadget *g,
2993 struct usb_gadget_driver *driver)
2994 {
2995 struct pch_udc_dev *dev = to_pch_udc(g);
2996
2997 driver->driver.bus = NULL;
2998 dev->driver = driver;
2999
3000 /* get ready for ep0 traffic */
3001 pch_udc_setup_ep0(dev);
3002
3003 /* clear SD */
3004 if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
3005 pch_udc_clear_disconnect(dev);
3006
3007 dev->connected = 1;
3008 return 0;
3009 }
3010
3011 static int pch_udc_stop(struct usb_gadget *g)
3012 {
3013 struct pch_udc_dev *dev = to_pch_udc(g);
3014
3015 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3016
3017 	/* Ensure that there are no pending requests with this driver */
3018 dev->driver = NULL;
3019 dev->connected = 0;
3020
3021 /* set SD */
3022 pch_udc_set_disconnect(dev);
3023
3024 return 0;
3025 }
3026
3027 static void pch_udc_shutdown(struct pci_dev *pdev)
3028 {
3029 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3030
3031 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3032 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3033
3034 /* disable the pullup so the host will think we're gone */
3035 pch_udc_set_disconnect(dev);
3036 }
3037
3038 static void pch_udc_remove(struct pci_dev *pdev)
3039 {
3040 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3041
3042 usb_del_gadget_udc(&dev->gadget);
3043
3044 /* gadget driver must not be registered */
3045 if (dev->driver)
3046 dev_err(&pdev->dev,
3047 "%s: gadget driver still bound!!!\n", __func__);
3048 /* dma pool cleanup */
3049 if (dev->data_requests)
3050 pci_pool_destroy(dev->data_requests);
3051
3052 if (dev->stp_requests) {
3053 /* cleanup DMA desc's for ep0in */
3054 if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
3055 pci_pool_free(dev->stp_requests,
3056 dev->ep[UDC_EP0OUT_IDX].td_stp,
3057 dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
3058 }
3059 if (dev->ep[UDC_EP0OUT_IDX].td_data) {
3060 pci_pool_free(dev->stp_requests,
3061 dev->ep[UDC_EP0OUT_IDX].td_data,
3062 dev->ep[UDC_EP0OUT_IDX].td_data_phys);
3063 }
3064 pci_pool_destroy(dev->stp_requests);
3065 }
3066
3067 if (dev->dma_addr)
3068 dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
3069 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
3070
3071 pch_vbus_gpio_free(dev);
3072
3073 pch_udc_exit(dev);
3074 }
3075
3076 #ifdef CONFIG_PM_SLEEP
3077 static int pch_udc_suspend(struct device *d)
3078 {
3079 struct pci_dev *pdev = to_pci_dev(d);
3080 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3081
3082 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3083 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3084
3085 return 0;
3086 }
3087
3088 static int pch_udc_resume(struct device *d)
3089 {
3090 return 0;
3091 }
3092
3093 static SIMPLE_DEV_PM_OPS(pch_udc_pm, pch_udc_suspend, pch_udc_resume);
3094 #define PCH_UDC_PM_OPS (&pch_udc_pm)
3095 #else
3096 #define PCH_UDC_PM_OPS NULL
3097 #endif /* CONFIG_PM_SLEEP */
3098
3099 static int pch_udc_probe(struct pci_dev *pdev,
3100 const struct pci_device_id *id)
3101 {
3102 int bar;
3103 int retval;
3104 struct pch_udc_dev *dev;
3105
3106 /* init */
3107 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
3108 if (!dev)
3109 return -ENOMEM;
3110
3111 /* pci setup */
3112 retval = pcim_enable_device(pdev);
3113 if (retval)
3114 return retval;
3115
3116 pci_set_drvdata(pdev, dev);
3117
3118 /* Determine BAR based on PCI ID */
3119 if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC)
3120 bar = PCH_UDC_PCI_BAR_QUARK_X1000;
3121 else
3122 bar = PCH_UDC_PCI_BAR;
3123
3124 /* PCI resource allocation */
3125 retval = pcim_iomap_regions(pdev, 1 << bar, pci_name(pdev));
3126 if (retval)
3127 return retval;
3128
3129 dev->base_addr = pcim_iomap_table(pdev)[bar];
3130
3131 /* initialize the hardware */
3132 if (pch_udc_pcd_init(dev))
3133 return -ENODEV;
3134
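	/*
	 * Prefer MSI when the function supports it; if pci_enable_msi()
	 * fails (no MSI capability or platform support), pdev->irq still
	 * refers to the legacy INTx line and the shared handler below keeps
	 * working.
	 */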
3135 pci_enable_msi(pdev);
3136
3137 retval = devm_request_irq(&pdev->dev, pdev->irq, pch_udc_isr,
3138 IRQF_SHARED, KBUILD_MODNAME, dev);
3139 if (retval) {
3140 dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
3141 pdev->irq);
3142 goto finished;
3143 }
3144
3145 pci_set_master(pdev);
3146 pci_try_set_mwi(pdev);
3147
3148 /* device struct setup */
3149 spin_lock_init(&dev->lock);
3150 dev->pdev = pdev;
3151 dev->gadget.ops = &pch_udc_ops;
3152
3153 retval = init_dma_pools(dev);
3154 if (retval)
3155 goto finished;
3156
3157 dev->gadget.name = KBUILD_MODNAME;
3158 dev->gadget.max_speed = USB_SPEED_HIGH;
3159
3160 /* Put the device in disconnected state till a driver is bound */
3161 pch_udc_set_disconnect(dev);
3162 retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
3163 gadget_release);
3164 if (retval)
3165 goto finished;
3166 return 0;
3167
3168 finished:
3169 pch_udc_remove(pdev);
3170 return retval;
3171 }
3172
3173 static const struct pci_device_id pch_udc_pcidev_id[] = {
3174 {
3175 PCI_DEVICE(PCI_VENDOR_ID_INTEL,
3176 PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
3177 .class = PCI_CLASS_SERIAL_USB_DEVICE,
3178 .class_mask = 0xffffffff,
3179 },
3180 {
3181 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
3182 .class = PCI_CLASS_SERIAL_USB_DEVICE,
3183 .class_mask = 0xffffffff,
3184 },
3185 {
3186 PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
3187 .class = PCI_CLASS_SERIAL_USB_DEVICE,
3188 .class_mask = 0xffffffff,
3189 },
3190 {
3191 PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
3192 .class = PCI_CLASS_SERIAL_USB_DEVICE,
3193 .class_mask = 0xffffffff,
3194 },
3195 { 0 },
3196 };
3197
3198 MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
3199
3200 static struct pci_driver pch_udc_driver = {
3201 .name = KBUILD_MODNAME,
3202 .id_table = pch_udc_pcidev_id,
3203 .probe = pch_udc_probe,
3204 .remove = pch_udc_remove,
3205 .shutdown = pch_udc_shutdown,
3206 .driver = {
3207 .pm = PCH_UDC_PM_OPS,
3208 },
3209 };
3210
3211 module_pci_driver(pch_udc_driver);
3212
3213 MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
3214 MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
3215 MODULE_LICENSE("GPL");