drivers/usb/gadget/pch_udc.c
1 /*
2 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 */
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/delay.h>
13 #include <linux/errno.h>
14 #include <linux/list.h>
15 #include <linux/interrupt.h>
16 #include <linux/usb/ch9.h>
17 #include <linux/usb/gadget.h>
18 #include <linux/gpio.h>
19 #include <linux/irq.h>
20
21 /* GPIO port for VBUS detection */
22 static int vbus_gpio_port = -1; /* GPIO port number (-1:Not used) */
23
24 #define PCH_VBUS_PERIOD 3000 /* VBUS polling period (msec) */
25 #define PCH_VBUS_INTERVAL 10 /* VBUS polling interval (msec) */
26
27 /* Address offset of Registers */
28 #define UDC_EP_REG_SHIFT 0x20 /* Offset to next EP */
29
30 #define UDC_EPCTL_ADDR 0x00 /* Endpoint control */
31 #define UDC_EPSTS_ADDR 0x04 /* Endpoint status */
32 #define UDC_BUFIN_FRAMENUM_ADDR 0x08 /* buffer size in / frame number out */
33 #define UDC_BUFOUT_MAXPKT_ADDR 0x0C /* buffer size out / maxpkt in */
34 #define UDC_SUBPTR_ADDR 0x10 /* setup buffer pointer */
35 #define UDC_DESPTR_ADDR 0x14 /* Data descriptor pointer */
36 #define UDC_CONFIRM_ADDR 0x18 /* Write/Read confirmation */
37
38 #define UDC_DEVCFG_ADDR 0x400 /* Device configuration */
39 #define UDC_DEVCTL_ADDR 0x404 /* Device control */
40 #define UDC_DEVSTS_ADDR 0x408 /* Device status */
41 #define UDC_DEVIRQSTS_ADDR 0x40C /* Device irq status */
42 #define UDC_DEVIRQMSK_ADDR 0x410 /* Device irq mask */
43 #define UDC_EPIRQSTS_ADDR 0x414 /* Endpoint irq status */
44 #define UDC_EPIRQMSK_ADDR 0x418 /* Endpoint irq mask */
45 #define UDC_DEVLPM_ADDR 0x41C /* LPM control / status */
46 #define UDC_CSR_BUSY_ADDR 0x4f0 /* UDC_CSR_BUSY Status register */
47 #define UDC_SRST_ADDR 0x4fc /* SOFT RESET register */
48 #define UDC_CSR_ADDR 0x500 /* USB_DEVICE endpoint register */
49
50 /* Endpoint control register */
51 /* Bit position */
52 #define UDC_EPCTL_MRXFLUSH (1 << 12)
53 #define UDC_EPCTL_RRDY (1 << 9)
54 #define UDC_EPCTL_CNAK (1 << 8)
55 #define UDC_EPCTL_SNAK (1 << 7)
56 #define UDC_EPCTL_NAK (1 << 6)
57 #define UDC_EPCTL_P (1 << 3)
58 #define UDC_EPCTL_F (1 << 1)
59 #define UDC_EPCTL_S (1 << 0)
60 #define UDC_EPCTL_ET_SHIFT 4
61 /* Mask pattern */
62 #define UDC_EPCTL_ET_MASK 0x00000030
63 /* Value for ET field */
64 #define UDC_EPCTL_ET_CONTROL 0
65 #define UDC_EPCTL_ET_ISO 1
66 #define UDC_EPCTL_ET_BULK 2
67 #define UDC_EPCTL_ET_INTERRUPT 3
68
69 /* Endpoint status register */
70 /* Bit position */
71 #define UDC_EPSTS_XFERDONE (1 << 27)
72 #define UDC_EPSTS_RSS (1 << 26)
73 #define UDC_EPSTS_RCS (1 << 25)
74 #define UDC_EPSTS_TXEMPTY (1 << 24)
75 #define UDC_EPSTS_TDC (1 << 10)
76 #define UDC_EPSTS_HE (1 << 9)
77 #define UDC_EPSTS_MRXFIFO_EMP (1 << 8)
78 #define UDC_EPSTS_BNA (1 << 7)
79 #define UDC_EPSTS_IN (1 << 6)
80 #define UDC_EPSTS_OUT_SHIFT 4
81 /* Mask pattern */
82 #define UDC_EPSTS_OUT_MASK 0x00000030
83 #define UDC_EPSTS_ALL_CLR_MASK 0x1F0006F0
84 /* Value for OUT field */
85 #define UDC_EPSTS_OUT_SETUP 2
86 #define UDC_EPSTS_OUT_DATA 1
87
88 /* Device configuration register */
89 /* Bit position */
90 #define UDC_DEVCFG_CSR_PRG (1 << 17)
91 #define UDC_DEVCFG_SP (1 << 3)
92 /* SPD Value */
93 #define UDC_DEVCFG_SPD_HS 0x0
94 #define UDC_DEVCFG_SPD_FS 0x1
95 #define UDC_DEVCFG_SPD_LS 0x2
96
97 /* Device control register */
98 /* Bit position */
99 #define UDC_DEVCTL_THLEN_SHIFT 24
100 #define UDC_DEVCTL_BRLEN_SHIFT 16
101 #define UDC_DEVCTL_CSR_DONE (1 << 13)
102 #define UDC_DEVCTL_SD (1 << 10)
103 #define UDC_DEVCTL_MODE (1 << 9)
104 #define UDC_DEVCTL_BREN (1 << 8)
105 #define UDC_DEVCTL_THE (1 << 7)
106 #define UDC_DEVCTL_DU (1 << 4)
107 #define UDC_DEVCTL_TDE (1 << 3)
108 #define UDC_DEVCTL_RDE (1 << 2)
109 #define UDC_DEVCTL_RES (1 << 0)
110
111 /* Device status register */
112 /* Bit position */
113 #define UDC_DEVSTS_TS_SHIFT 18
114 #define UDC_DEVSTS_ENUM_SPEED_SHIFT 13
115 #define UDC_DEVSTS_ALT_SHIFT 8
116 #define UDC_DEVSTS_INTF_SHIFT 4
117 #define UDC_DEVSTS_CFG_SHIFT 0
118 /* Mask pattern */
119 #define UDC_DEVSTS_TS_MASK 0xfffc0000
120 #define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000
121 #define UDC_DEVSTS_ALT_MASK 0x00000f00
122 #define UDC_DEVSTS_INTF_MASK 0x000000f0
123 #define UDC_DEVSTS_CFG_MASK 0x0000000f
124 /* Values for the ENUM_SPEED field */
125 #define UDC_DEVSTS_ENUM_SPEED_FULL 1
126 #define UDC_DEVSTS_ENUM_SPEED_HIGH 0
127 #define UDC_DEVSTS_ENUM_SPEED_LOW 2
128 #define UDC_DEVSTS_ENUM_SPEED_FULLX 3
129
130 /* Device irq register */
131 /* Bit position */
132 #define UDC_DEVINT_RWKP (1 << 7)
133 #define UDC_DEVINT_ENUM (1 << 6)
134 #define UDC_DEVINT_SOF (1 << 5)
135 #define UDC_DEVINT_US (1 << 4)
136 #define UDC_DEVINT_UR (1 << 3)
137 #define UDC_DEVINT_ES (1 << 2)
138 #define UDC_DEVINT_SI (1 << 1)
139 #define UDC_DEVINT_SC (1 << 0)
140 /* Mask pattern */
141 #define UDC_DEVINT_MSK 0x7f
142
143 /* Endpoint irq register */
144 /* Bit position */
145 #define UDC_EPINT_IN_SHIFT 0
146 #define UDC_EPINT_OUT_SHIFT 16
147 #define UDC_EPINT_IN_EP0 (1 << 0)
148 #define UDC_EPINT_OUT_EP0 (1 << 16)
149 /* Mask pattern */
150 #define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff
151
152 /* UDC_CSR_BUSY Status register */
153 /* Bit position */
154 #define UDC_CSR_BUSY (1 << 0)
155
156 /* SOFT RESET register */
157 /* Bit position */
158 #define UDC_PSRST (1 << 1)
159 #define UDC_SRST (1 << 0)
160
161 /* USB_DEVICE endpoint register */
162 /* Bit position */
163 #define UDC_CSR_NE_NUM_SHIFT 0
164 #define UDC_CSR_NE_DIR_SHIFT 4
165 #define UDC_CSR_NE_TYPE_SHIFT 5
166 #define UDC_CSR_NE_CFG_SHIFT 7
167 #define UDC_CSR_NE_INTF_SHIFT 11
168 #define UDC_CSR_NE_ALT_SHIFT 15
169 #define UDC_CSR_NE_MAX_PKT_SHIFT 19
170 /* Mask pattern */
171 #define UDC_CSR_NE_NUM_MASK 0x0000000f
172 #define UDC_CSR_NE_DIR_MASK 0x00000010
173 #define UDC_CSR_NE_TYPE_MASK 0x00000060
174 #define UDC_CSR_NE_CFG_MASK 0x00000780
175 #define UDC_CSR_NE_INTF_MASK 0x00007800
176 #define UDC_CSR_NE_ALT_MASK 0x00078000
177 #define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000
178
179 #define PCH_UDC_CSR(ep) (UDC_CSR_ADDR + ep*4)
180 #define PCH_UDC_EPINT(in, num)\
181 (1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
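/*
 * Illustrative expansions of the two helpers above (not used anywhere,
 * values follow directly from the definitions):
 *
 *	PCH_UDC_CSR(1)      -> UDC_CSR_ADDR + 4	(CSR register of endpoint 1)
 *	PCH_UDC_EPINT(1, 1) -> (1 << 1)		IN  endpoint 1 interrupt bit
 *	PCH_UDC_EPINT(0, 1) -> (1 << 17)	OUT endpoint 1 interrupt bit
 */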
182
183 /* Index of endpoint */
184 #define UDC_EP0IN_IDX 0
185 #define UDC_EP0OUT_IDX 1
186 #define UDC_EPIN_IDX(ep) (ep * 2)
187 #define UDC_EPOUT_IDX(ep) (ep * 2 + 1)
188 #define PCH_UDC_EP0 0
189 #define PCH_UDC_EP1 1
190 #define PCH_UDC_EP2 2
191 #define PCH_UDC_EP3 3
192
193 /* Number of endpoint */
194 #define PCH_UDC_EP_NUM 32 /* Total number of EPs (16 IN,16 OUT) */
195 #define PCH_UDC_USED_EP_NUM 4 /* EP number of EP's really used */
196 /* Length Value */
197 #define PCH_UDC_BRLEN 0x0F /* Burst length */
198 #define PCH_UDC_THLEN 0x1F /* Threshold length */
199 /* Value of EP Buffer Size */
200 #define UDC_EP0IN_BUFF_SIZE 16
201 #define UDC_EPIN_BUFF_SIZE 256
202 #define UDC_EP0OUT_BUFF_SIZE 16
203 #define UDC_EPOUT_BUFF_SIZE 256
204 /* Value of EP maximum packet size */
205 #define UDC_EP0IN_MAX_PKT_SIZE 64
206 #define UDC_EP0OUT_MAX_PKT_SIZE 64
207 #define UDC_BULK_MAX_PKT_SIZE 512
208
209 /* DMA */
210 #define DMA_DIR_RX 1 /* DMA for data receive */
211 #define DMA_DIR_TX 2 /* DMA for data transmit */
212 #define DMA_ADDR_INVALID (~(dma_addr_t)0)
213 #define UDC_DMA_MAXPACKET 65536 /* maximum packet size for DMA */
214
215 /**
216 * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
217 * for data
218 * @status: Status quadlet
219 * @reserved: Reserved
220 * @dataptr: Data buffer address
221 * @next: Next descriptor
222 */
223 struct pch_udc_data_dma_desc {
224 u32 status;
225 u32 reserved;
226 u32 dataptr;
227 u32 next;
228 };
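/*
 * Note on how these descriptors are used further down: @dataptr holds the
 * bus address of the data buffer and @next the bus address of the following
 * descriptor; the last descriptor of a request carries PCH_UDC_DMA_LAST in
 * @status and its @next wraps back to the first descriptor of the request
 * (see pch_udc_create_dma_chain() below).
 */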
229
230 /**
231 * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information
232 * for control data
233 * @status: Status
234 * @reserved: Reserved
235 * @request: Setup packet data (struct usb_ctrlrequest)
237 */
238 struct pch_udc_stp_dma_desc {
239 u32 status;
240 u32 reserved;
241 struct usb_ctrlrequest request;
242 } __attribute__((packed));
243
244 /* DMA status definitions */
245 /* Buffer status */
246 #define PCH_UDC_BUFF_STS 0xC0000000
247 #define PCH_UDC_BS_HST_RDY 0x00000000
248 #define PCH_UDC_BS_DMA_BSY 0x40000000
249 #define PCH_UDC_BS_DMA_DONE 0x80000000
250 #define PCH_UDC_BS_HST_BSY 0xC0000000
251 /* Rx/Tx Status */
252 #define PCH_UDC_RXTX_STS 0x30000000
253 #define PCH_UDC_RTS_SUCC 0x00000000
254 #define PCH_UDC_RTS_DESERR 0x10000000
255 #define PCH_UDC_RTS_BUFERR 0x30000000
256 /* Last Descriptor Indication */
257 #define PCH_UDC_DMA_LAST 0x08000000
258 /* Number of Rx/Tx Bytes Mask */
259 #define PCH_UDC_RXTX_BYTES 0x0000ffff
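/*
 * For reference, a finished data descriptor is typically decoded as:
 * (status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE once the controller
 * is done with it, (status & PCH_UDC_RXTX_STS) reports success or a
 * descriptor/buffer error, and (status & PCH_UDC_RXTX_BYTES) gives the
 * number of bytes transferred.
 */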
260
261 /**
262 * struct pch_udc_cfg_data - Structure to hold current configuration
263 * and interface information
264 * @cur_cfg: current configuration in use
265 * @cur_intf: current interface in use
266 * @cur_alt: current alt interface in use
267 */
268 struct pch_udc_cfg_data {
269 u16 cur_cfg;
270 u16 cur_intf;
271 u16 cur_alt;
272 };
273
274 /**
275 * struct pch_udc_ep - Structure holding a PCH USB device Endpoint information
276 * @ep: embedded ep request
277 * @td_stp_phys: for setup request
278 * @td_data_phys: for data request
279 * @td_stp: for setup request
280 * @td_data: for data request
281 * @dev: reference to device struct
282 * @offset_addr: offset address of ep register
283 * @desc: for this ep
284 * @queue: queue for requests
285 * @num: endpoint number
286 * @in: endpoint is IN
287 * @halted: endpoint halted?
288 * @epsts: Endpoint status
289 */
290 struct pch_udc_ep {
291 struct usb_ep ep;
292 dma_addr_t td_stp_phys;
293 dma_addr_t td_data_phys;
294 struct pch_udc_stp_dma_desc *td_stp;
295 struct pch_udc_data_dma_desc *td_data;
296 struct pch_udc_dev *dev;
297 unsigned long offset_addr;
298 struct list_head queue;
299 unsigned num:5,
300 in:1,
301 halted:1;
302 unsigned long epsts;
303 };
304
305 /**
306 * struct pch_vbus_gpio_data - Structure holding GPIO information
307 * for detecting VBUS
308 * @port: gpio port number
309 * @intr: gpio interrupt number
310 * @irq_work_fall: Structure for WorkQueue
311 * @irq_work_rise: Structure for WorkQueue
312 */
313 struct pch_vbus_gpio_data {
314 int port;
315 int intr;
316 struct work_struct irq_work_fall;
317 struct work_struct irq_work_rise;
318 };
319
320 /**
321 * struct pch_udc_dev - Structure holding complete information
322 * of the PCH USB device
323 * @gadget: gadget driver data
324 * @driver: reference to gadget driver bound
325 * @pdev: reference to the PCI device
326 * @ep: array of endpoints
327 * @lock: protects all state
328 * @active: enabled the PCI device
329 * @stall: stall requested
330 * @prot_stall: protocol stall requested
331 * @irq_registered: irq registered with system
332 * @mem_region: device memory mapped
333 * @registered: driver registered with system
334 * @suspended: driver in suspended state
335 * @connected: gadget driver associated
336 * @vbus_session: required vbus_session state
337 * @set_cfg_not_acked: pending acknowledgement for setup
338 * @waiting_zlp_ack: pending acknowledgement for ZLP
339 * @data_requests: DMA pool for data requests
340 * @stp_requests: DMA pool for setup requests
341 * @dma_addr: DMA address of @ep0out_buf
342 * @ep0out_buf: Buffer for DMA
343 * @setup_data: Received setup data
344 * @phys_addr: Physical address of device memory
345 * @base_addr: Base address of mapped device memory
346 * @irq: IRQ line for the device
347 * @cfg_data: current cfg, intf, and alt in use
348 * @vbus_gpio: GPIO information for detecting VBUS
349 */
350 struct pch_udc_dev {
351 struct usb_gadget gadget;
352 struct usb_gadget_driver *driver;
353 struct pci_dev *pdev;
354 struct pch_udc_ep ep[PCH_UDC_EP_NUM];
355 spinlock_t lock; /* protects all state */
356 unsigned active:1,
357 stall:1,
358 prot_stall:1,
359 irq_registered:1,
360 mem_region:1,
361 registered:1,
362 suspended:1,
363 connected:1,
364 vbus_session:1,
365 set_cfg_not_acked:1,
366 waiting_zlp_ack:1;
367 struct pci_pool *data_requests;
368 struct pci_pool *stp_requests;
369 dma_addr_t dma_addr;
370 void *ep0out_buf;
371 struct usb_ctrlrequest setup_data;
372 unsigned long phys_addr;
373 void __iomem *base_addr;
374 unsigned irq;
375 struct pch_udc_cfg_data cfg_data;
376 struct pch_vbus_gpio_data vbus_gpio;
377 };
378 #define to_pch_udc(g) (container_of((g), struct pch_udc_dev, gadget))
379
380 #define PCH_UDC_PCI_BAR 1
381 #define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
382 #define PCI_VENDOR_ID_ROHM 0x10DB
383 #define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
384 #define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
385
386 static const char ep0_string[] = "ep0in";
387 static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */
388 static bool speed_fs;
389 module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
390 MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
391
392 /**
393 * struct pch_udc_request - Structure holding a PCH USB device request packet
394 * @req: embedded ep request
395 * @td_data_phys: phys. address
396 * @td_data: first dma desc. of chain
397 * @td_data_last: last dma desc. of chain
398 * @queue: associated queue
399 * @dma_going: DMA in progress for request
400 * @dma_mapped: DMA memory mapped for request
401 * @dma_done: DMA completed for request
402 * @chain_len: chain length
403 * @buf: Buffer memory for align adjustment
404 * @dma: DMA memory for align adjustment
405 */
406 struct pch_udc_request {
407 struct usb_request req;
408 dma_addr_t td_data_phys;
409 struct pch_udc_data_dma_desc *td_data;
410 struct pch_udc_data_dma_desc *td_data_last;
411 struct list_head queue;
412 unsigned dma_going:1,
413 dma_mapped:1,
414 dma_done:1;
415 unsigned chain_len;
416 void *buf;
417 dma_addr_t dma;
418 };
419
420 static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
421 {
422 return ioread32(dev->base_addr + reg);
423 }
424
425 static inline void pch_udc_writel(struct pch_udc_dev *dev,
426 unsigned long val, unsigned long reg)
427 {
428 iowrite32(val, dev->base_addr + reg);
429 }
430
431 static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
432 unsigned long reg,
433 unsigned long bitmask)
434 {
435 pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
436 }
437
438 static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
439 unsigned long reg,
440 unsigned long bitmask)
441 {
442 pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
443 }
444
445 static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
446 {
447 return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
448 }
449
450 static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
451 unsigned long val, unsigned long reg)
452 {
453 iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
454 }
455
456 static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
457 unsigned long reg,
458 unsigned long bitmask)
459 {
460 pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
461 }
462
463 static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
464 unsigned long reg,
465 unsigned long bitmask)
466 {
467 pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
468 }
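/*
 * Example (illustrative only): all endpoint register accesses below go
 * through these helpers; a call such as
 *
 *	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
 *
 * is a read-modify-write of the 32-bit control register located at
 * dev->base_addr + ep->offset_addr + UDC_EPCTL_ADDR.
 */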
469
470 /**
471 * pch_udc_csr_busy() - Wait till idle.
472 * @dev: Reference to pch_udc_dev structure
473 */
474 static void pch_udc_csr_busy(struct pch_udc_dev *dev)
475 {
476 unsigned int count = 200;
477
478 /* Wait till idle */
479 while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
480 && --count)
481 cpu_relax();
482 if (!count)
483 dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
484 }
485
486 /**
487 * pch_udc_write_csr() - Write the command and status registers.
488 * @dev: Reference to pch_udc_dev structure
489 * @val: value to be written to CSR register
490 * @ep: Endpoint number whose CSR register is written
491 */
492 static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
493 unsigned int ep)
494 {
495 unsigned long reg = PCH_UDC_CSR(ep);
496
497 pch_udc_csr_busy(dev); /* Wait till idle */
498 pch_udc_writel(dev, val, reg);
499 pch_udc_csr_busy(dev); /* Wait till idle */
500 }
501
502 /**
503 * pch_udc_read_csr() - Read the command and status registers.
504 * @dev: Reference to pch_udc_dev structure
505 * @ep: Endpoint number whose CSR register is read
506 *
507 * Return codes: content of CSR register
508 */
509 static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
510 {
511 unsigned long reg = PCH_UDC_CSR(ep);
512
513 pch_udc_csr_busy(dev); /* Wait till idle */
514 pch_udc_readl(dev, reg); /* Dummy read */
515 pch_udc_csr_busy(dev); /* Wait till idle */
516 return pch_udc_readl(dev, reg);
517 }
518
519 /**
520 * pch_udc_rmt_wakeup() - Initiate a remote wakeup
521 * @dev: Reference to pch_udc_dev structure
522 */
523 static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
524 {
525 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
526 mdelay(1);
527 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
528 }
529
530 /**
531 * pch_udc_get_frame() - Get the current frame from device status register
532 * @dev: Reference to pch_udc_dev structure
533 * Return: current frame number
534 */
535 static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
536 {
537 u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
538 return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
539 }
540
541 /**
542 * pch_udc_clear_selfpowered() - Clear the self power control
543 * @dev: Reference to pch_udc_regs structure
544 */
545 static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
546 {
547 pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
548 }
549
550 /**
551 * pch_udc_set_selfpowered() - Set the self power control
552 * @dev: Reference to pch_udc_regs structure
553 */
554 static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
555 {
556 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
557 }
558
559 /**
560 * pch_udc_set_disconnect() - Set the disconnect status.
561 * @dev: Reference to pch_udc_regs structure
562 */
563 static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
564 {
565 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
566 }
567
568 /**
569 * pch_udc_clear_disconnect() - Clear the disconnect status.
570 * @dev: Reference to pch_udc_regs structure
571 */
572 static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
573 {
574 /* Clear the disconnect */
575 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
576 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
577 mdelay(1);
578 /* Resume USB signalling */
579 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
580 }
581
582 /**
583 * pch_udc_reconnect() - This API initializes usb device controller,
584 * and clears the disconnect status.
585 * @dev: Reference to pch_udc_regs structure
586 */
587 static void pch_udc_init(struct pch_udc_dev *dev);
588 static void pch_udc_reconnect(struct pch_udc_dev *dev)
589 {
590 pch_udc_init(dev);
591
592 /* enable device interrupts */
593 /* pch_udc_enable_interrupts() */
594 pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
595 UDC_DEVINT_UR | UDC_DEVINT_ENUM);
596
597 /* Clear the disconnect */
598 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
599 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
600 mdelay(1);
601 /* Resume USB signalling */
602 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
603 }
604
605 /**
606 * pch_udc_vbus_session() - Set or clear the disconnect status.
607 * @dev: Reference to pch_udc_regs structure
608 * @is_active: Parameter specifying the action
609 * 0: indicating VBUS power is ending
610 * !0: indicating VBUS power is starting
611 */
612 static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
613 int is_active)
614 {
615 if (is_active) {
616 pch_udc_reconnect(dev);
617 dev->vbus_session = 1;
618 } else {
619 if (dev->driver && dev->driver->disconnect) {
620 spin_unlock(&dev->lock);
621 dev->driver->disconnect(&dev->gadget);
622 spin_lock(&dev->lock);
623 }
624 pch_udc_set_disconnect(dev);
625 dev->vbus_session = 0;
626 }
627 }
628
629 /**
630 * pch_udc_ep_set_stall() - Set the stall of endpoint
631 * @ep: Reference to structure of type pch_udc_ep_regs
632 */
633 static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
634 {
635 if (ep->in) {
636 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
637 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
638 } else {
639 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
640 }
641 }
642
643 /**
644 * pch_udc_ep_clear_stall() - Clear the stall of endpoint
645 * @ep: Reference to structure of type pch_udc_ep_regs
646 */
647 static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
648 {
649 /* Clear the stall */
650 pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
651 /* Clear NAK by writing CNAK */
652 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
653 }
654
655 /**
656 * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint
657 * @ep: Reference to structure of type pch_udc_ep_regs
658 * @type: Type of endpoint
659 */
660 static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
661 u8 type)
662 {
663 pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
664 UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
665 }
666
667 /**
668 * pch_udc_ep_set_bufsz() - Set the buffer size for the endpoint
669 * @ep: Reference to structure of type pch_udc_ep_regs
670 * @buf_size: The buffer word size
 * @ep_in: The endpoint is an IN endpoint (nonzero) or an OUT endpoint (zero)
671 */
672 static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
673 u32 buf_size, u32 ep_in)
674 {
675 u32 data;
676 if (ep_in) {
677 data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
678 data = (data & 0xffff0000) | (buf_size & 0xffff);
679 pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
680 } else {
681 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
682 data = (buf_size << 16) | (data & 0xffff);
683 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
684 }
685 }
686
687 /**
688 * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
689 * @ep: Reference to structure of type pch_udc_ep_regs
690 * @pkt_size: The packet byte size
691 */
692 static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
693 {
694 u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
695 data = (data & 0xffff0000) | (pkt_size & 0xffff);
696 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
697 }
698
699 /**
700 * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint
701 * @ep: Reference to structure of type pch_udc_ep_regs
702 * @addr: Address of the setup buffer (written to the SUBPTR register)
703 */
704 static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
705 {
706 pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
707 }
708
709 /**
710 * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint
711 * @ep: Reference to structure of type pch_udc_ep_regs
712 * @addr: Address of the data descriptor (written to the DESPTR register)
713 */
714 static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
715 {
716 pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
717 }
718
719 /**
720 * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint
721 * @ep: Reference to structure of type pch_udc_ep_regs
722 */
723 static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
724 {
725 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
726 }
727
728 /**
729 * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint
730 * @ep: Reference to structure of type pch_udc_ep_regs
731 */
732 static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
733 {
734 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
735 }
736
737 /**
738 * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint
739 * @ep: Reference to structure of type pch_udc_ep_regs
740 */
741 static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
742 {
743 pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
744 }
745
746 /**
747 * pch_udc_set_dma() - Set the 'TDE' or RDE bit of device control
748 * register depending on the direction specified
749 * @dev: Reference to structure of type pch_udc_regs
750 * @dir: whether Tx or Rx
751 * DMA_DIR_RX: Receive
752 * DMA_DIR_TX: Transmit
753 */
754 static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
755 {
756 if (dir == DMA_DIR_RX)
757 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
758 else if (dir == DMA_DIR_TX)
759 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
760 }
761
762 /**
763 * pch_udc_clear_dma() - Clear the 'TDE' or RDE bit of device control
764 * register depending on the direction specified
765 * @dev: Reference to structure of type pch_udc_regs
766 * @dir: Whether Tx or Rx
767 * DMA_DIR_RX: Receive
768 * DMA_DIR_TX: Transmit
769 */
770 static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
771 {
772 if (dir == DMA_DIR_RX)
773 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
774 else if (dir == DMA_DIR_TX)
775 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
776 }
777
778 /**
779 * pch_udc_set_csr_done() - Set the device control register
780 * CSR done field (bit 13)
781 * @dev: reference to structure of type pch_udc_regs
782 */
783 static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
784 {
785 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
786 }
787
788 /**
789 * pch_udc_disable_interrupts() - Disables the specified interrupts
790 * @dev: Reference to structure of type pch_udc_regs
791 * @mask: Mask to disable interrupts
792 */
793 static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
794 u32 mask)
795 {
796 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
797 }
798
799 /**
800 * pch_udc_enable_interrupts() - Enable the specified interrupts
801 * @dev: Reference to structure of type pch_udc_regs
802 * @mask: Mask to enable interrupts
803 */
804 static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
805 u32 mask)
806 {
807 pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
808 }
809
810 /**
811 * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts
812 * @dev: Reference to structure of type pch_udc_regs
813 * @mask: Mask to disable interrupts
814 */
815 static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
816 u32 mask)
817 {
818 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
819 }
820
821 /**
822 * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts
823 * @dev: Reference to structure of type pch_udc_regs
824 * @mask: Mask to enable interrupts
825 */
826 static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
827 u32 mask)
828 {
829 pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
830 }
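/*
 * Note: in both mask registers a set bit means "masked", so interrupts are
 * enabled by clearing bits and disabled by setting them.  For example,
 * pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0) clears bit 0 of
 * UDC_EPIRQMSK_ADDR and thereby unmasks the IN interrupt of endpoint 0.
 */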
831
832 /**
833 * pch_udc_read_device_interrupts() - Read the device interrupts
834 * @dev: Reference to structure of type pch_udc_regs
835 * Return: the device interrupt status
836 */
837 static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
838 {
839 return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
840 }
841
842 /**
843 * pch_udc_write_device_interrupts() - Write device interrupts
844 * @dev: Reference to structure of type pch_udc_regs
845 * @val: The value to be written to interrupt register
846 */
847 static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
848 u32 val)
849 {
850 pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
851 }
852
853 /**
854 * pch_udc_read_ep_interrupts() - Read the endpoint interrupts
855 * @dev: Reference to structure of type pch_udc_regs
856 * Return: the endpoint interrupt status
857 */
858 static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
859 {
860 return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
861 }
862
863 /**
864 * pch_udc_write_ep_interrupts() - Clear endpoint interrupts
865 * @dev: Reference to structure of type pch_udc_regs
866 * @val: The value to be written to interrupt register
867 */
868 static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
869 u32 val)
870 {
871 pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
872 }
873
874 /**
875 * pch_udc_read_device_status() - Read the device status
876 * @dev: Reference to structure of type pch_udc_regs
877 * Return: the device status
878 */
879 static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
880 {
881 return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
882 }
883
884 /**
885 * pch_udc_read_ep_control() - Read the endpoint control
886 * @ep: Reference to structure of type pch_udc_ep_regs
887 * Return: the endpoint control register value
888 */
889 static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
890 {
891 return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
892 }
893
894 /**
895 * pch_udc_clear_ep_control() - Clear the endpoint control register
896 * @ep: Reference to structure of type pch_udc_ep_regs
898 */
899 static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
900 {
901 pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
902 }
903
904 /**
905 * pch_udc_read_ep_status() - Read the endpoint status
906 * @ep: Reference to structure of type pch_udc_ep_regs
907 * Return: the endpoint status
908 */
909 static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
910 {
911 return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
912 }
913
914 /**
915 * pch_udc_clear_ep_status() - Clear the endpoint status
916 * @ep: Reference to structure of type pch_udc_ep_regs
917 * @stat: Endpoint status
918 */
919 static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
920 u32 stat)
921 {
922 pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
923 }
924
925 /**
926 * pch_udc_ep_set_nak() - Set the bit 7 (SNAK field)
927 * of the endpoint control register
928 * @ep: Reference to structure of type pch_udc_ep_regs
929 */
930 static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
931 {
932 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
933 }
934
935 /**
936 * pch_udc_ep_clear_nak() - Set the bit 8 (CNAK field)
937 * of the endpoint control register
938 * @ep: reference to structure of type pch_udc_ep_regs
939 */
940 static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
941 {
942 unsigned int loopcnt = 0;
943 struct pch_udc_dev *dev = ep->dev;
944
945 if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
946 return;
947 if (!ep->in) {
948 loopcnt = 10000;
949 while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
950 --loopcnt)
951 udelay(5);
952 if (!loopcnt)
953 dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
954 __func__);
955 }
956 loopcnt = 10000;
957 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
958 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
959 udelay(5);
960 }
961 if (!loopcnt)
962 dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
963 __func__, ep->num, (ep->in ? "in" : "out"));
964 }
965
966 /**
967 * pch_udc_ep_fifo_flush() - Flush the endpoint fifo
968 * @ep: reference to structure of type pch_udc_ep_regs
969 * @dir: direction of endpoint
970 * 0: endpoint is OUT
971 * !0: endpoint is IN
972 */
973 static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
974 {
975 if (dir) { /* IN ep */
976 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
977 return;
978 }
979 }
980
981 /**
982 * pch_udc_ep_enable() - This API enables the endpoint
983 * @ep: Reference to the endpoint structure
984 * @cfg: Reference to the current configuration data
 * @desc: endpoint descriptor
985 */
986 static void pch_udc_ep_enable(struct pch_udc_ep *ep,
987 struct pch_udc_cfg_data *cfg,
988 const struct usb_endpoint_descriptor *desc)
989 {
990 u32 val = 0;
991 u32 buff_size = 0;
992
993 pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
994 if (ep->in)
995 buff_size = UDC_EPIN_BUFF_SIZE;
996 else
997 buff_size = UDC_EPOUT_BUFF_SIZE;
998 pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
999 pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
1000 pch_udc_ep_set_nak(ep);
1001 pch_udc_ep_fifo_flush(ep, ep->in);
1002 /* Configure the endpoint */
1003 val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
1004 ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
1005 UDC_CSR_NE_TYPE_SHIFT) |
1006 (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
1007 (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
1008 (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
1009 usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
1010
1011 if (ep->in)
1012 pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
1013 else
1014 pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
1015 }
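/*
 * Illustrative example (hypothetical endpoint, values derived from the
 * shift definitions above): for a bulk IN endpoint 1 with wMaxPacketSize
 * 512 in configuration 1, interface 0, altsetting 0, the value written to
 * the endpoint's UDC_CSR register is
 *	1 << UDC_CSR_NE_NUM_SHIFT | 1 << UDC_CSR_NE_DIR_SHIFT |
 *	USB_ENDPOINT_XFER_BULK << UDC_CSR_NE_TYPE_SHIFT |
 *	1 << UDC_CSR_NE_CFG_SHIFT | 512 << UDC_CSR_NE_MAX_PKT_SHIFT
 */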
1016
1017 /**
1018 * pch_udc_ep_disable() - This API disables the endpoint
1019 * @ep: Reference to the endpoint structure
1020 */
1021 static void pch_udc_ep_disable(struct pch_udc_ep *ep)
1022 {
1023 if (ep->in) {
1024 /* flush the fifo */
1025 pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
1026 /* set NAK */
1027 pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1028 pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
1029 } else {
1030 /* set NAK */
1031 pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1032 }
1033 /* reset desc pointer */
1034 pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
1035 }
1036
1037 /**
1038 * pch_udc_wait_ep_stall() - Wait until the endpoint STALL bit is cleared.
1039 * @ep: Reference to the endpoint structure
1040 */
1041 static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
1042 {
1043 unsigned int count = 10000;
1044
1045 /* Wait till idle */
1046 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
1047 udelay(5);
1048 if (!count)
1049 dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
1050 }
1051
1052 /**
1053 * pch_udc_init() - This API initializes usb device controller
1054 * @dev: Reference to pch_udc_regs structure
1055 */
1056 static void pch_udc_init(struct pch_udc_dev *dev)
1057 {
1058 if (!dev) {
1059 pr_err("%s: Invalid address\n", __func__);
1060 return;
1061 }
1062 /* Soft Reset and Reset PHY */
1063 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1064 pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
1065 mdelay(1);
1066 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1067 pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
1068 mdelay(1);
1069 /* mask and clear all device interrupts */
1070 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1071 pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
1072
1073 /* mask and clear all ep interrupts */
1074 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1075 pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1076
1077 /* enable dynamic CSR programming, self powered and device speed */
1078 if (speed_fs)
1079 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1080 UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
1081 else /* default: high speed */
1082 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1083 UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
1084 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
1085 (PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
1086 (PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
1087 UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
1088 UDC_DEVCTL_THE);
1089 }
1090
1091 /**
1092 * pch_udc_exit() - This API exits the usb device controller
1093 * @dev: Reference to pch_udc_regs structure
1094 */
1095 static void pch_udc_exit(struct pch_udc_dev *dev)
1096 {
1097 /* mask all device interrupts */
1098 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1099 /* mask all ep interrupts */
1100 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1101 /* put device in disconnected state */
1102 pch_udc_set_disconnect(dev);
1103 }
1104
1105 /**
1106 * pch_udc_pcd_get_frame() - This API is invoked to get the current frame number
1107 * @gadget: Reference to the gadget driver
1108 *
1109 * Return codes:
1110 * Current frame number on success
1111 * -EINVAL: If the gadget passed is NULL
1112 */
1113 static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1114 {
1115 struct pch_udc_dev *dev;
1116
1117 if (!gadget)
1118 return -EINVAL;
1119 dev = container_of(gadget, struct pch_udc_dev, gadget);
1120 return pch_udc_get_frame(dev);
1121 }
1122
1123 /**
1124 * pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup
1125 * @gadget: Reference to the gadget driver
1126 *
1127 * Return codes:
1128 * 0: Success
1129 * -EINVAL: If the gadget passed is NULL
1130 */
1131 static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
1132 {
1133 struct pch_udc_dev *dev;
1134 unsigned long flags;
1135
1136 if (!gadget)
1137 return -EINVAL;
1138 dev = container_of(gadget, struct pch_udc_dev, gadget);
1139 spin_lock_irqsave(&dev->lock, flags);
1140 pch_udc_rmt_wakeup(dev);
1141 spin_unlock_irqrestore(&dev->lock, flags);
1142 return 0;
1143 }
1144
1145 /**
1146 * pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device
1147 * is self powered or not
1148 * @gadget: Reference to the gadget driver
1149 * @value: Specifies self powered or not
1150 *
1151 * Return codes:
1152 * 0: Success
1153 * -EINVAL: If the gadget passed is NULL
1154 */
1155 static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
1156 {
1157 struct pch_udc_dev *dev;
1158
1159 if (!gadget)
1160 return -EINVAL;
1161 dev = container_of(gadget, struct pch_udc_dev, gadget);
1162 if (value)
1163 pch_udc_set_selfpowered(dev);
1164 else
1165 pch_udc_clear_selfpowered(dev);
1166 return 0;
1167 }
1168
1169 /**
1170 * pch_udc_pcd_pullup() - This API is invoked to make the device
1171 * visible/invisible to the host
1172 * @gadget: Reference to the gadget driver
1173 * @is_on: Specifies whether the pull up is made active or inactive
1174 *
1175 * Return codes:
1176 * 0: Success
1177 * -EINVAL: If the gadget passed is NULL
1178 */
1179 static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
1180 {
1181 struct pch_udc_dev *dev;
1182
1183 if (!gadget)
1184 return -EINVAL;
1185 dev = container_of(gadget, struct pch_udc_dev, gadget);
1186 if (is_on) {
1187 pch_udc_reconnect(dev);
1188 } else {
1189 if (dev->driver && dev->driver->disconnect) {
1190 spin_unlock(&dev->lock);
1191 dev->driver->disconnect(&dev->gadget);
1192 spin_lock(&dev->lock);
1193 }
1194 pch_udc_set_disconnect(dev);
1195 }
1196
1197 return 0;
1198 }
1199
1200 /**
1201 * pch_udc_pcd_vbus_session() - This API is used by a driver for an external
1202 * transceiver (or GPIO) that
1203 * detects a VBUS power session starting/ending
1204 * @gadget: Reference to the gadget driver
1205 * @is_active: specifies whether the session is starting or ending
1206 *
1207 * Return codes:
1208 * 0: Success
1209 * -EINVAL: If the gadget passed is NULL
1210 */
1211 static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
1212 {
1213 struct pch_udc_dev *dev;
1214
1215 if (!gadget)
1216 return -EINVAL;
1217 dev = container_of(gadget, struct pch_udc_dev, gadget);
1218 pch_udc_vbus_session(dev, is_active);
1219 return 0;
1220 }
1221
1222 /**
1223 * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during
1224 * SET_CONFIGURATION calls to
1225 * specify how much power the device can consume
1226 * @gadget: Reference to the gadget driver
1227 * @mA: specifies the current limit in 2mA unit
1228 *
1229 * Return codes:
1230 * -EOPNOTSUPP: This operation is not supported (always returned)
1232 */
1233 static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
1234 {
1235 return -EOPNOTSUPP;
1236 }
1237
1238 static int pch_udc_start(struct usb_gadget *g,
1239 struct usb_gadget_driver *driver);
1240 static int pch_udc_stop(struct usb_gadget *g,
1241 struct usb_gadget_driver *driver);
1242 static const struct usb_gadget_ops pch_udc_ops = {
1243 .get_frame = pch_udc_pcd_get_frame,
1244 .wakeup = pch_udc_pcd_wakeup,
1245 .set_selfpowered = pch_udc_pcd_selfpowered,
1246 .pullup = pch_udc_pcd_pullup,
1247 .vbus_session = pch_udc_pcd_vbus_session,
1248 .vbus_draw = pch_udc_pcd_vbus_draw,
1249 .udc_start = pch_udc_start,
1250 .udc_stop = pch_udc_stop,
1251 };
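/*
 * These ops are not called directly by gadget drivers; the gadget/UDC core
 * dispatches to them through the usb_gadget_*() wrappers, e.g.
 * usb_gadget_connect() ends up in .pullup and usb_gadget_wakeup() in
 * .wakeup (a general property of the gadget framework, noted here for
 * readability).
 */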
1252
1253 /**
1254 * pch_vbus_gpio_get_value() - This API gets value of GPIO port as VBUS status.
1255 * @dev: Reference to the driver structure
1256 *
1257 * Return value:
1258 * 1: VBUS is high
1259 * 0: VBUS is low
1260 * -1: VBUS detection via GPIO is not enabled
1261 */
1262 static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
1263 {
1264 int vbus = 0;
1265
1266 if (dev->vbus_gpio.port)
1267 vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0;
1268 else
1269 vbus = -1;
1270
1271 return vbus;
1272 }
1273
1274 /**
1275 * pch_vbus_gpio_work_fall() - This API keeps watch on VBUS becoming Low.
1276 * If VBUS is Low, disconnect is processed
1277 * @irq_work: Structure for WorkQueue
1278 *
1279 */
1280 static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
1281 {
1282 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1283 struct pch_vbus_gpio_data, irq_work_fall);
1284 struct pch_udc_dev *dev =
1285 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1286 int vbus_saved = -1;
1287 int vbus;
1288 int count;
1289
1290 if (!dev->vbus_gpio.port)
1291 return;
1292
1293 for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
1294 count++) {
1295 vbus = pch_vbus_gpio_get_value(dev);
1296
1297 if ((vbus_saved == vbus) && (vbus == 0)) {
1298 dev_dbg(&dev->pdev->dev, "VBUS fell");
1299 if (dev->driver
1300 && dev->driver->disconnect) {
1301 dev->driver->disconnect(
1302 &dev->gadget);
1303 }
1304 if (dev->vbus_gpio.intr)
1305 pch_udc_init(dev);
1306 else
1307 pch_udc_reconnect(dev);
1308 return;
1309 }
1310 vbus_saved = vbus;
1311 mdelay(PCH_VBUS_INTERVAL);
1312 }
1313 }
1314
1315 /**
1316 * pch_vbus_gpio_work_rise() - This API checks VBUS is High.
1317 * If VBUS is High, connect is processed
1318 * @irq_work: Structure for WorkQueue
1319 *
1320 */
1321 static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
1322 {
1323 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1324 struct pch_vbus_gpio_data, irq_work_rise);
1325 struct pch_udc_dev *dev =
1326 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1327 int vbus;
1328
1329 if (!dev->vbus_gpio.port)
1330 return;
1331
1332 mdelay(PCH_VBUS_INTERVAL);
1333 vbus = pch_vbus_gpio_get_value(dev);
1334
1335 if (vbus == 1) {
1336 dev_dbg(&dev->pdev->dev, "VBUS rose");
1337 pch_udc_reconnect(dev);
1338 return;
1339 }
1340 }
1341
1342 /**
1343 * pch_vbus_gpio_irq() - IRQ handler for the GPIO interrupt signalling a VBUS change
1344 * @irq: Interrupt request number
1345 * @data: Pointer to the driver structure (struct pch_udc_dev)
1346 *
1347 * Return codes:
1348 * IRQ_HANDLED: a VBUS change was detected and queued for handling
1349 * IRQ_NONE: GPIO-based VBUS detection is not in use
1350 */
1351 static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
1352 {
1353 struct pch_udc_dev *dev = (struct pch_udc_dev *)data;
1354
1355 if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
1356 return IRQ_NONE;
1357
1358 if (pch_vbus_gpio_get_value(dev))
1359 schedule_work(&dev->vbus_gpio.irq_work_rise);
1360 else
1361 schedule_work(&dev->vbus_gpio.irq_work_fall);
1362
1363 return IRQ_HANDLED;
1364 }
1365
1366 /**
1367 * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
1368 * @dev: Reference to the driver structure
1369 * @vbus_gpio_port: Number of the GPIO port used to detect VBUS
1370 *
1371 * Return codes:
1372 * 0: Success
1373 * -EINVAL: GPIO port is invalid or can't be initialized.
1374 */
1375 static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port)
1376 {
1377 int err;
1378 int irq_num = 0;
1379
1380 dev->vbus_gpio.port = 0;
1381 dev->vbus_gpio.intr = 0;
1382
1383 if (vbus_gpio_port <= -1)
1384 return -EINVAL;
1385
1386 err = gpio_is_valid(vbus_gpio_port);
1387 if (!err) {
1388 pr_err("%s: gpio port %d is invalid\n",
1389 __func__, vbus_gpio_port);
1390 return -EINVAL;
1391 }
1392
1393 err = gpio_request(vbus_gpio_port, "pch_vbus");
1394 if (err) {
1395 pr_err("%s: can't request gpio port %d, err: %d\n",
1396 __func__, vbus_gpio_port, err);
1397 return -EINVAL;
1398 }
1399
1400 dev->vbus_gpio.port = vbus_gpio_port;
1401 gpio_direction_input(vbus_gpio_port);
1402 INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);
1403
1404 irq_num = gpio_to_irq(vbus_gpio_port);
1405 if (irq_num > 0) {
1406 irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
1407 err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
1408 "vbus_detect", dev);
1409 if (!err) {
1410 dev->vbus_gpio.intr = irq_num;
1411 INIT_WORK(&dev->vbus_gpio.irq_work_rise,
1412 pch_vbus_gpio_work_rise);
1413 } else {
1414 pr_err("%s: can't request irq %d, err: %d\n",
1415 __func__, irq_num, err);
1416 }
1417 }
1418
1419 return 0;
1420 }
1421
1422 /**
1423 * pch_vbus_gpio_free() - This API frees resources of GPIO port
1424 * @dev: Reference to the driver structure
1425 */
1426 static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
1427 {
1428 if (dev->vbus_gpio.intr)
1429 free_irq(dev->vbus_gpio.intr, dev);
1430
1431 if (dev->vbus_gpio.port)
1432 gpio_free(dev->vbus_gpio.port);
1433 }
1434
1435 /**
1436 * complete_req() - This API is invoked from the driver when processing
1437 * of a request is complete
1438 * @ep: Reference to the endpoint structure
1439 * @req: Reference to the request structure
1440 * @status: Indicates the success/failure of completion
1441 */
1442 static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
1443 int status)
1444 {
1445 struct pch_udc_dev *dev;
1446 unsigned halted = ep->halted;
1447
1448 list_del_init(&req->queue);
1449
1450 /* set new status if pending */
1451 if (req->req.status == -EINPROGRESS)
1452 req->req.status = status;
1453 else
1454 status = req->req.status;
1455
1456 dev = ep->dev;
1457 if (req->dma_mapped) {
1458 if (req->dma == DMA_ADDR_INVALID) {
1459 if (ep->in)
1460 dma_unmap_single(&dev->pdev->dev, req->req.dma,
1461 req->req.length,
1462 DMA_TO_DEVICE);
1463 else
1464 dma_unmap_single(&dev->pdev->dev, req->req.dma,
1465 req->req.length,
1466 DMA_FROM_DEVICE);
1467 req->req.dma = DMA_ADDR_INVALID;
1468 } else {
1469 if (ep->in)
1470 dma_unmap_single(&dev->pdev->dev, req->dma,
1471 req->req.length,
1472 DMA_TO_DEVICE);
1473 else {
1474 dma_unmap_single(&dev->pdev->dev, req->dma,
1475 req->req.length,
1476 DMA_FROM_DEVICE);
1477 memcpy(req->req.buf, req->buf, req->req.length);
1478 }
1479 kfree(req->buf);
1480 req->dma = DMA_ADDR_INVALID;
1481 }
1482 req->dma_mapped = 0;
1483 }
1484 ep->halted = 1;
1485 spin_unlock(&dev->lock);
1486 if (!ep->in)
1487 pch_udc_ep_clear_rrdy(ep);
1488 req->req.complete(&ep->ep, &req->req);
1489 spin_lock(&dev->lock);
1490 ep->halted = halted;
1491 }
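/*
 * Note on the two unmap paths above: when the gadget's own buffer could be
 * mapped directly, req->dma stays DMA_ADDR_INVALID and req->req.dma is
 * unmapped; otherwise pch_udc_pcd_queue() below bounces the transfer
 * through req->buf/req->dma, and for OUT transfers the data is copied back
 * to the caller's buffer before the bounce buffer is freed.
 */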
1492
1493 /**
1494 * empty_req_queue() - This API empties the request queue of an endpoint
1495 * @ep: Reference to the endpoint structure
1496 */
1497 static void empty_req_queue(struct pch_udc_ep *ep)
1498 {
1499 struct pch_udc_request *req;
1500
1501 ep->halted = 1;
1502 while (!list_empty(&ep->queue)) {
1503 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1504 complete_req(ep, req, -ESHUTDOWN); /* Remove from list */
1505 }
1506 }
1507
1508 /**
1509 * pch_udc_free_dma_chain() - This function frees the DMA chain created
1510 * for the request
1511 * @dev: Reference to the driver structure
1512 * @req: Reference to the request to be freed
1516 */
1517 static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1518 struct pch_udc_request *req)
1519 {
1520 struct pch_udc_data_dma_desc *td = req->td_data;
1521 unsigned i = req->chain_len;
1522
1523 dma_addr_t addr2;
1524 dma_addr_t addr = (dma_addr_t)td->next;
1525 td->next = 0x00;
1526 for (; i > 1; --i) {
1527 /* do not free first desc., will be done by free for request */
1528 td = phys_to_virt(addr);
1529 addr2 = (dma_addr_t)td->next;
1530 pci_pool_free(dev->data_requests, td, addr);
1531 td->next = 0x00;
1532 addr = addr2;
1533 }
1534 req->chain_len = 1;
1535 }
1536
1537 /**
1538 * pch_udc_create_dma_chain() - This function creates or reinitializes
1539 * a DMA chain
1540 * @ep: Reference to the endpoint structure
1541 * @req: Reference to the request
1542 * @buf_len: The buffer length
1543 * @gfp_flags: Flags to be used while mapping the data buffer
1544 *
1545 * Return codes:
1546 * 0: success,
1547 * -ENOMEM: pci_pool_alloc invocation fails
1548 */
1549 static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
1550 struct pch_udc_request *req,
1551 unsigned long buf_len,
1552 gfp_t gfp_flags)
1553 {
1554 struct pch_udc_data_dma_desc *td = req->td_data, *last;
1555 unsigned long bytes = req->req.length, i = 0;
1556 dma_addr_t dma_addr;
1557 unsigned len = 1;
1558
1559 if (req->chain_len > 1)
1560 pch_udc_free_dma_chain(ep->dev, req);
1561
1562 if (req->dma == DMA_ADDR_INVALID)
1563 td->dataptr = req->req.dma;
1564 else
1565 td->dataptr = req->dma;
1566
1567 td->status = PCH_UDC_BS_HST_BSY;
1568 for (; ; bytes -= buf_len, ++len) {
1569 td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
1570 if (bytes <= buf_len)
1571 break;
1572 last = td;
1573 td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
1574 &dma_addr);
1575 if (!td)
1576 goto nomem;
1577 i += buf_len;
1578 td->dataptr = req->td_data->dataptr + i;
1579 last->next = dma_addr;
1580 }
1581
1582 req->td_data_last = td;
1583 td->status |= PCH_UDC_DMA_LAST;
1584 td->next = req->td_data_phys;
1585 req->chain_len = len;
1586 return 0;
1587
1588 nomem:
1589 if (len > 1) {
1590 req->chain_len = len;
1591 pch_udc_free_dma_chain(ep->dev, req);
1592 }
1593 req->chain_len = 1;
1594 return -ENOMEM;
1595 }
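/*
 * Illustrative example (hypothetical request): queuing a 1024 byte request
 * on an endpoint whose maxpacket is 512 produces a two-descriptor chain
 * here; each descriptor carries a byte count of 512, the second one is
 * tagged PCH_UDC_DMA_LAST and its next pointer wraps back to
 * req->td_data_phys.
 */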
1596
1597 /**
1598 * prepare_dma() - This function creates and initializes the DMA chain
1599 * for the request
1600 * @ep: Reference to the endpoint structure
1601 * @req: Reference to the request
1602 * @gfp: Flag to be used while mapping the data buffer
1603 *
1604 * Return codes:
1605 * 0: Success
1606 * Nonzero: linux error number on failure
1607 */
1608 static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1609 gfp_t gfp)
1610 {
1611 int retval;
1612
1613 /* Allocate and create a DMA chain */
1614 retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1615 if (retval) {
1616 pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1617 return retval;
1618 }
1619 if (ep->in)
1620 req->td_data->status = (req->td_data->status &
1621 ~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1622 return 0;
1623 }
1624
1625 /**
1626 * process_zlp() - This function processes zero-length packets
1627 * from the gadget driver
1628 * @ep: Reference to the endpoint structure
1629 * @req: Reference to the request
1630 */
1631 static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
1632 {
1633 struct pch_udc_dev *dev = ep->dev;
1634
1635 /* IN zlp's are handled by hardware */
1636 complete_req(ep, req, 0);
1637
1638 /* if set_config or set_intf is waiting for ack by zlp
1639 * then set CSR_DONE
1640 */
1641 if (dev->set_cfg_not_acked) {
1642 pch_udc_set_csr_done(dev);
1643 dev->set_cfg_not_acked = 0;
1644 }
1645 /* setup command is ACK'ed now by zlp */
1646 if (!dev->stall && dev->waiting_zlp_ack) {
1647 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
1648 dev->waiting_zlp_ack = 0;
1649 }
1650 }
1651
1652 /**
1653 * pch_udc_start_rxrequest() - This function starts a receive request.
1654 * @ep: Reference to the endpoint structure
1655 * @req: Reference to the request structure
1656 */
1657 static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
1658 struct pch_udc_request *req)
1659 {
1660 struct pch_udc_data_dma_desc *td_data;
1661
1662 pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1663 td_data = req->td_data;
1664 /* Set the status bits for all descriptors */
1665 while (1) {
1666 td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
1667 PCH_UDC_BS_HST_RDY;
1668 if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
1669 break;
1670 td_data = phys_to_virt(td_data->next);
1671 }
1672 /* Write the descriptor pointer */
1673 pch_udc_ep_set_ddptr(ep, req->td_data_phys);
1674 req->dma_going = 1;
1675 pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
1676 pch_udc_set_dma(ep->dev, DMA_DIR_RX);
1677 pch_udc_ep_clear_nak(ep);
1678 pch_udc_ep_set_rrdy(ep);
1679 }
1680
1681 /**
1682 * pch_udc_pcd_ep_enable() - This API enables the endpoint. It is called
1683 * from gadget driver
1684 * @usbep: Reference to the USB endpoint structure
1685 * @desc: Reference to the USB endpoint descriptor structure
1686 *
1687 * Return codes:
1688 * 0: Success
1689 * -EINVAL:
1690 * -ESHUTDOWN:
1691 */
1692 static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
1693 const struct usb_endpoint_descriptor *desc)
1694 {
1695 struct pch_udc_ep *ep;
1696 struct pch_udc_dev *dev;
1697 unsigned long iflags;
1698
1699 if (!usbep || (usbep->name == ep0_string) || !desc ||
1700 (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
1701 return -EINVAL;
1702
1703 ep = container_of(usbep, struct pch_udc_ep, ep);
1704 dev = ep->dev;
1705 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1706 return -ESHUTDOWN;
1707 spin_lock_irqsave(&dev->lock, iflags);
1708 ep->ep.desc = desc;
1709 ep->halted = 0;
1710 pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
1711 ep->ep.maxpacket = usb_endpoint_maxp(desc);
1712 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1713 spin_unlock_irqrestore(&dev->lock, iflags);
1714 return 0;
1715 }
1716
1717 /**
1718 * pch_udc_pcd_ep_disable() - This API disables endpoint and is called
1719 * from gadget driver
1720 * @usbep: Reference to the USB endpoint structure
1721 *
1722 * Return codes:
1723 * 0: Success
1724 * -EINVAL:
1725 */
1726 static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
1727 {
1728 struct pch_udc_ep *ep;
1729 struct pch_udc_dev *dev;
1730 unsigned long iflags;
1731
1732 if (!usbep)
1733 return -EINVAL;
1734
1735 ep = container_of(usbep, struct pch_udc_ep, ep);
1736 dev = ep->dev;
1737 if ((usbep->name == ep0_string) || !ep->ep.desc)
1738 return -EINVAL;
1739
1740 spin_lock_irqsave(&ep->dev->lock, iflags);
1741 empty_req_queue(ep);
1742 ep->halted = 1;
1743 pch_udc_ep_disable(ep);
1744 pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1745 ep->ep.desc = NULL;
1746 INIT_LIST_HEAD(&ep->queue);
1747 spin_unlock_irqrestore(&ep->dev->lock, iflags);
1748 return 0;
1749 }
1750
1751 /**
1752 * pch_udc_alloc_request() - This function allocates request structure.
1753 * It is called by gadget driver
1754 * @usbep: Reference to the USB endpoint structure
1755 * @gfp: Flag to be used while allocating memory
1756 *
1757 * Return codes:
1758 * NULL: Failure
1759 * Allocated address: Success
1760 */
1761 static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1762 gfp_t gfp)
1763 {
1764 struct pch_udc_request *req;
1765 struct pch_udc_ep *ep;
1766 struct pch_udc_data_dma_desc *dma_desc;
1767 struct pch_udc_dev *dev;
1768
1769 if (!usbep)
1770 return NULL;
1771 ep = container_of(usbep, struct pch_udc_ep, ep);
1772 dev = ep->dev;
1773 req = kzalloc(sizeof *req, gfp);
1774 if (!req)
1775 return NULL;
1776 req->req.dma = DMA_ADDR_INVALID;
1777 req->dma = DMA_ADDR_INVALID;
1778 INIT_LIST_HEAD(&req->queue);
1779 if (!ep->dev->dma_addr)
1780 return &req->req;
1781 /* ep0 in requests are allocated from data pool here */
1782 dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
1783 &req->td_data_phys);
1784 if (!dma_desc) {
1785 kfree(req);
1786 return NULL;
1787 }
1788 /* prevent from using desc. - set HOST BUSY */
1789 dma_desc->status |= PCH_UDC_BS_HST_BSY;
1790 dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
1791 req->td_data = dma_desc;
1792 req->td_data_last = dma_desc;
1793 req->chain_len = 1;
1794 return &req->req;
1795 }
1796
1797 /**
1798 * pch_udc_free_request() - This function frees request structure.
1799 * It is called by gadget driver
1800 * @usbep: Reference to the USB endpoint structure
1801 * @usbreq: Reference to the USB request
1802 */
1803 static void pch_udc_free_request(struct usb_ep *usbep,
1804 struct usb_request *usbreq)
1805 {
1806 struct pch_udc_ep *ep;
1807 struct pch_udc_request *req;
1808 struct pch_udc_dev *dev;
1809
1810 if (!usbep || !usbreq)
1811 return;
1812 ep = container_of(usbep, struct pch_udc_ep, ep);
1813 req = container_of(usbreq, struct pch_udc_request, req);
1814 dev = ep->dev;
1815 if (!list_empty(&req->queue))
1816 dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1817 __func__, usbep->name, req);
1818 if (req->td_data != NULL) {
1819 if (req->chain_len > 1)
1820 pch_udc_free_dma_chain(ep->dev, req);
1821 pci_pool_free(ep->dev->data_requests, req->td_data,
1822 req->td_data_phys);
1823 }
1824 kfree(req);
1825 }
1826
1827 /**
1828 * pch_udc_pcd_queue() - This function queues a request packet. It is called
1829 * by gadget driver
1830 * @usbep: Reference to the USB endpoint structure
1831 * @usbreq: Reference to the USB request
1832 * @gfp: Flag to be used while mapping the data buffer
1833 *
1834 * Return codes:
1835 * 0: Success
1836 * linux error number: Failure
1837 */
1838 static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1839 gfp_t gfp)
1840 {
1841 int retval = 0;
1842 struct pch_udc_ep *ep;
1843 struct pch_udc_dev *dev;
1844 struct pch_udc_request *req;
1845 unsigned long iflags;
1846
1847 if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1848 return -EINVAL;
1849 ep = container_of(usbep, struct pch_udc_ep, ep);
1850 dev = ep->dev;
1851 if (!ep->ep.desc && ep->num)
1852 return -EINVAL;
1853 req = container_of(usbreq, struct pch_udc_request, req);
1854 if (!list_empty(&req->queue))
1855 return -EINVAL;
1856 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1857 return -ESHUTDOWN;
1858 spin_lock_irqsave(&dev->lock, iflags);
1859 /* map the buffer for dma */
1860 if (usbreq->length &&
1861 ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
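		/*
		 * Map the gadget buffer directly for DMA when it is 4-byte
		 * aligned; otherwise copy it through a driver-allocated
		 * bounce buffer (req->buf) and map that instead.
		 */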
1862 if (!((unsigned long)(usbreq->buf) & 0x03)) {
1863 if (ep->in)
1864 usbreq->dma = dma_map_single(&dev->pdev->dev,
1865 usbreq->buf,
1866 usbreq->length,
1867 DMA_TO_DEVICE);
1868 else
1869 usbreq->dma = dma_map_single(&dev->pdev->dev,
1870 usbreq->buf,
1871 usbreq->length,
1872 DMA_FROM_DEVICE);
1873 } else {
1874 req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
1875 if (!req->buf) {
1876 retval = -ENOMEM;
1877 goto probe_end;
1878 }
1879 if (ep->in) {
1880 memcpy(req->buf, usbreq->buf, usbreq->length);
1881 req->dma = dma_map_single(&dev->pdev->dev,
1882 req->buf,
1883 usbreq->length,
1884 DMA_TO_DEVICE);
1885 } else
1886 req->dma = dma_map_single(&dev->pdev->dev,
1887 req->buf,
1888 usbreq->length,
1889 DMA_FROM_DEVICE);
1890 }
1891 req->dma_mapped = 1;
1892 }
1893 if (usbreq->length > 0) {
1894 retval = prepare_dma(ep, req, GFP_ATOMIC);
1895 if (retval)
1896 goto probe_end;
1897 }
1898 usbreq->actual = 0;
1899 usbreq->status = -EINPROGRESS;
1900 req->dma_done = 0;
1901 if (list_empty(&ep->queue) && !ep->halted) {
1902 /* no pending transfer, so start this req */
1903 if (!usbreq->length) {
1904 process_zlp(ep, req);
1905 retval = 0;
1906 goto probe_end;
1907 }
1908 if (!ep->in) {
1909 pch_udc_start_rxrequest(ep, req);
1910 } else {
1911 			/*
1912 			 * For IN transfers the descriptors will be programmed
1913 			 * and the P bit will be set when
1914 			 * we get an IN token
1915 			 */
1916 pch_udc_wait_ep_stall(ep);
1917 pch_udc_ep_clear_nak(ep);
1918 pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1919 }
1920 }
1921 /* Now add this request to the ep's pending requests */
1922 if (req != NULL)
1923 list_add_tail(&req->queue, &ep->queue);
1924
1925 probe_end:
1926 spin_unlock_irqrestore(&dev->lock, iflags);
1927 return retval;
1928 }
1929
1930 /**
1931 * pch_udc_pcd_dequeue() - This function de-queues a request packet.
1932  *				It is called by the gadget driver
1933 * @usbep: Reference to the USB endpoint structure
1934 * @usbreq: Reference to the USB request
1935 *
1936 * Return codes:
1937 * 0: Success
1938 * linux error number: Failure
1939 */
1940 static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1941 struct usb_request *usbreq)
1942 {
1943 struct pch_udc_ep *ep;
1944 struct pch_udc_request *req;
1945 struct pch_udc_dev *dev;
1946 unsigned long flags;
1947 int ret = -EINVAL;
1948
1949 	if (!usbep || !usbreq)
1950 		return ret;
1951 	ep = container_of(usbep, struct pch_udc_ep, ep);
1952 	dev = ep->dev;
	if (!ep->ep.desc && ep->num)
		return ret;
1953 req = container_of(usbreq, struct pch_udc_request, req);
1954 spin_lock_irqsave(&ep->dev->lock, flags);
1955 /* make sure it's still queued on this endpoint */
1956 list_for_each_entry(req, &ep->queue, queue) {
1957 if (&req->req == usbreq) {
1958 pch_udc_ep_set_nak(ep);
1959 if (!list_empty(&req->queue))
1960 complete_req(ep, req, -ECONNRESET);
1961 ret = 0;
1962 break;
1963 }
1964 }
1965 spin_unlock_irqrestore(&ep->dev->lock, flags);
1966 return ret;
1967 }
1968
1969 /**
1970  * pch_udc_pcd_set_halt() - This function sets or clears the endpoint halt
1971  *				feature
1972 * @usbep: Reference to the USB endpoint structure
1973 * @halt: Specifies whether to set or clear the feature
1974 *
1975 * Return codes:
1976 * 0: Success
1977 * linux error number: Failure
1978 */
1979 static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
1980 {
1981 struct pch_udc_ep *ep;
1982 struct pch_udc_dev *dev;
1983 unsigned long iflags;
1984 int ret;
1985
1986 if (!usbep)
1987 return -EINVAL;
1988 ep = container_of(usbep, struct pch_udc_ep, ep);
1989 dev = ep->dev;
1990 if (!ep->ep.desc && !ep->num)
1991 return -EINVAL;
1992 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1993 return -ESHUTDOWN;
1994 spin_lock_irqsave(&udc_stall_spinlock, iflags);
1995 if (list_empty(&ep->queue)) {
1996 if (halt) {
1997 if (ep->num == PCH_UDC_EP0)
1998 ep->dev->stall = 1;
1999 pch_udc_ep_set_stall(ep);
2000 pch_udc_enable_ep_interrupts(ep->dev,
2001 PCH_UDC_EPINT(ep->in,
2002 ep->num));
2003 } else {
2004 pch_udc_ep_clear_stall(ep);
2005 }
2006 ret = 0;
2007 } else {
2008 ret = -EAGAIN;
2009 }
2010 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2011 return ret;
2012 }
2013
2014 /**
2015  * pch_udc_pcd_set_wedge() - This function sets the endpoint wedge
2016  *				(a halt that only the gadget driver can clear)
2017  * @usbep:	Reference to the USB endpoint structure
2019 *
2020 * Return codes:
2021 * 0: Success
2022 * linux error number: Failure
2023 */
2024 static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
2025 {
2026 struct pch_udc_ep *ep;
2027 struct pch_udc_dev *dev;
2028 unsigned long iflags;
2029 int ret;
2030
2031 if (!usbep)
2032 return -EINVAL;
2033 ep = container_of(usbep, struct pch_udc_ep, ep);
2034 dev = ep->dev;
2035 if (!ep->ep.desc && !ep->num)
2036 return -EINVAL;
2037 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
2038 return -ESHUTDOWN;
2039 spin_lock_irqsave(&udc_stall_spinlock, iflags);
2040 if (!list_empty(&ep->queue)) {
2041 ret = -EAGAIN;
2042 } else {
2043 if (ep->num == PCH_UDC_EP0)
2044 ep->dev->stall = 1;
2045 pch_udc_ep_set_stall(ep);
2046 pch_udc_enable_ep_interrupts(ep->dev,
2047 PCH_UDC_EPINT(ep->in, ep->num));
2048 ep->dev->prot_stall = 1;
2049 ret = 0;
2050 }
2051 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2052 return ret;
2053 }
2054
2055 /**
2056  * pch_udc_pcd_fifo_flush() - This function flushes the FIFO of the specified endpoint
2057 * @usbep: Reference to the USB endpoint structure
2058 */
2059 static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
2060 {
2061 struct pch_udc_ep *ep;
2062
2063 if (!usbep)
2064 return;
2065
2066 ep = container_of(usbep, struct pch_udc_ep, ep);
2067 if (ep->ep.desc || !ep->num)
2068 pch_udc_ep_fifo_flush(ep, ep->in);
2069 }
2070
2071 static const struct usb_ep_ops pch_udc_ep_ops = {
2072 .enable = pch_udc_pcd_ep_enable,
2073 .disable = pch_udc_pcd_ep_disable,
2074 .alloc_request = pch_udc_alloc_request,
2075 .free_request = pch_udc_free_request,
2076 .queue = pch_udc_pcd_queue,
2077 .dequeue = pch_udc_pcd_dequeue,
2078 .set_halt = pch_udc_pcd_set_halt,
2079 .set_wedge = pch_udc_pcd_set_wedge,
2080 .fifo_status = NULL,
2081 .fifo_flush = pch_udc_pcd_fifo_flush,
2082 };
2083
2084 /**
2085 * pch_udc_init_setup_buff() - This function initializes the SETUP buffer
2086  * @td_stp:	Reference to the SETUP buffer structure
2087 */
2088 static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
2089 {
2090 static u32 pky_marker;
2091
2092 if (!td_stp)
2093 return;
2094 td_stp->reserved = ++pky_marker;
2095 memset(&td_stp->request, 0xFF, sizeof td_stp->request);
2096 td_stp->status = PCH_UDC_BS_HST_RDY;
2097 }
2098
2099 /**
2100 * pch_udc_start_next_txrequest() - This function starts
2101  *				the next transmit request
2102 * @ep: Reference to the endpoint structure
2103 */
2104 static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
2105 {
2106 struct pch_udc_request *req;
2107 struct pch_udc_data_dma_desc *td_data;
2108
2109 if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
2110 return;
2111
2112 if (list_empty(&ep->queue))
2113 return;
2114
2115 /* next request */
2116 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2117 if (req->dma_going)
2118 return;
2119 if (!req->td_data)
2120 return;
2121 pch_udc_wait_ep_stall(ep);
2122 req->dma_going = 1;
2123 pch_udc_ep_set_ddptr(ep, 0);
2124 td_data = req->td_data;
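	/*
	 * Walk the descriptor chain and hand every descriptor to the
	 * hardware (HOST_RDY); the chain ends at the descriptor that
	 * carries the PCH_UDC_DMA_LAST flag.
	 */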
2125 while (1) {
2126 td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
2127 PCH_UDC_BS_HST_RDY;
2128 if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
2129 break;
2130 td_data = phys_to_virt(td_data->next);
2131 }
2132 pch_udc_ep_set_ddptr(ep, req->td_data_phys);
2133 pch_udc_set_dma(ep->dev, DMA_DIR_TX);
2134 pch_udc_ep_set_pd(ep);
2135 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2136 pch_udc_ep_clear_nak(ep);
2137 }
2138
2139 /**
2140 * pch_udc_complete_transfer() - This function completes a transfer
2141 * @ep: Reference to the endpoint structure
2142 */
2143 static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
2144 {
2145 struct pch_udc_request *req;
2146 struct pch_udc_dev *dev = ep->dev;
2147
2148 if (list_empty(&ep->queue))
2149 return;
2150 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2151 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2152 PCH_UDC_BS_DMA_DONE)
2153 return;
2154 if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
2155 PCH_UDC_RTS_SUCC) {
2156 dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
2157 "epstatus=0x%08x\n",
2158 (req->td_data_last->status & PCH_UDC_RXTX_STS),
2159 (int)(ep->epsts));
2160 return;
2161 }
2162
2163 req->req.actual = req->req.length;
2164 req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2165 req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2166 complete_req(ep, req, 0);
2167 req->dma_going = 0;
2168 if (!list_empty(&ep->queue)) {
2169 pch_udc_wait_ep_stall(ep);
2170 pch_udc_ep_clear_nak(ep);
2171 pch_udc_enable_ep_interrupts(ep->dev,
2172 PCH_UDC_EPINT(ep->in, ep->num));
2173 } else {
2174 pch_udc_disable_ep_interrupts(ep->dev,
2175 PCH_UDC_EPINT(ep->in, ep->num));
2176 }
2177 }
2178
2179 /**
2180  * pch_udc_complete_receiver() - This function completes an OUT (receive) transfer
2181 * @ep: Reference to the endpoint structure
2182 */
2183 static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
2184 {
2185 struct pch_udc_request *req;
2186 struct pch_udc_dev *dev = ep->dev;
2187 unsigned int count;
2188 struct pch_udc_data_dma_desc *td;
2189 dma_addr_t addr;
2190
2191 if (list_empty(&ep->queue))
2192 return;
2193 /* next request */
2194 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2195 pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
2196 pch_udc_ep_set_ddptr(ep, 0);
2197 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
2198 PCH_UDC_BS_DMA_DONE)
2199 td = req->td_data_last;
2200 else
2201 td = req->td_data;
2202
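	/*
	 * Walk the chain until the descriptor flagged PCH_UDC_DMA_LAST;
	 * its RXTX_BYTES field holds the number of bytes received.
	 */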
2203 while (1) {
2204 if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
2205 dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
2206 "epstatus=0x%08x\n",
2207 (req->td_data->status & PCH_UDC_RXTX_STS),
2208 (int)(ep->epsts));
2209 return;
2210 }
2211 if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
2212 if (td->status & PCH_UDC_DMA_LAST) {
2213 count = td->status & PCH_UDC_RXTX_BYTES;
2214 break;
2215 }
2216 if (td == req->td_data_last) {
2217 dev_err(&dev->pdev->dev, "Not complete RX descriptor");
2218 return;
2219 }
2220 addr = (dma_addr_t)td->next;
2221 td = phys_to_virt(addr);
2222 }
2223 /* on 64k packets the RXBYTES field is zero */
2224 if (!count && (req->req.length == UDC_DMA_MAXPACKET))
2225 count = UDC_DMA_MAXPACKET;
2226 req->td_data->status |= PCH_UDC_DMA_LAST;
2227 td->status |= PCH_UDC_BS_HST_BSY;
2228
2229 req->dma_going = 0;
2230 req->req.actual = count;
2231 complete_req(ep, req, 0);
2232 	/* If there is a new or failed request, start it now */
2233 if (!list_empty(&ep->queue)) {
2234 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2235 pch_udc_start_rxrequest(ep, req);
2236 }
2237 }
2238
2239 /**
2240  * pch_udc_svc_data_in() - This function processes endpoint interrupts
2241 * for IN endpoints
2242 * @dev: Reference to the device structure
2243 * @ep_num: Endpoint that generated the interrupt
2244 */
2245 static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
2246 {
2247 u32 epsts;
2248 struct pch_udc_ep *ep;
2249
2250 ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2251 epsts = ep->epsts;
2252 ep->epsts = 0;
2253
2254 if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2255 UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2256 UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
2257 return;
2258 if ((epsts & UDC_EPSTS_BNA))
2259 return;
2260 if (epsts & UDC_EPSTS_HE)
2261 return;
2262 if (epsts & UDC_EPSTS_RSS) {
2263 pch_udc_ep_set_stall(ep);
2264 pch_udc_enable_ep_interrupts(ep->dev,
2265 PCH_UDC_EPINT(ep->in, ep->num));
2266 }
2267 if (epsts & UDC_EPSTS_RCS) {
2268 if (!dev->prot_stall) {
2269 pch_udc_ep_clear_stall(ep);
2270 } else {
2271 pch_udc_ep_set_stall(ep);
2272 pch_udc_enable_ep_interrupts(ep->dev,
2273 PCH_UDC_EPINT(ep->in, ep->num));
2274 }
2275 }
2276 if (epsts & UDC_EPSTS_TDC)
2277 pch_udc_complete_transfer(ep);
2278 /* On IN interrupt, provide data if we have any */
2279 if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
2280 !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
2281 pch_udc_start_next_txrequest(ep);
2282 }
2283
2284 /**
2285 * pch_udc_svc_data_out() - Handles interrupts from OUT endpoint
2286 * @dev: Reference to the device structure
2287 * @ep_num: Endpoint that generated the interrupt
2288 */
2289 static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
2290 {
2291 u32 epsts;
2292 struct pch_udc_ep *ep;
2293 struct pch_udc_request *req = NULL;
2294
2295 ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
2296 epsts = ep->epsts;
2297 ep->epsts = 0;
2298
2299 if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
2300 /* next request */
2301 req = list_entry(ep->queue.next, struct pch_udc_request,
2302 queue);
2303 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2304 PCH_UDC_BS_DMA_DONE) {
2305 if (!req->dma_going)
2306 pch_udc_start_rxrequest(ep, req);
2307 return;
2308 }
2309 }
2310 if (epsts & UDC_EPSTS_HE)
2311 return;
2312 if (epsts & UDC_EPSTS_RSS) {
2313 pch_udc_ep_set_stall(ep);
2314 pch_udc_enable_ep_interrupts(ep->dev,
2315 PCH_UDC_EPINT(ep->in, ep->num));
2316 }
2317 if (epsts & UDC_EPSTS_RCS) {
2318 if (!dev->prot_stall) {
2319 pch_udc_ep_clear_stall(ep);
2320 } else {
2321 pch_udc_ep_set_stall(ep);
2322 pch_udc_enable_ep_interrupts(ep->dev,
2323 PCH_UDC_EPINT(ep->in, ep->num));
2324 }
2325 }
2326 if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2327 UDC_EPSTS_OUT_DATA) {
2328 if (ep->dev->prot_stall == 1) {
2329 pch_udc_ep_set_stall(ep);
2330 pch_udc_enable_ep_interrupts(ep->dev,
2331 PCH_UDC_EPINT(ep->in, ep->num));
2332 } else {
2333 pch_udc_complete_receiver(ep);
2334 }
2335 }
2336 if (list_empty(&ep->queue))
2337 pch_udc_set_dma(dev, DMA_DIR_RX);
2338 }
2339
2340 /**
2341 * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
2342 * @dev: Reference to the device structure
2343 */
2344 static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
2345 {
2346 u32 epsts;
2347 struct pch_udc_ep *ep;
2348 struct pch_udc_ep *ep_out;
2349
2350 ep = &dev->ep[UDC_EP0IN_IDX];
2351 ep_out = &dev->ep[UDC_EP0OUT_IDX];
2352 epsts = ep->epsts;
2353 ep->epsts = 0;
2354
2355 if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2356 UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2357 UDC_EPSTS_XFERDONE)))
2358 return;
2359 if ((epsts & UDC_EPSTS_BNA))
2360 return;
2361 if (epsts & UDC_EPSTS_HE)
2362 return;
2363 if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
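		/*
		 * IN data stage finished: complete the request and re-arm
		 * EP0 OUT so the next packet from the host (status stage
		 * or a new SETUP) can be received.
		 */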
2364 pch_udc_complete_transfer(ep);
2365 pch_udc_clear_dma(dev, DMA_DIR_RX);
2366 ep_out->td_data->status = (ep_out->td_data->status &
2367 ~PCH_UDC_BUFF_STS) |
2368 PCH_UDC_BS_HST_RDY;
2369 pch_udc_ep_clear_nak(ep_out);
2370 pch_udc_set_dma(dev, DMA_DIR_RX);
2371 pch_udc_ep_set_rrdy(ep_out);
2372 }
2373 /* On IN interrupt, provide data if we have any */
2374 if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
2375 !(epsts & UDC_EPSTS_TXEMPTY))
2376 pch_udc_start_next_txrequest(ep);
2377 }
2378
2379 /**
2380  * pch_udc_svc_control_out() - Routine that handles Control
2381 * OUT endpoint interrupts
2382 * @dev: Reference to the device structure
2383 */
2384 static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2385 {
2386 u32 stat;
2387 int setup_supported;
2388 struct pch_udc_ep *ep;
2389
2390 ep = &dev->ep[UDC_EP0OUT_IDX];
2391 stat = ep->epsts;
2392 ep->epsts = 0;
2393
2394 /* If setup data */
2395 if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2396 UDC_EPSTS_OUT_SETUP) {
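		/*
		 * A new SETUP packet arrived: clear the stall state, latch
		 * the setup data, re-initialize the setup buffer and pick
		 * the EP0 direction before calling the gadget's setup().
		 */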
2397 dev->stall = 0;
2398 dev->ep[UDC_EP0IN_IDX].halted = 0;
2399 dev->ep[UDC_EP0OUT_IDX].halted = 0;
2400 dev->setup_data = ep->td_stp->request;
2401 pch_udc_init_setup_buff(ep->td_stp);
2402 pch_udc_clear_dma(dev, DMA_DIR_RX);
2403 pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
2404 dev->ep[UDC_EP0IN_IDX].in);
2405 if ((dev->setup_data.bRequestType & USB_DIR_IN))
2406 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2407 else /* OUT */
2408 dev->gadget.ep0 = &ep->ep;
2409 spin_unlock(&dev->lock);
2410 /* If Mass storage Reset */
2411 if ((dev->setup_data.bRequestType == 0x21) &&
2412 (dev->setup_data.bRequest == 0xFF))
2413 dev->prot_stall = 0;
2414 /* call gadget with setup data received */
2415 setup_supported = dev->driver->setup(&dev->gadget,
2416 &dev->setup_data);
2417 spin_lock(&dev->lock);
2418
2419 if (dev->setup_data.bRequestType & USB_DIR_IN) {
2420 ep->td_data->status = (ep->td_data->status &
2421 ~PCH_UDC_BUFF_STS) |
2422 PCH_UDC_BS_HST_RDY;
2423 pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2424 }
2425 /* ep0 in returns data on IN phase */
2426 if (setup_supported >= 0 && setup_supported <
2427 UDC_EP0IN_MAX_PKT_SIZE) {
2428 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
2429 /* Gadget would have queued a request when
2430 * we called the setup */
2431 if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
2432 pch_udc_set_dma(dev, DMA_DIR_RX);
2433 pch_udc_ep_clear_nak(ep);
2434 }
2435 } else if (setup_supported < 0) {
2436 /* if unsupported request, then stall */
2437 pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
2438 pch_udc_enable_ep_interrupts(ep->dev,
2439 PCH_UDC_EPINT(ep->in, ep->num));
2440 dev->stall = 0;
2441 pch_udc_set_dma(dev, DMA_DIR_RX);
2442 } else {
2443 dev->waiting_zlp_ack = 1;
2444 }
2445 } else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2446 UDC_EPSTS_OUT_DATA) && !dev->stall) {
2447 pch_udc_clear_dma(dev, DMA_DIR_RX);
2448 pch_udc_ep_set_ddptr(ep, 0);
2449 if (!list_empty(&ep->queue)) {
2450 ep->epsts = stat;
2451 pch_udc_svc_data_out(dev, PCH_UDC_EP0);
2452 }
2453 pch_udc_set_dma(dev, DMA_DIR_RX);
2454 }
2455 pch_udc_ep_set_rrdy(ep);
2456 }
2457
2458
2459 /**
2460  * pch_udc_postsvc_epinters() - This function enables endpoint interrupts
2461 * and clears NAK status
2462 * @dev: Reference to the device structure
2463 * @ep_num: End point number
2464 */
2465 static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2466 {
2467 struct pch_udc_ep *ep;
2468 struct pch_udc_request *req;
2469
2470 ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2471 if (!list_empty(&ep->queue)) {
2472 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2473 pch_udc_enable_ep_interrupts(ep->dev,
2474 PCH_UDC_EPINT(ep->in, ep->num));
2475 pch_udc_ep_clear_nak(ep);
2476 }
2477 }
2478
2479 /**
2480  * pch_udc_read_all_epstatus() - This function reads the status of all endpoints
2481 * @dev: Reference to the device structure
2482 * @ep_intr: Status of endpoint interrupt
2483 */
2484 static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2485 {
2486 int i;
2487 struct pch_udc_ep *ep;
2488
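	/* IN endpoints report in bits 0-15, OUT endpoints in bits 16-31 */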
2489 for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2490 /* IN */
2491 if (ep_intr & (0x1 << i)) {
2492 ep = &dev->ep[UDC_EPIN_IDX(i)];
2493 ep->epsts = pch_udc_read_ep_status(ep);
2494 pch_udc_clear_ep_status(ep, ep->epsts);
2495 }
2496 /* OUT */
2497 if (ep_intr & (0x10000 << i)) {
2498 ep = &dev->ep[UDC_EPOUT_IDX(i)];
2499 ep->epsts = pch_udc_read_ep_status(ep);
2500 pch_udc_clear_ep_status(ep, ep->epsts);
2501 }
2502 }
2503 }
2504
2505 /**
2506 * pch_udc_activate_control_ep() - This function enables the control endpoints
2507 * for traffic after a reset
2508 * @dev: Reference to the device structure
2509 */
2510 static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
2511 {
2512 struct pch_udc_ep *ep;
2513 u32 val;
2514
2515 /* Setup the IN endpoint */
2516 ep = &dev->ep[UDC_EP0IN_IDX];
2517 pch_udc_clear_ep_control(ep);
2518 pch_udc_ep_fifo_flush(ep, ep->in);
2519 pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
2520 pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
2521 /* Initialize the IN EP Descriptor */
2522 ep->td_data = NULL;
2523 ep->td_stp = NULL;
2524 ep->td_data_phys = 0;
2525 ep->td_stp_phys = 0;
2526
2527 /* Setup the OUT endpoint */
2528 ep = &dev->ep[UDC_EP0OUT_IDX];
2529 pch_udc_clear_ep_control(ep);
2530 pch_udc_ep_fifo_flush(ep, ep->in);
2531 pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
2532 pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
2533 val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
2534 pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
2535
2536 /* Initialize the SETUP buffer */
2537 pch_udc_init_setup_buff(ep->td_stp);
2538 	/* Write the pointer address of the SETUP buffer descriptor */
2539 	pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
2540 	/* Write the pointer address of the data descriptor */
2541 pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2542
2543 /* Initialize the dma descriptor */
2544 ep->td_data->status = PCH_UDC_DMA_LAST;
2545 ep->td_data->dataptr = dev->dma_addr;
2546 ep->td_data->next = ep->td_data_phys;
2547
2548 pch_udc_ep_clear_nak(ep);
2549 }
2550
2551
2552 /**
2553 * pch_udc_svc_ur_interrupt() - This function handles a USB reset interrupt
2554 * @dev: Reference to driver structure
2555 */
2556 static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
2557 {
2558 struct pch_udc_ep *ep;
2559 int i;
2560
2561 pch_udc_clear_dma(dev, DMA_DIR_TX);
2562 pch_udc_clear_dma(dev, DMA_DIR_RX);
2563 /* Mask all endpoint interrupts */
2564 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2565 /* clear all endpoint interrupts */
2566 pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2567
2568 for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2569 ep = &dev->ep[i];
2570 pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
2571 pch_udc_clear_ep_control(ep);
2572 pch_udc_ep_set_ddptr(ep, 0);
2573 pch_udc_write_csr(ep->dev, 0x00, i);
2574 }
2575 dev->stall = 0;
2576 dev->prot_stall = 0;
2577 dev->waiting_zlp_ack = 0;
2578 dev->set_cfg_not_acked = 0;
2579
2580 	/* NAK and flush every endpoint, then empty its request queue */
2581 for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
2582 ep = &dev->ep[i];
2583 pch_udc_ep_set_nak(ep);
2584 pch_udc_ep_fifo_flush(ep, ep->in);
2585 /* Complete request queue */
2586 empty_req_queue(ep);
2587 }
2588 if (dev->driver && dev->driver->disconnect) {
2589 spin_unlock(&dev->lock);
2590 dev->driver->disconnect(&dev->gadget);
2591 spin_lock(&dev->lock);
2592 }
2593 }
2594
2595 /**
2596 * pch_udc_svc_enum_interrupt() - This function handles a USB speed enumeration
2597 * done interrupt
2598 * @dev: Reference to driver structure
2599 */
2600 static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
2601 {
2602 u32 dev_stat, dev_speed;
2603 u32 speed = USB_SPEED_FULL;
2604
2605 dev_stat = pch_udc_read_device_status(dev);
2606 dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
2607 UDC_DEVSTS_ENUM_SPEED_SHIFT;
2608 switch (dev_speed) {
2609 case UDC_DEVSTS_ENUM_SPEED_HIGH:
2610 speed = USB_SPEED_HIGH;
2611 break;
2612 case UDC_DEVSTS_ENUM_SPEED_FULL:
2613 speed = USB_SPEED_FULL;
2614 break;
2615 case UDC_DEVSTS_ENUM_SPEED_LOW:
2616 speed = USB_SPEED_LOW;
2617 break;
2618 default:
2619 BUG();
2620 }
2621 dev->gadget.speed = speed;
2622 pch_udc_activate_control_ep(dev);
2623 pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
2624 pch_udc_set_dma(dev, DMA_DIR_TX);
2625 pch_udc_set_dma(dev, DMA_DIR_RX);
2626 pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
2627
2628 /* enable device interrupts */
2629 pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2630 UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2631 UDC_DEVINT_SI | UDC_DEVINT_SC);
2632 }
2633
2634 /**
2635 * pch_udc_svc_intf_interrupt() - This function handles a set interface
2636 * interrupt
2637 * @dev: Reference to driver structure
2638 */
2639 static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
2640 {
2641 u32 reg, dev_stat = 0;
2642 int i, ret;
2643
2644 dev_stat = pch_udc_read_device_status(dev);
2645 dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
2646 UDC_DEVSTS_INTF_SHIFT;
2647 dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
2648 UDC_DEVSTS_ALT_SHIFT;
2649 dev->set_cfg_not_acked = 1;
2650 /* Construct the usb request for gadget driver and inform it */
2651 memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2652 dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
2653 dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
2654 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
2655 dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
2656 	/* program the Endpoint Cfg registers */
2657 /* Only one end point cfg register */
2658 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2659 reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
2660 (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
2661 reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
2662 (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
2663 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2664 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2665 /* clear stall bits */
2666 pch_udc_ep_clear_stall(&(dev->ep[i]));
2667 dev->ep[i].halted = 0;
2668 }
2669 dev->stall = 0;
2670 spin_unlock(&dev->lock);
2671 ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
2672 spin_lock(&dev->lock);
2673 }
2674
2675 /**
2676 * pch_udc_svc_cfg_interrupt() - This function handles a set configuration
2677 * interrupt
2678 * @dev: Reference to driver structure
2679 */
2680 static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
2681 {
2682 int i, ret;
2683 u32 reg, dev_stat = 0;
2684
2685 dev_stat = pch_udc_read_device_status(dev);
2686 dev->set_cfg_not_acked = 1;
2687 dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
2688 UDC_DEVSTS_CFG_SHIFT;
2689 /* make usb request for gadget driver */
2690 memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2691 dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
2692 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
2693 /* program the NE registers */
2694 /* Only one end point cfg register */
2695 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2696 reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
2697 (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
2698 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2699 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2700 /* clear stall bits */
2701 pch_udc_ep_clear_stall(&(dev->ep[i]));
2702 dev->ep[i].halted = 0;
2703 }
2704 dev->stall = 0;
2705
2706 /* call gadget zero with setup data received */
2707 spin_unlock(&dev->lock);
2708 ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
2709 spin_lock(&dev->lock);
2710 }
2711
2712 /**
2713 * pch_udc_dev_isr() - This function services device interrupts
2714 * by invoking appropriate routines.
2715 * @dev: Reference to the device structure
2716 * @dev_intr: The Device interrupt status.
2717 */
2718 static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
2719 {
2720 int vbus;
2721
2722 /* USB Reset Interrupt */
2723 if (dev_intr & UDC_DEVINT_UR) {
2724 pch_udc_svc_ur_interrupt(dev);
2725 dev_dbg(&dev->pdev->dev, "USB_RESET\n");
2726 }
2727 /* Enumeration Done Interrupt */
2728 if (dev_intr & UDC_DEVINT_ENUM) {
2729 pch_udc_svc_enum_interrupt(dev);
2730 dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
2731 }
2732 /* Set Interface Interrupt */
2733 if (dev_intr & UDC_DEVINT_SI)
2734 pch_udc_svc_intf_interrupt(dev);
2735 /* Set Config Interrupt */
2736 if (dev_intr & UDC_DEVINT_SC)
2737 pch_udc_svc_cfg_interrupt(dev);
2738 /* USB Suspend interrupt */
2739 if (dev_intr & UDC_DEVINT_US) {
2740 if (dev->driver
2741 && dev->driver->suspend) {
2742 spin_unlock(&dev->lock);
2743 dev->driver->suspend(&dev->gadget);
2744 spin_lock(&dev->lock);
2745 }
2746
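		/*
		 * Use the VBUS state (read via GPIO) to tell a cable
		 * disconnect from a plain suspend: with no VBUS session and
		 * VBUS low, report a disconnect and reconnect the controller.
		 */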
2747 vbus = pch_vbus_gpio_get_value(dev);
2748 if ((dev->vbus_session == 0)
2749 && (vbus != 1)) {
2750 if (dev->driver && dev->driver->disconnect) {
2751 spin_unlock(&dev->lock);
2752 dev->driver->disconnect(&dev->gadget);
2753 spin_lock(&dev->lock);
2754 }
2755 pch_udc_reconnect(dev);
2756 } else if ((dev->vbus_session == 0)
2757 && (vbus == 1)
2758 && !dev->vbus_gpio.intr)
2759 schedule_work(&dev->vbus_gpio.irq_work_fall);
2760
2761 dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
2762 }
2763 /* Clear the SOF interrupt, if enabled */
2764 if (dev_intr & UDC_DEVINT_SOF)
2765 dev_dbg(&dev->pdev->dev, "SOF\n");
2766 /* ES interrupt, IDLE > 3ms on the USB */
2767 if (dev_intr & UDC_DEVINT_ES)
2768 dev_dbg(&dev->pdev->dev, "ES\n");
2769 /* RWKP interrupt */
2770 if (dev_intr & UDC_DEVINT_RWKP)
2771 dev_dbg(&dev->pdev->dev, "RWKP\n");
2772 }
2773
2774 /**
2775 * pch_udc_isr() - This function handles interrupts from the PCH USB Device
2776 * @irq: Interrupt request number
2777  * @pdev:	Reference to the device structure
2778 */
2779 static irqreturn_t pch_udc_isr(int irq, void *pdev)
2780 {
2781 struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
2782 u32 dev_intr, ep_intr;
2783 int i;
2784
2785 dev_intr = pch_udc_read_device_interrupts(dev);
2786 ep_intr = pch_udc_read_ep_interrupts(dev);
2787
2788 	/* On a hot plug, detect whether the controller has hung up. */
2789 if (dev_intr == ep_intr)
2790 if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
2791 dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
2792 /* The controller is reset */
2793 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
2794 return IRQ_HANDLED;
2795 }
2796 if (dev_intr)
2797 /* Clear device interrupts */
2798 pch_udc_write_device_interrupts(dev, dev_intr);
2799 if (ep_intr)
2800 /* Clear ep interrupts */
2801 pch_udc_write_ep_interrupts(dev, ep_intr);
2802 if (!dev_intr && !ep_intr)
2803 return IRQ_NONE;
2804 spin_lock(&dev->lock);
2805 if (dev_intr)
2806 pch_udc_dev_isr(dev, dev_intr);
2807 if (ep_intr) {
2808 pch_udc_read_all_epstatus(dev, ep_intr);
2809 /* Process Control In interrupts, if present */
2810 if (ep_intr & UDC_EPINT_IN_EP0) {
2811 pch_udc_svc_control_in(dev);
2812 pch_udc_postsvc_epinters(dev, 0);
2813 }
2814 /* Process Control Out interrupts, if present */
2815 if (ep_intr & UDC_EPINT_OUT_EP0)
2816 pch_udc_svc_control_out(dev);
2817 /* Process data in end point interrupts */
2818 for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
2819 if (ep_intr & (1 << i)) {
2820 pch_udc_svc_data_in(dev, i);
2821 pch_udc_postsvc_epinters(dev, i);
2822 }
2823 }
2824 /* Process data out end point interrupts */
2825 for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
2826 PCH_UDC_USED_EP_NUM); i++)
2827 if (ep_intr & (1 << i))
2828 pch_udc_svc_data_out(dev, i -
2829 UDC_EPINT_OUT_SHIFT);
2830 }
2831 spin_unlock(&dev->lock);
2832 return IRQ_HANDLED;
2833 }
2834
2835 /**
2836  * pch_udc_setup_ep0() - This function enables the control endpoint for traffic
2837 * @dev: Reference to the device structure
2838 */
2839 static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
2840 {
2841 /* enable ep0 interrupts */
2842 pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
2843 UDC_EPINT_OUT_EP0);
2844 /* enable device interrupts */
2845 pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2846 UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2847 UDC_DEVINT_SI | UDC_DEVINT_SC);
2848 }
2849
2850 /**
2851 * gadget_release() - Free the gadget driver private data
2852  * @pdev:	Reference to the gadget's embedded struct device
2853 */
2854 static void gadget_release(struct device *pdev)
2855 {
2856 struct pch_udc_dev *dev = dev_get_drvdata(pdev);
2857
2858 kfree(dev);
2859 }
2860
2861 /**
2862 * pch_udc_pcd_reinit() - This API initializes the endpoint structures
2863 * @dev: Reference to the driver structure
2864 */
2865 static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
2866 {
2867 const char *const ep_string[] = {
2868 ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
2869 "ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
2870 "ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
2871 "ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
2872 "ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
2873 "ep15in", "ep15out",
2874 };
2875 int i;
2876
2877 dev->gadget.speed = USB_SPEED_UNKNOWN;
2878 INIT_LIST_HEAD(&dev->gadget.ep_list);
2879
2880 /* Initialize the endpoints structures */
2881 memset(dev->ep, 0, sizeof dev->ep);
2882 for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2883 struct pch_udc_ep *ep = &dev->ep[i];
2884 ep->dev = dev;
2885 ep->halted = 1;
2886 ep->num = i / 2;
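		/* even indices are IN endpoints, odd indices are OUT */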
2887 ep->in = ~i & 1;
2888 ep->ep.name = ep_string[i];
2889 ep->ep.ops = &pch_udc_ep_ops;
2890 if (ep->in)
2891 ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
2892 else
2893 ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
2894 UDC_EP_REG_SHIFT;
2895 /* need to set ep->ep.maxpacket and set Default Configuration?*/
2896 ep->ep.maxpacket = UDC_BULK_MAX_PKT_SIZE;
2897 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2898 INIT_LIST_HEAD(&ep->queue);
2899 }
2900 dev->ep[UDC_EP0IN_IDX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
2901 dev->ep[UDC_EP0OUT_IDX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
2902
2903 	/* remove ep0 in and out from the list; they have their own pointer */
2904 list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
2905 list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
2906
2907 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2908 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2909 }
2910
2911 /**
2912 * pch_udc_pcd_init() - This API initializes the driver structure
2913 * @dev: Reference to the driver structure
2914 *
2915 * Return codes:
2916 * 0: Success
2917 */
2918 static int pch_udc_pcd_init(struct pch_udc_dev *dev)
2919 {
2920 pch_udc_init(dev);
2921 pch_udc_pcd_reinit(dev);
2922 pch_vbus_gpio_init(dev, vbus_gpio_port);
2923 return 0;
2924 }
2925
2926 /**
2927 * init_dma_pools() - create dma pools during initialization
2928  * @dev:	Reference to the driver structure
2929 */
2930 static int init_dma_pools(struct pch_udc_dev *dev)
2931 {
2932 struct pch_udc_stp_dma_desc *td_stp;
2933 struct pch_udc_data_dma_desc *td_data;
2934
2935 /* DMA setup */
2936 dev->data_requests = pci_pool_create("data_requests", dev->pdev,
2937 sizeof(struct pch_udc_data_dma_desc), 0, 0);
2938 if (!dev->data_requests) {
2939 dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2940 __func__);
2941 return -ENOMEM;
2942 }
2943
2944 /* dma desc for setup data */
2945 dev->stp_requests = pci_pool_create("setup requests", dev->pdev,
2946 sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2947 if (!dev->stp_requests) {
2948 dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2949 __func__);
2950 return -ENOMEM;
2951 }
2952 /* setup */
2953 td_stp = pci_pool_alloc(dev->stp_requests, GFP_KERNEL,
2954 &dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2955 if (!td_stp) {
2956 dev_err(&dev->pdev->dev,
2957 "%s: can't allocate setup dma descriptor\n", __func__);
2958 return -ENOMEM;
2959 }
2960 dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
2961
2962 /* data: 0 packets !? */
2963 td_data = pci_pool_alloc(dev->data_requests, GFP_KERNEL,
2964 &dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2965 if (!td_data) {
2966 dev_err(&dev->pdev->dev,
2967 "%s: can't allocate data dma descriptor\n", __func__);
2968 return -ENOMEM;
2969 }
2970 dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2971 dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2972 dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2973 dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2974 dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2975
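	/*
	 * Buffer for EP0 OUT data (4 x UDC_EP0OUT_BUFF_SIZE), DMA-mapped
	 * once here; the EP0 OUT data descriptor points at dev->dma_addr.
	 */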
2976 dev->ep0out_buf = kzalloc(UDC_EP0OUT_BUFF_SIZE * 4, GFP_KERNEL);
2977 if (!dev->ep0out_buf)
2978 return -ENOMEM;
2979 dev->dma_addr = dma_map_single(&dev->pdev->dev, dev->ep0out_buf,
2980 UDC_EP0OUT_BUFF_SIZE * 4,
2981 DMA_FROM_DEVICE);
2982 return 0;
2983 }
2984
2985 static int pch_udc_start(struct usb_gadget *g,
2986 struct usb_gadget_driver *driver)
2987 {
2988 struct pch_udc_dev *dev = to_pch_udc(g);
2989
2990 driver->driver.bus = NULL;
2991 dev->driver = driver;
2992 dev->gadget.dev.driver = &driver->driver;
2993
2994 /* get ready for ep0 traffic */
2995 pch_udc_setup_ep0(dev);
2996
2997 /* clear SD */
2998 if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
2999 pch_udc_clear_disconnect(dev);
3000
3001 dev->connected = 1;
3002 return 0;
3003 }
3004
3005 static int pch_udc_stop(struct usb_gadget *g,
3006 struct usb_gadget_driver *driver)
3007 {
3008 struct pch_udc_dev *dev = to_pch_udc(g);
3009
3010 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3011
3012 	/* Ensures that there are no pending requests with this driver */
3013 dev->gadget.dev.driver = NULL;
3014 dev->driver = NULL;
3015 dev->connected = 0;
3016
3017 /* set SD */
3018 pch_udc_set_disconnect(dev);
3019
3020 return 0;
3021 }
3022
3023 static void pch_udc_shutdown(struct pci_dev *pdev)
3024 {
3025 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3026
3027 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3028 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3029
3030 /* disable the pullup so the host will think we're gone */
3031 pch_udc_set_disconnect(dev);
3032 }
3033
3034 static void pch_udc_remove(struct pci_dev *pdev)
3035 {
3036 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3037
3038 usb_del_gadget_udc(&dev->gadget);
3039
3040 	/* a gadget driver must no longer be bound at this point */
3041 if (dev->driver)
3042 dev_err(&pdev->dev,
3043 "%s: gadget driver still bound!!!\n", __func__);
3044 /* dma pool cleanup */
3045 if (dev->data_requests)
3046 pci_pool_destroy(dev->data_requests);
3047
3048 if (dev->stp_requests) {
3049 		/* cleanup DMA descriptors for ep0 out */
3050 if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
3051 pci_pool_free(dev->stp_requests,
3052 dev->ep[UDC_EP0OUT_IDX].td_stp,
3053 dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
3054 }
3055 if (dev->ep[UDC_EP0OUT_IDX].td_data) {
3056 pci_pool_free(dev->stp_requests,
3057 dev->ep[UDC_EP0OUT_IDX].td_data,
3058 dev->ep[UDC_EP0OUT_IDX].td_data_phys);
3059 }
3060 pci_pool_destroy(dev->stp_requests);
3061 }
3062
3063 if (dev->dma_addr)
3064 dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
3065 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
3066 kfree(dev->ep0out_buf);
3067
3068 pch_vbus_gpio_free(dev);
3069
3070 pch_udc_exit(dev);
3071
3072 if (dev->irq_registered)
3073 free_irq(pdev->irq, dev);
3074 if (dev->base_addr)
3075 iounmap(dev->base_addr);
3076 if (dev->mem_region)
3077 release_mem_region(dev->phys_addr,
3078 pci_resource_len(pdev, PCH_UDC_PCI_BAR));
3079 if (dev->active)
3080 pci_disable_device(pdev);
3081 if (dev->registered)
3082 device_unregister(&dev->gadget.dev);
3083 kfree(dev);
3084 pci_set_drvdata(pdev, NULL);
3085 }
3086
3087 #ifdef CONFIG_PM
3088 static int pch_udc_suspend(struct pci_dev *pdev, pm_message_t state)
3089 {
3090 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3091
3092 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3093 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3094
3095 pci_disable_device(pdev);
3096 pci_enable_wake(pdev, PCI_D3hot, 0);
3097
3098 if (pci_save_state(pdev)) {
3099 dev_err(&pdev->dev,
3100 "%s: could not save PCI config state\n", __func__);
3101 return -ENOMEM;
3102 }
3103 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3104 return 0;
3105 }
3106
3107 static int pch_udc_resume(struct pci_dev *pdev)
3108 {
3109 int ret;
3110
3111 pci_set_power_state(pdev, PCI_D0);
3112 pci_restore_state(pdev);
3113 ret = pci_enable_device(pdev);
3114 if (ret) {
3115 dev_err(&pdev->dev, "%s: pci_enable_device failed\n", __func__);
3116 return ret;
3117 }
3118 pci_enable_wake(pdev, PCI_D3hot, 0);
3119 return 0;
3120 }
3121 #else
3122 #define pch_udc_suspend NULL
3123 #define pch_udc_resume NULL
3124 #endif /* CONFIG_PM */
3125
3126 static int pch_udc_probe(struct pci_dev *pdev,
3127 const struct pci_device_id *id)
3128 {
3129 unsigned long resource;
3130 unsigned long len;
3131 int retval;
3132 struct pch_udc_dev *dev;
3133
3134 /* init */
3135 dev = kzalloc(sizeof *dev, GFP_KERNEL);
3136 if (!dev) {
3137 pr_err("%s: no memory for device structure\n", __func__);
3138 return -ENOMEM;
3139 }
3140 /* pci setup */
3141 if (pci_enable_device(pdev) < 0) {
3142 kfree(dev);
3143 pr_err("%s: pci_enable_device failed\n", __func__);
3144 return -ENODEV;
3145 }
3146 dev->active = 1;
3147 pci_set_drvdata(pdev, dev);
3148
3149 /* PCI resource allocation */
3150 resource = pci_resource_start(pdev, 1);
3151 len = pci_resource_len(pdev, 1);
3152
3153 if (!request_mem_region(resource, len, KBUILD_MODNAME)) {
3154 dev_err(&pdev->dev, "%s: pci device used already\n", __func__);
3155 retval = -EBUSY;
3156 goto finished;
3157 }
3158 dev->phys_addr = resource;
3159 dev->mem_region = 1;
3160
3161 dev->base_addr = ioremap_nocache(resource, len);
3162 if (!dev->base_addr) {
3163 pr_err("%s: device memory cannot be mapped\n", __func__);
3164 retval = -ENOMEM;
3165 goto finished;
3166 }
3167 if (!pdev->irq) {
3168 dev_err(&pdev->dev, "%s: irq not set\n", __func__);
3169 retval = -ENODEV;
3170 goto finished;
3171 }
3172 /* initialize the hardware */
3173 if (pch_udc_pcd_init(dev)) {
3174 retval = -ENODEV;
3175 goto finished;
3176 }
3177 if (request_irq(pdev->irq, pch_udc_isr, IRQF_SHARED, KBUILD_MODNAME,
3178 dev)) {
3179 dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
3180 pdev->irq);
3181 retval = -ENODEV;
3182 goto finished;
3183 }
3184 dev->irq = pdev->irq;
3185 dev->irq_registered = 1;
3186
3187 pci_set_master(pdev);
3188 pci_try_set_mwi(pdev);
3189
3190 /* device struct setup */
3191 spin_lock_init(&dev->lock);
3192 dev->pdev = pdev;
3193 dev->gadget.ops = &pch_udc_ops;
3194
3195 retval = init_dma_pools(dev);
3196 if (retval)
3197 goto finished;
3198
3199 dev_set_name(&dev->gadget.dev, "gadget");
3200 dev->gadget.dev.parent = &pdev->dev;
3201 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
3202 dev->gadget.dev.release = gadget_release;
3203 dev->gadget.name = KBUILD_MODNAME;
3204 dev->gadget.max_speed = USB_SPEED_HIGH;
3205
3206 retval = device_register(&dev->gadget.dev);
3207 if (retval)
3208 goto finished;
3209 dev->registered = 1;
3210
3211 /* Put the device in disconnected state till a driver is bound */
3212 pch_udc_set_disconnect(dev);
3213 retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
3214 if (retval)
3215 goto finished;
3216 return 0;
3217
3218 finished:
3219 pch_udc_remove(pdev);
3220 return retval;
3221 }
3222
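/* Match only PCI class 0x0c03fe: USB controllers operating in device mode */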
3223 static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
3224 {
3225 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
3226 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3227 .class_mask = 0xffffffff,
3228 },
3229 {
3230 PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
3231 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3232 .class_mask = 0xffffffff,
3233 },
3234 {
3235 PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
3236 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3237 .class_mask = 0xffffffff,
3238 },
3239 { 0 },
3240 };
3241
3242 MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
3243
3244 static struct pci_driver pch_udc_driver = {
3245 .name = KBUILD_MODNAME,
3246 .id_table = pch_udc_pcidev_id,
3247 .probe = pch_udc_probe,
3248 .remove = pch_udc_remove,
3249 .suspend = pch_udc_suspend,
3250 .resume = pch_udc_resume,
3251 .shutdown = pch_udc_shutdown,
3252 };
3253
3254 module_pci_driver(pch_udc_driver);
3255
3256 MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
3257 MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
3258 MODULE_LICENSE("GPL");