usb: gadget: pch_udc: Fix likely misuse of | for &
1 /*
2 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 */
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/delay.h>
13 #include <linux/errno.h>
14 #include <linux/list.h>
15 #include <linux/interrupt.h>
16 #include <linux/usb/ch9.h>
17 #include <linux/usb/gadget.h>
18 #include <linux/gpio.h>
19 #include <linux/irq.h>
20
21 /* GPIO port for VBUS detecting */
22 static int vbus_gpio_port = -1; /* GPIO port number (-1:Not used) */
23
24 #define PCH_VBUS_PERIOD 3000 /* VBUS polling period (msec) */
25 #define PCH_VBUS_INTERVAL 10 /* VBUS polling interval (msec) */
26
27 /* Address offset of Registers */
28 #define UDC_EP_REG_SHIFT 0x20 /* Offset to next EP */
29
30 #define UDC_EPCTL_ADDR 0x00 /* Endpoint control */
31 #define UDC_EPSTS_ADDR 0x04 /* Endpoint status */
32 #define UDC_BUFIN_FRAMENUM_ADDR 0x08 /* buffer size in / frame number out */
33 #define UDC_BUFOUT_MAXPKT_ADDR 0x0C /* buffer size out / maxpkt in */
34 #define UDC_SUBPTR_ADDR 0x10 /* setup buffer pointer */
35 #define UDC_DESPTR_ADDR 0x14 /* Data descriptor pointer */
36 #define UDC_CONFIRM_ADDR 0x18 /* Write/Read confirmation */
37
38 #define UDC_DEVCFG_ADDR 0x400 /* Device configuration */
39 #define UDC_DEVCTL_ADDR 0x404 /* Device control */
40 #define UDC_DEVSTS_ADDR 0x408 /* Device status */
41 #define UDC_DEVIRQSTS_ADDR 0x40C /* Device irq status */
42 #define UDC_DEVIRQMSK_ADDR 0x410 /* Device irq mask */
43 #define UDC_EPIRQSTS_ADDR 0x414 /* Endpoint irq status */
44 #define UDC_EPIRQMSK_ADDR 0x418 /* Endpoint irq mask */
45 #define UDC_DEVLPM_ADDR 0x41C /* LPM control / status */
46 #define UDC_CSR_BUSY_ADDR 0x4f0 /* UDC_CSR_BUSY Status register */
47 #define UDC_SRST_ADDR 0x4fc /* SOFT RESET register */
48 #define UDC_CSR_ADDR 0x500 /* USB_DEVICE endpoint register */
49
50 /* Endpoint control register */
51 /* Bit position */
52 #define UDC_EPCTL_MRXFLUSH (1 << 12)
53 #define UDC_EPCTL_RRDY (1 << 9)
54 #define UDC_EPCTL_CNAK (1 << 8)
55 #define UDC_EPCTL_SNAK (1 << 7)
56 #define UDC_EPCTL_NAK (1 << 6)
57 #define UDC_EPCTL_P (1 << 3)
58 #define UDC_EPCTL_F (1 << 1)
59 #define UDC_EPCTL_S (1 << 0)
60 #define UDC_EPCTL_ET_SHIFT 4
61 /* Mask pattern */
62 #define UDC_EPCTL_ET_MASK 0x00000030
63 /* Value for ET field */
64 #define UDC_EPCTL_ET_CONTROL 0
65 #define UDC_EPCTL_ET_ISO 1
66 #define UDC_EPCTL_ET_BULK 2
67 #define UDC_EPCTL_ET_INTERRUPT 3
68
69 /* Endpoint status register */
70 /* Bit position */
71 #define UDC_EPSTS_XFERDONE (1 << 27)
72 #define UDC_EPSTS_RSS (1 << 26)
73 #define UDC_EPSTS_RCS (1 << 25)
74 #define UDC_EPSTS_TXEMPTY (1 << 24)
75 #define UDC_EPSTS_TDC (1 << 10)
76 #define UDC_EPSTS_HE (1 << 9)
77 #define UDC_EPSTS_MRXFIFO_EMP (1 << 8)
78 #define UDC_EPSTS_BNA (1 << 7)
79 #define UDC_EPSTS_IN (1 << 6)
80 #define UDC_EPSTS_OUT_SHIFT 4
81 /* Mask pattern */
82 #define UDC_EPSTS_OUT_MASK 0x00000030
83 #define UDC_EPSTS_ALL_CLR_MASK 0x1F0006F0
84 /* Value for OUT field */
85 #define UDC_EPSTS_OUT_SETUP 2
86 #define UDC_EPSTS_OUT_DATA 1
87
88 /* Device configuration register */
89 /* Bit position */
90 #define UDC_DEVCFG_CSR_PRG (1 << 17)
91 #define UDC_DEVCFG_SP (1 << 3)
92 /* SPD Value */
93 #define UDC_DEVCFG_SPD_HS 0x0
94 #define UDC_DEVCFG_SPD_FS 0x1
95 #define UDC_DEVCFG_SPD_LS 0x2
96
97 /* Device control register */
98 /* Bit position */
99 #define UDC_DEVCTL_THLEN_SHIFT 24
100 #define UDC_DEVCTL_BRLEN_SHIFT 16
101 #define UDC_DEVCTL_CSR_DONE (1 << 13)
102 #define UDC_DEVCTL_SD (1 << 10)
103 #define UDC_DEVCTL_MODE (1 << 9)
104 #define UDC_DEVCTL_BREN (1 << 8)
105 #define UDC_DEVCTL_THE (1 << 7)
106 #define UDC_DEVCTL_DU (1 << 4)
107 #define UDC_DEVCTL_TDE (1 << 3)
108 #define UDC_DEVCTL_RDE (1 << 2)
109 #define UDC_DEVCTL_RES (1 << 0)
110
111 /* Device status register */
112 /* Bit position */
113 #define UDC_DEVSTS_TS_SHIFT 18
114 #define UDC_DEVSTS_ENUM_SPEED_SHIFT 13
115 #define UDC_DEVSTS_ALT_SHIFT 8
116 #define UDC_DEVSTS_INTF_SHIFT 4
117 #define UDC_DEVSTS_CFG_SHIFT 0
118 /* Mask pattern */
119 #define UDC_DEVSTS_TS_MASK 0xfffc0000
120 #define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000
121 #define UDC_DEVSTS_ALT_MASK 0x00000f00
122 #define UDC_DEVSTS_INTF_MASK 0x000000f0
123 #define UDC_DEVSTS_CFG_MASK 0x0000000f
124 /* Values for the ENUM_SPEED (enumerated bus speed) field */
125 #define UDC_DEVSTS_ENUM_SPEED_FULL 1
126 #define UDC_DEVSTS_ENUM_SPEED_HIGH 0
127 #define UDC_DEVSTS_ENUM_SPEED_LOW 2
128 #define UDC_DEVSTS_ENUM_SPEED_FULLX 3
129
130 /* Device irq register */
131 /* Bit position */
132 #define UDC_DEVINT_RWKP (1 << 7)
133 #define UDC_DEVINT_ENUM (1 << 6)
134 #define UDC_DEVINT_SOF (1 << 5)
135 #define UDC_DEVINT_US (1 << 4)
136 #define UDC_DEVINT_UR (1 << 3)
137 #define UDC_DEVINT_ES (1 << 2)
138 #define UDC_DEVINT_SI (1 << 1)
139 #define UDC_DEVINT_SC (1 << 0)
140 /* Mask pattern */
141 #define UDC_DEVINT_MSK 0x7f
142
143 /* Endpoint irq register */
144 /* Bit position */
145 #define UDC_EPINT_IN_SHIFT 0
146 #define UDC_EPINT_OUT_SHIFT 16
147 #define UDC_EPINT_IN_EP0 (1 << 0)
148 #define UDC_EPINT_OUT_EP0 (1 << 16)
149 /* Mask pattern */
150 #define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff
151
152 /* UDC_CSR_BUSY Status register */
153 /* Bit position */
154 #define UDC_CSR_BUSY (1 << 0)
155
156 /* SOFT RESET register */
157 /* Bit position */
158 #define UDC_PSRST (1 << 1)
159 #define UDC_SRST (1 << 0)
160
161 /* USB_DEVICE endpoint register */
162 /* Bit position */
163 #define UDC_CSR_NE_NUM_SHIFT 0
164 #define UDC_CSR_NE_DIR_SHIFT 4
165 #define UDC_CSR_NE_TYPE_SHIFT 5
166 #define UDC_CSR_NE_CFG_SHIFT 7
167 #define UDC_CSR_NE_INTF_SHIFT 11
168 #define UDC_CSR_NE_ALT_SHIFT 15
169 #define UDC_CSR_NE_MAX_PKT_SHIFT 19
170 /* Mask pattern */
171 #define UDC_CSR_NE_NUM_MASK 0x0000000f
172 #define UDC_CSR_NE_DIR_MASK 0x00000010
173 #define UDC_CSR_NE_TYPE_MASK 0x00000060
174 #define UDC_CSR_NE_CFG_MASK 0x00000780
175 #define UDC_CSR_NE_INTF_MASK 0x00007800
176 #define UDC_CSR_NE_ALT_MASK 0x00078000
177 #define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000
178
179 #define PCH_UDC_CSR(ep) (UDC_CSR_ADDR + ep*4)
180 #define PCH_UDC_EPINT(in, num)\
181 (1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
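/*
 * Illustrative expansion of the helper above (comment added for clarity,
 * not part of the original source).  With UDC_EPINT_IN_SHIFT == 0 and
 * UDC_EPINT_OUT_SHIFT == 16:
 *
 *   PCH_UDC_EPINT(1, 0) == (1 << 0)  == UDC_EPINT_IN_EP0
 *   PCH_UDC_EPINT(0, 0) == (1 << 16) == UDC_EPINT_OUT_EP0
 *   PCH_UDC_EPINT(1, 2) == (1 << 2)  (EP2 IN interrupt bit)
 *
 * so IN endpoints occupy bits 0..15 and OUT endpoints bits 16..31 of the
 * endpoint irq status/mask registers.
 */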
182
183 /* Index of endpoint */
184 #define UDC_EP0IN_IDX 0
185 #define UDC_EP0OUT_IDX 1
186 #define UDC_EPIN_IDX(ep) (ep * 2)
187 #define UDC_EPOUT_IDX(ep) (ep * 2 + 1)
188 #define PCH_UDC_EP0 0
189 #define PCH_UDC_EP1 1
190 #define PCH_UDC_EP2 2
191 #define PCH_UDC_EP3 3
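/*
 * Worked example (comment added for clarity, not part of the original
 * source): the endpoint index macros interleave IN and OUT endpoints, so
 * UDC_EPIN_IDX(0) == 0 == UDC_EP0IN_IDX, UDC_EPOUT_IDX(0) == 1 ==
 * UDC_EP0OUT_IDX, UDC_EPIN_IDX(1) == 2, UDC_EPOUT_IDX(1) == 3, and so on
 * up to PCH_UDC_EP_NUM entries.
 */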
192
193 /* Number of endpoint */
194 #define PCH_UDC_EP_NUM 32 /* Total number of EPs (16 IN,16 OUT) */
195 #define PCH_UDC_USED_EP_NUM 4 /* EP number of EP's really used */
196 /* Length Value */
197 #define PCH_UDC_BRLEN 0x0F /* Burst length */
198 #define PCH_UDC_THLEN 0x1F /* Threshold length */
199 /* Value of EP Buffer Size */
200 #define UDC_EP0IN_BUFF_SIZE 16
201 #define UDC_EPIN_BUFF_SIZE 256
202 #define UDC_EP0OUT_BUFF_SIZE 16
203 #define UDC_EPOUT_BUFF_SIZE 256
204 /* Value of EP maximum packet size */
205 #define UDC_EP0IN_MAX_PKT_SIZE 64
206 #define UDC_EP0OUT_MAX_PKT_SIZE 64
207 #define UDC_BULK_MAX_PKT_SIZE 512
208
209 /* DMA */
210 #define DMA_DIR_RX 1 /* DMA for data receive */
211 #define DMA_DIR_TX 2 /* DMA for data transmit */
212 #define DMA_ADDR_INVALID (~(dma_addr_t)0)
213 #define UDC_DMA_MAXPACKET 65536 /* maximum packet size for DMA */
214
215 /**
216 * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
217 * for data
218 * @status: Status quadlet
219 * @reserved: Reserved
220 * @dataptr: Data buffer address
221 * @next: Next descriptor
222 */
223 struct pch_udc_data_dma_desc {
224 u32 status;
225 u32 reserved;
226 u32 dataptr;
227 u32 next;
228 };
229
230 /**
231 * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information
232 * for control data
233 * @status: Status
234 * @reserved: Reserved
235 * @request: setup packet data (struct usb_ctrlrequest)
237 */
238 struct pch_udc_stp_dma_desc {
239 u32 status;
240 u32 reserved;
241 struct usb_ctrlrequest request;
242 } __attribute((packed));
243
244 /* DMA status definitions */
245 /* Buffer status */
246 #define PCH_UDC_BUFF_STS 0xC0000000
247 #define PCH_UDC_BS_HST_RDY 0x00000000
248 #define PCH_UDC_BS_DMA_BSY 0x40000000
249 #define PCH_UDC_BS_DMA_DONE 0x80000000
250 #define PCH_UDC_BS_HST_BSY 0xC0000000
251 /* Rx/Tx Status */
252 #define PCH_UDC_RXTX_STS 0x30000000
253 #define PCH_UDC_RTS_SUCC 0x00000000
254 #define PCH_UDC_RTS_DESERR 0x10000000
255 #define PCH_UDC_RTS_BUFERR 0x30000000
256 /* Last Descriptor Indication */
257 #define PCH_UDC_DMA_LAST 0x08000000
258 /* Number of Rx/Tx Bytes Mask */
259 #define PCH_UDC_RXTX_BYTES 0x0000ffff
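/*
 * Layout sketch of a data descriptor status word, derived from the masks
 * above (comment added as a reading aid, not part of the original source):
 *
 *   bits 31:30  buffer state    (PCH_UDC_BUFF_STS: HST_RDY/DMA_BSY/...)
 *   bits 29:28  rx/tx result    (PCH_UDC_RXTX_STS)
 *   bit  27     last descriptor (PCH_UDC_DMA_LAST)
 *   bits 15:0   byte count      (PCH_UDC_RXTX_BYTES)
 *
 * e.g. pch_udc_create_dma_chain() below fills in
 *   td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
 */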
260
261 /**
262 * struct pch_udc_cfg_data - Structure to hold current configuration
263 * and interface information
264 * @cur_cfg: current configuration in use
265 * @cur_intf: current interface in use
266 * @cur_alt: current alt interface in use
267 */
268 struct pch_udc_cfg_data {
269 u16 cur_cfg;
270 u16 cur_intf;
271 u16 cur_alt;
272 };
273
274 /**
275 * struct pch_udc_ep - Structure holding a PCH USB device Endpoint information
276 * @ep: embedded usb endpoint (struct usb_ep)
277 * @td_stp_phys: for setup request
278 * @td_data_phys: for data request
279 * @td_stp: for setup request
280 * @td_data: for data request
281 * @dev: reference to device struct
282 * @offset_addr: offset address of ep register
284 * @queue: queue for requests
285 * @num: endpoint number
286 * @in: endpoint is IN
287 * @halted: endpoint halted?
288 * @epsts: Endpoint status
289 */
290 struct pch_udc_ep {
291 struct usb_ep ep;
292 dma_addr_t td_stp_phys;
293 dma_addr_t td_data_phys;
294 struct pch_udc_stp_dma_desc *td_stp;
295 struct pch_udc_data_dma_desc *td_data;
296 struct pch_udc_dev *dev;
297 unsigned long offset_addr;
298 struct list_head queue;
299 unsigned num:5,
300 in:1,
301 halted:1;
302 unsigned long epsts;
303 };
304
305 /**
306 * struct pch_vbus_gpio_data - Structure holding GPIO information
307 * for detecting VBUS
308 * @port: gpio port number
309 * @intr: gpio interrupt number
310 * @irq_work_fall: Structure for WorkQueue
311 * @irq_work_rise: Structure for WorkQueue
312 */
313 struct pch_vbus_gpio_data {
314 int port;
315 int intr;
316 struct work_struct irq_work_fall;
317 struct work_struct irq_work_rise;
318 };
319
320 /**
321 * struct pch_udc_dev - Structure holding complete information
322 * of the PCH USB device
323 * @gadget: gadget driver data
324 * @driver: reference to gadget driver bound
325 * @pdev: reference to the PCI device
326 * @ep: array of endpoints
327 * @lock: protects all state
328 * @active: enabled the PCI device
329 * @stall: stall requested
330 * @prot_stall: protocol stall requested
331 * @irq_registered: irq registered with system
332 * @mem_region: device memory mapped
333 * @registered: driver registered with system
334 * @suspended: driver in suspended state
335 * @connected: gadget driver associated
336 * @vbus_session: required vbus_session state
337 * @set_cfg_not_acked: pending acknowledgement for setup
338 * @waiting_zlp_ack: pending acknowledgement for ZLP
339 * @data_requests: DMA pool for data requests
340 * @stp_requests: DMA pool for setup requests
341 * @dma_addr: DMA address for received data
342 * @ep0out_buf: Buffer for DMA
343 * @setup_data: Received setup data
344 * @phys_addr: physical address of device memory
345 * @base_addr: virtual address of mapped device memory
346 * @irq: IRQ line for the device
347 * @cfg_data: current cfg, intf, and alt in use
348 * @vbus_gpio: GPIO information for detecting VBUS
349 */
350 struct pch_udc_dev {
351 struct usb_gadget gadget;
352 struct usb_gadget_driver *driver;
353 struct pci_dev *pdev;
354 struct pch_udc_ep ep[PCH_UDC_EP_NUM];
355 spinlock_t lock; /* protects all state */
356 unsigned active:1,
357 stall:1,
358 prot_stall:1,
359 irq_registered:1,
360 mem_region:1,
361 registered:1,
362 suspended:1,
363 connected:1,
364 vbus_session:1,
365 set_cfg_not_acked:1,
366 waiting_zlp_ack:1;
367 struct pci_pool *data_requests;
368 struct pci_pool *stp_requests;
369 dma_addr_t dma_addr;
370 void *ep0out_buf;
371 struct usb_ctrlrequest setup_data;
372 unsigned long phys_addr;
373 void __iomem *base_addr;
374 unsigned irq;
375 struct pch_udc_cfg_data cfg_data;
376 struct pch_vbus_gpio_data vbus_gpio;
377 };
378
379 #define PCH_UDC_PCI_BAR 1
380 #define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
381 #define PCI_VENDOR_ID_ROHM 0x10DB
382 #define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
383 #define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
384
385 static const char ep0_string[] = "ep0in";
386 static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */
387 struct pch_udc_dev *pch_udc; /* pointer to device object */
388 static bool speed_fs;
389 module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
390 MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
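/*
 * Usage note (added for clarity; the module name is assumed from the file
 * name): loading the driver with, for example,
 *
 *   modprobe pch_udc speed_fs=1
 *
 * makes pch_udc_init() below program UDC_DEVCFG_SPD_FS instead of the
 * default UDC_DEVCFG_SPD_HS.
 */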
391
392 /**
393 * struct pch_udc_request - Structure holding a PCH USB device request packet
394 * @req: embedded ep request
395 * @td_data_phys: phys. address
396 * @td_data: first dma desc. of chain
397 * @td_data_last: last dma desc. of chain
398 * @queue: associated queue
399 * @dma_going: DMA in progress for request
400 * @dma_mapped: DMA memory mapped for request
401 * @dma_done: DMA completed for request
402 * @chain_len: chain length
403 * @buf: Buffer memory for align adjustment
404 * @dma: DMA memory for align adjustment
405 */
406 struct pch_udc_request {
407 struct usb_request req;
408 dma_addr_t td_data_phys;
409 struct pch_udc_data_dma_desc *td_data;
410 struct pch_udc_data_dma_desc *td_data_last;
411 struct list_head queue;
412 unsigned dma_going:1,
413 dma_mapped:1,
414 dma_done:1;
415 unsigned chain_len;
416 void *buf;
417 dma_addr_t dma;
418 };
419
420 static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
421 {
422 return ioread32(dev->base_addr + reg);
423 }
424
425 static inline void pch_udc_writel(struct pch_udc_dev *dev,
426 unsigned long val, unsigned long reg)
427 {
428 iowrite32(val, dev->base_addr + reg);
429 }
430
431 static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
432 unsigned long reg,
433 unsigned long bitmask)
434 {
435 pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
436 }
437
438 static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
439 unsigned long reg,
440 unsigned long bitmask)
441 {
442 pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
443 }
444
445 static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
446 {
447 return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
448 }
449
450 static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
451 unsigned long val, unsigned long reg)
452 {
453 iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
454 }
455
456 static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
457 unsigned long reg,
458 unsigned long bitmask)
459 {
460 pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
461 }
462
463 static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
464 unsigned long reg,
465 unsigned long bitmask)
466 {
467 pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
468 }
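/*
 * Note added for clarity (not part of the original source): the
 * *_bit_set() and *_bit_clr() helpers above are simple read-modify-write
 * wrappers around ioread32()/iowrite32().  For example,
 * pch_udc_rmt_wakeup() below signals resume with
 *
 *   pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
 */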
469
470 /**
471 * pch_udc_csr_busy() - Wait till idle.
472 * @dev: Reference to pch_udc_dev structure
473 */
474 static void pch_udc_csr_busy(struct pch_udc_dev *dev)
475 {
476 unsigned int count = 200;
477
478 /* Wait till idle */
479 while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
480 && --count)
481 cpu_relax();
482 if (!count)
483 dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
484 }
485
486 /**
487 * pch_udc_write_csr() - Write the command and status registers.
488 * @dev: Reference to pch_udc_dev structure
489 * @val: value to be written to CSR register
490 * @ep: index of the endpoint CSR register
491 */
492 static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
493 unsigned int ep)
494 {
495 unsigned long reg = PCH_UDC_CSR(ep);
496
497 pch_udc_csr_busy(dev); /* Wait till idle */
498 pch_udc_writel(dev, val, reg);
499 pch_udc_csr_busy(dev); /* Wait till idle */
500 }
501
502 /**
503 * pch_udc_read_csr() - Read the command and status registers.
504 * @dev: Reference to pch_udc_dev structure
505 * @ep: index of the endpoint CSR register
506 *
507 * Return codes: content of CSR register
508 */
509 static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
510 {
511 unsigned long reg = PCH_UDC_CSR(ep);
512
513 pch_udc_csr_busy(dev); /* Wait till idle */
514 pch_udc_readl(dev, reg); /* Dummy read */
515 pch_udc_csr_busy(dev); /* Wait till idle */
516 return pch_udc_readl(dev, reg);
517 }
518
519 /**
520 * pch_udc_rmt_wakeup() - Initiate for remote wakeup
521 * @dev: Reference to pch_udc_dev structure
522 */
523 static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
524 {
525 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
526 mdelay(1);
527 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
528 }
529
530 /**
531 * pch_udc_get_frame() - Get the current frame from device status register
532 * @dev: Reference to pch_udc_dev structure
533 * Return: the current frame number
534 */
535 static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
536 {
537 u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
538 return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
539 }
540
541 /**
542 * pch_udc_clear_selfpowered() - Clear the self power control
543 * @dev: Reference to pch_udc_regs structure
544 */
545 static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
546 {
547 pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
548 }
549
550 /**
551 * pch_udc_set_selfpowered() - Set the self power control
552 * @dev: Reference to pch_udc_regs structure
553 */
554 static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
555 {
556 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
557 }
558
559 /**
560 * pch_udc_set_disconnect() - Set the disconnect status.
561 * @dev: Reference to pch_udc_regs structure
562 */
563 static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
564 {
565 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
566 }
567
568 /**
569 * pch_udc_clear_disconnect() - Clear the disconnect status.
570 * @dev: Reference to pch_udc_regs structure
571 */
572 static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
573 {
574 /* Clear the disconnect */
575 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
576 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
577 mdelay(1);
578 /* Resume USB signalling */
579 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
580 }
581
582 /**
583 * pch_udc_reconnect() - This API initializes usb device controller,
584 * and clears the disconnect status.
585 * @dev: Reference to pch_udc_regs structure
586 */
587 static void pch_udc_init(struct pch_udc_dev *dev);
588 static void pch_udc_reconnect(struct pch_udc_dev *dev)
589 {
590 pch_udc_init(dev);
591
592 /* enable device interrupts */
593 /* pch_udc_enable_interrupts() */
594 pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
595 UDC_DEVINT_UR | UDC_DEVINT_ENUM);
596
597 /* Clear the disconnect */
598 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
599 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
600 mdelay(1);
601 /* Resume USB signalling */
602 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
603 }
604
605 /**
606 * pch_udc_vbus_session() - Set or clear the disconnect status.
607 * @dev: Reference to pch_udc_regs structure
608 * @is_active: Parameter specifying the action
609 * 0: indicating VBUS power is ending
610 * !0: indicating VBUS power is starting
611 */
612 static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
613 int is_active)
614 {
615 if (is_active) {
616 pch_udc_reconnect(dev);
617 dev->vbus_session = 1;
618 } else {
619 if (dev->driver && dev->driver->disconnect) {
620 spin_unlock(&dev->lock);
621 dev->driver->disconnect(&dev->gadget);
622 spin_lock(&dev->lock);
623 }
624 pch_udc_set_disconnect(dev);
625 dev->vbus_session = 0;
626 }
627 }
628
629 /**
630 * pch_udc_ep_set_stall() - Set the stall of endpoint
631 * @ep: Reference to structure of type pch_udc_ep_regs
632 */
633 static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
634 {
635 if (ep->in) {
636 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
637 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
638 } else {
639 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
640 }
641 }
642
643 /**
644 * pch_udc_ep_clear_stall() - Clear the stall of endpoint
645 * @ep: Reference to structure of type pch_udc_ep_regs
646 */
647 static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
648 {
649 /* Clear the stall */
650 pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
651 /* Clear NAK by writing CNAK */
652 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
653 }
654
655 /**
656 * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint
657 * @ep: Reference to structure of type pch_udc_ep_regs
658 * @type: Type of endpoint
659 */
660 static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
661 u8 type)
662 {
663 pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
664 UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
665 }
666
667 /**
668 * pch_udc_ep_set_bufsz() - Set the buffer size for the endpoint
669 * @ep: Reference to structure of type pch_udc_ep_regs
670 * @buf_size: The buffer word size
 * @ep_in: 1 if the endpoint is IN, 0 if OUT
671 */
672 static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
673 u32 buf_size, u32 ep_in)
674 {
675 u32 data;
676 if (ep_in) {
677 data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
678 data = (data & 0xffff0000) | (buf_size & 0xffff);
679 pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
680 } else {
681 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
682 data = (buf_size << 16) | (data & 0xffff);
683 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
684 }
685 }
686
687 /**
688 * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
689 * @ep: Reference to structure of type pch_udc_ep_regs
690 * @pkt_size: The packet byte size
691 */
692 static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
693 {
694 u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
695 data = (data & 0xffff0000) | (pkt_size & 0xffff);
696 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
697 }
698
699 /**
700 * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint
701 * @ep: Reference to structure of type pch_udc_ep_regs
702 * @addr: DMA address of the setup buffer
703 */
704 static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
705 {
706 pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
707 }
708
709 /**
710 * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint
711 * @ep: Reference to structure of type pch_udc_ep_regs
712 * @addr: DMA address of the data descriptor
713 */
714 static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
715 {
716 pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
717 }
718
719 /**
720 * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint
721 * @ep: Reference to structure of type pch_udc_ep_regs
722 */
723 static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
724 {
725 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
726 }
727
728 /**
729 * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint
730 * @ep: Reference to structure of type pch_udc_ep_regs
731 */
732 static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
733 {
734 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
735 }
736
737 /**
738 * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint
739 * @ep: Reference to structure of type pch_udc_ep_regs
740 */
741 static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
742 {
743 pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
744 }
745
746 /**
747 * pch_udc_set_dma() - Set the 'TDE' or RDE bit of device control
748 * register depending on the direction specified
749 * @dev: Reference to structure of type pch_udc_regs
750 * @dir: whether Tx or Rx
751 * DMA_DIR_RX: Receive
752 * DMA_DIR_TX: Transmit
753 */
754 static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
755 {
756 if (dir == DMA_DIR_RX)
757 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
758 else if (dir == DMA_DIR_TX)
759 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
760 }
761
762 /**
763 * pch_udc_clear_dma() - Clear the 'TDE' or RDE bit of device control
764 * register depending on the direction specified
765 * @dev: Reference to structure of type pch_udc_regs
766 * @dir: Whether Tx or Rx
767 * DMA_DIR_RX: Receive
768 * DMA_DIR_TX: Transmit
769 */
770 static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
771 {
772 if (dir == DMA_DIR_RX)
773 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
774 else if (dir == DMA_DIR_TX)
775 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
776 }
777
778 /**
779 * pch_udc_set_csr_done() - Set the device control register
780 * CSR done field (bit 13)
781 * @dev: reference to structure of type pch_udc_regs
782 */
783 static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
784 {
785 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
786 }
787
788 /**
789 * pch_udc_disable_interrupts() - Disables the specified interrupts
790 * @dev: Reference to structure of type pch_udc_regs
791 * @mask: Mask to disable interrupts
792 */
793 static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
794 u32 mask)
795 {
796 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
797 }
798
799 /**
800 * pch_udc_enable_interrupts() - Enable the specified interrupts
801 * @dev: Reference to structure of type pch_udc_regs
802 * @mask: Mask to enable interrupts
803 */
804 static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
805 u32 mask)
806 {
807 pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
808 }
809
810 /**
811 * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts
812 * @dev: Reference to structure of type pch_udc_regs
813 * @mask: Mask to disable interrupts
814 */
815 static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
816 u32 mask)
817 {
818 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
819 }
820
821 /**
822 * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts
823 * @dev: Reference to structure of type pch_udc_regs
824 * @mask: Mask to enable interrupts
825 */
826 static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
827 u32 mask)
828 {
829 pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
830 }
831
832 /**
833 * pch_udc_read_device_interrupts() - Read the device interrupts
834 * @dev: Reference to structure of type pch_udc_regs
835 * Return: The device interrupts
836 */
837 static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
838 {
839 return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
840 }
841
842 /**
843 * pch_udc_write_device_interrupts() - Write device interrupts
844 * @dev: Reference to structure of type pch_udc_regs
845 * @val: The value to be written to interrupt register
846 */
847 static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
848 u32 val)
849 {
850 pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
851 }
852
853 /**
854 * pch_udc_read_ep_interrupts() - Read the endpoint interrupts
855 * @dev: Reference to structure of type pch_udc_regs
856 * Return: The endpoint interrupts
857 */
858 static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
859 {
860 return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
861 }
862
863 /**
864 * pch_udc_write_ep_interrupts() - Clear endpoint interrupts
865 * @dev: Reference to structure of type pch_udc_regs
866 * @val: The value to be written to interrupt register
867 */
868 static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
869 u32 val)
870 {
871 pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
872 }
873
874 /**
875 * pch_udc_read_device_status() - Read the device status
876 * @dev: Reference to structure of type pch_udc_regs
877 * Return: The device status
878 */
879 static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
880 {
881 return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
882 }
883
884 /**
885 * pch_udc_read_ep_control() - Read the endpoint control
886 * @ep: Reference to structure of type pch_udc_ep_regs
887 * Return: The endpoint control register value
888 */
889 static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
890 {
891 return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
892 }
893
894 /**
895 * pch_udc_clear_ep_control() - Clear the endpoint control register
896 * @ep: Reference to structure of type pch_udc_ep_regs
898 */
899 static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
900 {
901 return pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
902 }
903
904 /**
905 * pch_udc_read_ep_status() - Read the endpoint status
906 * @ep: Reference to structure of type pch_udc_ep_regs
907 * Return: The endpoint status
908 */
909 static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
910 {
911 return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
912 }
913
914 /**
915 * pch_udc_clear_ep_status() - Clear the endpoint status
916 * @ep: Reference to structure of type pch_udc_ep_regs
917 * @stat: Endpoint status
918 */
919 static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
920 u32 stat)
921 {
922 return pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
923 }
924
925 /**
926 * pch_udc_ep_set_nak() - Set the bit 7 (SNAK field)
927 * of the endpoint control register
928 * @ep: Reference to structure of type pch_udc_ep_regs
929 */
930 static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
931 {
932 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
933 }
934
935 /**
936 * pch_udc_ep_clear_nak() - Set the bit 8 (CNAK field)
937 * of the endpoint control register
938 * @ep: reference to structure of type pch_udc_ep_regs
939 */
940 static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
941 {
942 unsigned int loopcnt = 0;
943 struct pch_udc_dev *dev = ep->dev;
944
945 if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
946 return;
947 if (!ep->in) {
948 loopcnt = 10000;
949 while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
950 --loopcnt)
951 udelay(5);
952 if (!loopcnt)
953 dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
954 __func__);
955 }
956 loopcnt = 10000;
957 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
958 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
959 udelay(5);
960 }
961 if (!loopcnt)
962 dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
963 __func__, ep->num, (ep->in ? "in" : "out"));
964 }
965
966 /**
967 * pch_udc_ep_fifo_flush() - Flush the endpoint fifo
968 * @ep: reference to structure of type pch_udc_ep_regs
969 * @dir: direction of endpoint
970 * 0: endpoint is OUT
971 * !0: endpoint is IN
972 */
973 static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
974 {
975 if (dir) { /* IN ep */
976 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
977 return;
978 }
979 }
980
981 /**
982 * pch_udc_ep_enable() - This API enables the endpoint
983 * @ep: Reference to structure of type pch_udc_ep
 * @cfg: Reference to the current configuration data
984 * @desc: endpoint descriptor
985 */
986 static void pch_udc_ep_enable(struct pch_udc_ep *ep,
987 struct pch_udc_cfg_data *cfg,
988 const struct usb_endpoint_descriptor *desc)
989 {
990 u32 val = 0;
991 u32 buff_size = 0;
992
993 pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
994 if (ep->in)
995 buff_size = UDC_EPIN_BUFF_SIZE;
996 else
997 buff_size = UDC_EPOUT_BUFF_SIZE;
998 pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
999 pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
1000 pch_udc_ep_set_nak(ep);
1001 pch_udc_ep_fifo_flush(ep, ep->in);
1002 /* Configure the endpoint */
1003 val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
1004 ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
1005 UDC_CSR_NE_TYPE_SHIFT) |
1006 (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
1007 (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
1008 (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
1009 usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
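	/*
	 * Worked example (comment added for clarity): a bulk IN endpoint 1
	 * with wMaxPacketSize 512 in configuration 1, interface 0, alt 0
	 * packs to num=1 | dir=1<<4 | type=2<<5 | cfg=1<<7 | maxpkt=512<<19,
	 * matching the UDC_CSR_NE_* shifts/masks defined at the top of the
	 * file.
	 */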
1010
1011 if (ep->in)
1012 pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
1013 else
1014 pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
1015 }
1016
1017 /**
1018 * pch_udc_ep_disable() - This API disables the endpoint
1019 * @ep: Reference to structure of type pch_udc_ep
1020 */
1021 static void pch_udc_ep_disable(struct pch_udc_ep *ep)
1022 {
1023 if (ep->in) {
1024 /* flush the fifo */
1025 pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
1026 /* set NAK */
1027 pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1028 pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
1029 } else {
1030 /* set NAK */
1031 pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1032 }
1033 /* reset desc pointer */
1034 pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
1035 }
1036
1037 /**
1038 * pch_udc_wait_ep_stall() - Wait until the endpoint STALL bit clears.
1039 * @ep: Reference to pch_udc_ep structure
1040 */
1041 static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
1042 {
1043 unsigned int count = 10000;
1044
1045 /* Wait till idle */
1046 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
1047 udelay(5);
1048 if (!count)
1049 dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
1050 }
1051
1052 /**
1053 * pch_udc_init() - This API initializes usb device controller
1054 * @dev: Reference to pch_udc_regs structure
1055 */
1056 static void pch_udc_init(struct pch_udc_dev *dev)
1057 {
1058 if (NULL == dev) {
1059 pr_err("%s: Invalid address\n", __func__);
1060 return;
1061 }
1062 /* Soft Reset and Reset PHY */
1063 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1064 pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
1065 mdelay(1);
1066 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1067 pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
1068 mdelay(1);
1069 /* mask and clear all device interrupts */
1070 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1071 pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
1072
1073 /* mask and clear all ep interrupts */
1074 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1075 pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1076
1077 /* enable dynamic CSR programming, self powered and device speed */
1078 if (speed_fs)
1079 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1080 UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
1081 else /* default high speed */
1082 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1083 UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
1084 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
1085 (PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
1086 (PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
1087 UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
1088 UDC_DEVCTL_THE);
1089 }
1090
1091 /**
1092 * pch_udc_exit() - This API shuts down the usb device controller
1093 * @dev: Reference to pch_udc_regs structure
1094 */
1095 static void pch_udc_exit(struct pch_udc_dev *dev)
1096 {
1097 /* mask all device interrupts */
1098 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1099 /* mask all ep interrupts */
1100 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1101 /* put device in disconnected state */
1102 pch_udc_set_disconnect(dev);
1103 }
1104
1105 /**
1106 * pch_udc_pcd_get_frame() - This API is invoked to get the current frame number
1107 * @gadget: Reference to the gadget driver
1108 *
1109 * Return codes:
1110 * >= 0: The current frame number
1111 * -EINVAL: If the gadget passed is NULL
1112 */
1113 static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1114 {
1115 struct pch_udc_dev *dev;
1116
1117 if (!gadget)
1118 return -EINVAL;
1119 dev = container_of(gadget, struct pch_udc_dev, gadget);
1120 return pch_udc_get_frame(dev);
1121 }
1122
1123 /**
1124 * pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup
1125 * @gadget: Reference to the gadget driver
1126 *
1127 * Return codes:
1128 * 0: Success
1129 * -EINVAL: If the gadget passed is NULL
1130 */
1131 static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
1132 {
1133 struct pch_udc_dev *dev;
1134 unsigned long flags;
1135
1136 if (!gadget)
1137 return -EINVAL;
1138 dev = container_of(gadget, struct pch_udc_dev, gadget);
1139 spin_lock_irqsave(&dev->lock, flags);
1140 pch_udc_rmt_wakeup(dev);
1141 spin_unlock_irqrestore(&dev->lock, flags);
1142 return 0;
1143 }
1144
1145 /**
1146 * pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device
1147 * is self powered or not
1148 * @gadget: Reference to the gadget driver
1149 * @value: Specifies self powered or not
1150 *
1151 * Return codes:
1152 * 0: Success
1153 * -EINVAL: If the gadget passed is NULL
1154 */
1155 static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
1156 {
1157 struct pch_udc_dev *dev;
1158
1159 if (!gadget)
1160 return -EINVAL;
1161 dev = container_of(gadget, struct pch_udc_dev, gadget);
1162 if (value)
1163 pch_udc_set_selfpowered(dev);
1164 else
1165 pch_udc_clear_selfpowered(dev);
1166 return 0;
1167 }
1168
1169 /**
1170 * pch_udc_pcd_pullup() - This API is invoked to make the device
1171 * visible/invisible to the host
1172 * @gadget: Reference to the gadget driver
1173 * @is_on: Specifies whether the pull up is made active or inactive
1174 *
1175 * Return codes:
1176 * 0: Success
1177 * -EINVAL: If the gadget passed is NULL
1178 */
1179 static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
1180 {
1181 struct pch_udc_dev *dev;
1182
1183 if (!gadget)
1184 return -EINVAL;
1185 dev = container_of(gadget, struct pch_udc_dev, gadget);
1186 if (is_on) {
1187 pch_udc_reconnect(dev);
1188 } else {
1189 if (dev->driver && dev->driver->disconnect) {
1190 spin_unlock(&dev->lock);
1191 dev->driver->disconnect(&dev->gadget);
1192 spin_lock(&dev->lock);
1193 }
1194 pch_udc_set_disconnect(dev);
1195 }
1196
1197 return 0;
1198 }
1199
1200 /**
1201 * pch_udc_pcd_vbus_session() - This API is used by a driver for an external
1202 * transceiver (or GPIO) that
1203 * detects a VBUS power session starting/ending
1204 * @gadget: Reference to the gadget driver
1205 * @is_active: specifies whether the session is starting or ending
1206 *
1207 * Return codes:
1208 * 0: Success
1209 * -EINVAL: If the gadget passed is NULL
1210 */
1211 static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
1212 {
1213 struct pch_udc_dev *dev;
1214
1215 if (!gadget)
1216 return -EINVAL;
1217 dev = container_of(gadget, struct pch_udc_dev, gadget);
1218 pch_udc_vbus_session(dev, is_active);
1219 return 0;
1220 }
1221
1222 /**
1223 * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during
1224 * SET_CONFIGURATION calls to
1225 * specify how much power the device can consume
1226 * @gadget: Reference to the gadget driver
1227 * @mA: specifies the current limit in 2mA unit
1228 *
1229 * Return codes:
1230 * -EOPNOTSUPP: always; limiting VBUS current draw is not supported
1232 */
1233 static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
1234 {
1235 return -EOPNOTSUPP;
1236 }
1237
1238 static int pch_udc_start(struct usb_gadget_driver *driver,
1239 int (*bind)(struct usb_gadget *));
1240 static int pch_udc_stop(struct usb_gadget_driver *driver);
1241 static const struct usb_gadget_ops pch_udc_ops = {
1242 .get_frame = pch_udc_pcd_get_frame,
1243 .wakeup = pch_udc_pcd_wakeup,
1244 .set_selfpowered = pch_udc_pcd_selfpowered,
1245 .pullup = pch_udc_pcd_pullup,
1246 .vbus_session = pch_udc_pcd_vbus_session,
1247 .vbus_draw = pch_udc_pcd_vbus_draw,
1248 .start = pch_udc_start,
1249 .stop = pch_udc_stop,
1250 };
1251
1252 /**
1253 * pch_vbus_gpio_get_value() - This API gets value of GPIO port as VBUS status.
1254 * @dev: Reference to the driver structure
1255 *
1256 * Return value:
1257 * 1: VBUS is high
1258 * 0: VBUS is low
1259 * -1: VBUS detection via GPIO is not enabled
1260 */
1261 static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
1262 {
1263 int vbus = 0;
1264
1265 if (dev->vbus_gpio.port)
1266 vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0;
1267 else
1268 vbus = -1;
1269
1270 return vbus;
1271 }
1272
1273 /**
1274 * pch_vbus_gpio_work_fall() - This API keeps watch on VBUS becoming Low.
1275 * If VBUS is Low, disconnect is processed
1276 * @irq_work: Structure for WorkQueue
1277 *
1278 */
1279 static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
1280 {
1281 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1282 struct pch_vbus_gpio_data, irq_work_fall);
1283 struct pch_udc_dev *dev =
1284 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1285 int vbus_saved = -1;
1286 int vbus;
1287 int count;
1288
1289 if (!dev->vbus_gpio.port)
1290 return;
1291
1292 for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
1293 count++) {
1294 vbus = pch_vbus_gpio_get_value(dev);
1295
1296 if ((vbus_saved == vbus) && (vbus == 0)) {
1297 dev_dbg(&dev->pdev->dev, "VBUS fell");
1298 if (dev->driver
1299 && dev->driver->disconnect) {
1300 dev->driver->disconnect(
1301 &dev->gadget);
1302 }
1303 if (dev->vbus_gpio.intr)
1304 pch_udc_init(dev);
1305 else
1306 pch_udc_reconnect(dev);
1307 return;
1308 }
1309 vbus_saved = vbus;
1310 mdelay(PCH_VBUS_INTERVAL);
1311 }
1312 }
1313
1314 /**
1315 * pch_vbus_gpio_work_rise() - This API checks VBUS is High.
1316 * If VBUS is High, connect is processed
1317 * @irq_work: Structure for WorkQueue
1318 *
1319 */
1320 static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
1321 {
1322 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1323 struct pch_vbus_gpio_data, irq_work_rise);
1324 struct pch_udc_dev *dev =
1325 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1326 int vbus;
1327
1328 if (!dev->vbus_gpio.port)
1329 return;
1330
1331 mdelay(PCH_VBUS_INTERVAL);
1332 vbus = pch_vbus_gpio_get_value(dev);
1333
1334 if (vbus == 1) {
1335 dev_dbg(&dev->pdev->dev, "VBUS rose");
1336 pch_udc_reconnect(dev);
1337 return;
1338 }
1339 }
1340
1341 /**
1342 * pch_vbus_gpio_irq() - IRQ handler for GPIO interrupt for changing VBUS
1343 * @irq: Interrupt request number
1344 * @data: Reference to the device structure (struct pch_udc_dev)
1345 *
1346 * Return codes:
1347 * IRQ_HANDLED: VBUS state change handled, work scheduled
1348 * IRQ_NONE: GPIO-based VBUS detection is not in use
1349 */
1350 static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
1351 {
1352 struct pch_udc_dev *dev = (struct pch_udc_dev *)data;
1353
1354 if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
1355 return IRQ_NONE;
1356
1357 if (pch_vbus_gpio_get_value(dev))
1358 schedule_work(&dev->vbus_gpio.irq_work_rise);
1359 else
1360 schedule_work(&dev->vbus_gpio.irq_work_fall);
1361
1362 return IRQ_HANDLED;
1363 }
1364
1365 /**
1366 * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
1367 * @dev: Reference to the driver structure
1368 * @vbus_gpio_port: Number of GPIO port to detect VBUS
1369 *
1370 * Return codes:
1371 * 0: Success
1372 * -EINVAL: GPIO port is invalid or can't be initialized.
1373 */
1374 static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port)
1375 {
1376 int err;
1377 int irq_num = 0;
1378
1379 dev->vbus_gpio.port = 0;
1380 dev->vbus_gpio.intr = 0;
1381
1382 if (vbus_gpio_port <= -1)
1383 return -EINVAL;
1384
1385 err = gpio_is_valid(vbus_gpio_port);
1386 if (!err) {
1387 pr_err("%s: gpio port %d is invalid\n",
1388 __func__, vbus_gpio_port);
1389 return -EINVAL;
1390 }
1391
1392 err = gpio_request(vbus_gpio_port, "pch_vbus");
1393 if (err) {
1394 pr_err("%s: can't request gpio port %d, err: %d\n",
1395 __func__, vbus_gpio_port, err);
1396 return -EINVAL;
1397 }
1398
1399 dev->vbus_gpio.port = vbus_gpio_port;
1400 gpio_direction_input(vbus_gpio_port);
1401 INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);
1402
1403 irq_num = gpio_to_irq(vbus_gpio_port);
1404 if (irq_num > 0) {
1405 irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
1406 err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
1407 "vbus_detect", dev);
1408 if (!err) {
1409 dev->vbus_gpio.intr = irq_num;
1410 INIT_WORK(&dev->vbus_gpio.irq_work_rise,
1411 pch_vbus_gpio_work_rise);
1412 } else {
1413 pr_err("%s: can't request irq %d, err: %d\n",
1414 __func__, irq_num, err);
1415 }
1416 }
1417
1418 return 0;
1419 }
1420
1421 /**
1422 * pch_vbus_gpio_free() - This API frees resources of GPIO port
1423 * @dev: Reference to the driver structure
1424 */
1425 static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
1426 {
1427 if (dev->vbus_gpio.intr)
1428 free_irq(dev->vbus_gpio.intr, dev);
1429
1430 if (dev->vbus_gpio.port)
1431 gpio_free(dev->vbus_gpio.port);
1432 }
1433
1434 /**
1435 * complete_req() - This API is invoked from the driver when processing
1436 * of a request is complete
1437 * @ep: Reference to the endpoint structure
1438 * @req: Reference to the request structure
1439 * @status: Indicates the success/failure of completion
1440 */
1441 static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
1442 int status)
1443 {
1444 struct pch_udc_dev *dev;
1445 unsigned halted = ep->halted;
1446
1447 list_del_init(&req->queue);
1448
1449 /* set new status if pending */
1450 if (req->req.status == -EINPROGRESS)
1451 req->req.status = status;
1452 else
1453 status = req->req.status;
1454
1455 dev = ep->dev;
1456 if (req->dma_mapped) {
1457 if (req->dma == DMA_ADDR_INVALID) {
1458 if (ep->in)
1459 dma_unmap_single(&dev->pdev->dev, req->req.dma,
1460 req->req.length,
1461 DMA_TO_DEVICE);
1462 else
1463 dma_unmap_single(&dev->pdev->dev, req->req.dma,
1464 req->req.length,
1465 DMA_FROM_DEVICE);
1466 req->req.dma = DMA_ADDR_INVALID;
1467 } else {
1468 if (ep->in)
1469 dma_unmap_single(&dev->pdev->dev, req->dma,
1470 req->req.length,
1471 DMA_TO_DEVICE);
1472 else {
1473 dma_unmap_single(&dev->pdev->dev, req->dma,
1474 req->req.length,
1475 DMA_FROM_DEVICE);
1476 memcpy(req->req.buf, req->buf, req->req.length);
1477 }
1478 kfree(req->buf);
1479 req->dma = DMA_ADDR_INVALID;
1480 }
1481 req->dma_mapped = 0;
1482 }
1483 ep->halted = 1;
1484 spin_unlock(&dev->lock);
1485 if (!ep->in)
1486 pch_udc_ep_clear_rrdy(ep);
1487 req->req.complete(&ep->ep, &req->req);
1488 spin_lock(&dev->lock);
1489 ep->halted = halted;
1490 }
1491
1492 /**
1493 * empty_req_queue() - This API empties the request queue of an endpoint
1494 * @ep: Reference to the endpoint structure
1495 */
1496 static void empty_req_queue(struct pch_udc_ep *ep)
1497 {
1498 struct pch_udc_request *req;
1499
1500 ep->halted = 1;
1501 while (!list_empty(&ep->queue)) {
1502 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1503 complete_req(ep, req, -ESHUTDOWN); /* Remove from list */
1504 }
1505 }
1506
1507 /**
1508 * pch_udc_free_dma_chain() - This function frees the DMA chain created
1509 * for the request
1510 * @dev: Reference to the driver structure
1511 * @req: Reference to the request whose chain is to be freed
1512 *
1513 * The first descriptor is not freed here; it is released when the
1514 * request itself is freed.
1515 */
1516 static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1517 struct pch_udc_request *req)
1518 {
1519 struct pch_udc_data_dma_desc *td = req->td_data;
1520 unsigned i = req->chain_len;
1521
1522 dma_addr_t addr2;
1523 dma_addr_t addr = (dma_addr_t)td->next;
1524 td->next = 0x00;
1525 for (; i > 1; --i) {
1526 /* do not free first desc., will be done by free for request */
1527 td = phys_to_virt(addr);
1528 addr2 = (dma_addr_t)td->next;
1529 pci_pool_free(dev->data_requests, td, addr);
1530 td->next = 0x00;
1531 addr = addr2;
1532 }
1533 req->chain_len = 1;
1534 }
1535
1536 /**
1537 * pch_udc_create_dma_chain() - This function creates or reinitializes
1538 * a DMA chain
1539 * @ep: Reference to the endpoint structure
1540 * @req: Reference to the request
1541 * @buf_len: The buffer length
1542 * @gfp_flags: Flags to be used while mapping the data buffer
1543 *
1544 * Return codes:
1545 * 0: success,
1546 * -ENOMEM: pci_pool_alloc invocation fails
1547 */
1548 static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
1549 struct pch_udc_request *req,
1550 unsigned long buf_len,
1551 gfp_t gfp_flags)
1552 {
1553 struct pch_udc_data_dma_desc *td = req->td_data, *last;
1554 unsigned long bytes = req->req.length, i = 0;
1555 dma_addr_t dma_addr;
1556 unsigned len = 1;
1557
1558 if (req->chain_len > 1)
1559 pch_udc_free_dma_chain(ep->dev, req);
1560
1561 if (req->dma == DMA_ADDR_INVALID)
1562 td->dataptr = req->req.dma;
1563 else
1564 td->dataptr = req->dma;
1565
1566 td->status = PCH_UDC_BS_HST_BSY;
1567 for (; ; bytes -= buf_len, ++len) {
1568 td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
1569 if (bytes <= buf_len)
1570 break;
1571 last = td;
1572 td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
1573 &dma_addr);
1574 if (!td)
1575 goto nomem;
1576 i += buf_len;
1577 td->dataptr = req->td_data->dataptr + i;
1578 last->next = dma_addr;
1579 }
1580
1581 req->td_data_last = td;
1582 td->status |= PCH_UDC_DMA_LAST;
1583 td->next = req->td_data_phys;
1584 req->chain_len = len;
1585 return 0;
1586
1587 nomem:
1588 if (len > 1) {
1589 req->chain_len = len;
1590 pch_udc_free_dma_chain(ep->dev, req);
1591 }
1592 req->chain_len = 1;
1593 return -ENOMEM;
1594 }
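/*
 * Sketch of the resulting chain (comment added as a reading aid, not part
 * of the original source): for a request longer than one buffer, td_data
 * is followed by further descriptors allocated from the data_requests
 * pool, each covering at most buf_len bytes; the final descriptor gets
 * PCH_UDC_DMA_LAST set and its ->next is pointed back at td_data_phys.
 */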
1595
1596 /**
1597 * prepare_dma() - This function creates and initializes the DMA chain
1598 * for the request
1599 * @ep: Reference to the endpoint structure
1600 * @req: Reference to the request
1601 * @gfp: Flag to be used while mapping the data buffer
1602 *
1603 * Return codes:
1604 * 0: Success
1605 * Other than 0: linux error number on failure
1606 */
1607 static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1608 gfp_t gfp)
1609 {
1610 int retval;
1611
1612 /* Allocate and create a DMA chain */
1613 retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1614 if (retval) {
1615 pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1616 return retval;
1617 }
1618 if (ep->in)
1619 req->td_data->status = (req->td_data->status &
1620 ~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1621 return 0;
1622 }
1623
1624 /**
1625 * process_zlp() - This function processes zero-length packets
1626 * from the gadget driver
1627 * @ep: Reference to the endpoint structure
1628 * @req: Reference to the request
1629 */
1630 static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
1631 {
1632 struct pch_udc_dev *dev = ep->dev;
1633
1634 /* IN zlp's are handled by hardware */
1635 complete_req(ep, req, 0);
1636
1637 /* if set_config or set_intf is waiting for ack by zlp
1638 * then set CSR_DONE
1639 */
1640 if (dev->set_cfg_not_acked) {
1641 pch_udc_set_csr_done(dev);
1642 dev->set_cfg_not_acked = 0;
1643 }
1644 /* setup command is ACK'ed now by zlp */
1645 if (!dev->stall && dev->waiting_zlp_ack) {
1646 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
1647 dev->waiting_zlp_ack = 0;
1648 }
1649 }
1650
1651 /**
1652 * pch_udc_start_rxrequest() - This function starts a receive request.
1653 * @ep: Reference to the endpoint structure
1654 * @req: Reference to the request structure
1655 */
1656 static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
1657 struct pch_udc_request *req)
1658 {
1659 struct pch_udc_data_dma_desc *td_data;
1660
1661 pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1662 td_data = req->td_data;
1663 /* Set the status bits for all descriptors */
1664 while (1) {
1665 td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
1666 PCH_UDC_BS_HST_RDY;
1667 if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
1668 break;
1669 td_data = phys_to_virt(td_data->next);
1670 }
1671 /* Write the descriptor pointer */
1672 pch_udc_ep_set_ddptr(ep, req->td_data_phys);
1673 req->dma_going = 1;
1674 pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
1675 pch_udc_set_dma(ep->dev, DMA_DIR_RX);
1676 pch_udc_ep_clear_nak(ep);
1677 pch_udc_ep_set_rrdy(ep);
1678 }
1679
1680 /**
1681 * pch_udc_pcd_ep_enable() - This API enables the endpoint. It is called
1682 * from gadget driver
1683 * @usbep: Reference to the USB endpoint structure
1684 * @desc: Reference to the USB endpoint descriptor structure
1685 *
1686 * Return codes:
1687 * 0: Success
1688 * -EINVAL:
1689 * -ESHUTDOWN:
1690 */
1691 static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
1692 const struct usb_endpoint_descriptor *desc)
1693 {
1694 struct pch_udc_ep *ep;
1695 struct pch_udc_dev *dev;
1696 unsigned long iflags;
1697
1698 if (!usbep || (usbep->name == ep0_string) || !desc ||
1699 (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
1700 return -EINVAL;
1701
1702 ep = container_of(usbep, struct pch_udc_ep, ep);
1703 dev = ep->dev;
1704 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1705 return -ESHUTDOWN;
1706 spin_lock_irqsave(&dev->lock, iflags);
1707 ep->ep.desc = desc;
1708 ep->halted = 0;
1709 pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
1710 ep->ep.maxpacket = usb_endpoint_maxp(desc);
1711 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1712 spin_unlock_irqrestore(&dev->lock, iflags);
1713 return 0;
1714 }
1715
1716 /**
1717 * pch_udc_pcd_ep_disable() - This API disables endpoint and is called
1718 * from gadget driver
1719 * @usbep: Reference to the USB endpoint structure
1720 *
1721 * Return codes:
1722 * 0: Success
1723 * -EINVAL:
1724 */
1725 static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
1726 {
1727 struct pch_udc_ep *ep;
1728 struct pch_udc_dev *dev;
1729 unsigned long iflags;
1730
1731 if (!usbep)
1732 return -EINVAL;
1733
1734 ep = container_of(usbep, struct pch_udc_ep, ep);
1735 dev = ep->dev;
1736 if ((usbep->name == ep0_string) || !ep->ep.desc)
1737 return -EINVAL;
1738
1739 spin_lock_irqsave(&ep->dev->lock, iflags);
1740 empty_req_queue(ep);
1741 ep->halted = 1;
1742 pch_udc_ep_disable(ep);
1743 pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1744 ep->ep.desc = NULL;
1745 INIT_LIST_HEAD(&ep->queue);
1746 spin_unlock_irqrestore(&ep->dev->lock, iflags);
1747 return 0;
1748 }
1749
1750 /**
1751 * pch_udc_alloc_request() - This function allocates request structure.
1752 * It is called by gadget driver
1753 * @usbep: Reference to the USB endpoint structure
1754 * @gfp: Flag to be used while allocating memory
1755 *
1756 * Return codes:
1757 * NULL: Failure
1758 * Allocated address: Success
1759 */
1760 static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1761 gfp_t gfp)
1762 {
1763 struct pch_udc_request *req;
1764 struct pch_udc_ep *ep;
1765 struct pch_udc_data_dma_desc *dma_desc;
1766 struct pch_udc_dev *dev;
1767
1768 if (!usbep)
1769 return NULL;
1770 ep = container_of(usbep, struct pch_udc_ep, ep);
1771 dev = ep->dev;
1772 req = kzalloc(sizeof *req, gfp);
1773 if (!req)
1774 return NULL;
1775 req->req.dma = DMA_ADDR_INVALID;
1776 req->dma = DMA_ADDR_INVALID;
1777 INIT_LIST_HEAD(&req->queue);
1778 if (!ep->dev->dma_addr)
1779 return &req->req;
1780 /* ep0 in requests are allocated from data pool here */
1781 dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
1782 &req->td_data_phys);
1783 if (NULL == dma_desc) {
1784 kfree(req);
1785 return NULL;
1786 }
1787 /* prevent from using desc. - set HOST BUSY */
1788 dma_desc->status |= PCH_UDC_BS_HST_BSY;
1789 dma_desc->dataptr = __constant_cpu_to_le32(DMA_ADDR_INVALID);
1790 req->td_data = dma_desc;
1791 req->td_data_last = dma_desc;
1792 req->chain_len = 1;
1793 return &req->req;
1794 }
1795
1796 /**
1797 * pch_udc_free_request() - This function frees request structure.
1798 * It is called by gadget driver
1799 * @usbep: Reference to the USB endpoint structure
1800 * @usbreq: Reference to the USB request
1801 */
1802 static void pch_udc_free_request(struct usb_ep *usbep,
1803 struct usb_request *usbreq)
1804 {
1805 struct pch_udc_ep *ep;
1806 struct pch_udc_request *req;
1807 struct pch_udc_dev *dev;
1808
1809 if (!usbep || !usbreq)
1810 return;
1811 ep = container_of(usbep, struct pch_udc_ep, ep);
1812 req = container_of(usbreq, struct pch_udc_request, req);
1813 dev = ep->dev;
1814 if (!list_empty(&req->queue))
1815 dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1816 __func__, usbep->name, req);
1817 if (req->td_data != NULL) {
1818 if (req->chain_len > 1)
1819 pch_udc_free_dma_chain(ep->dev, req);
1820 pci_pool_free(ep->dev->data_requests, req->td_data,
1821 req->td_data_phys);
1822 }
1823 kfree(req);
1824 }
1825
1826 /**
1827 * pch_udc_pcd_queue() - This function queues a request packet. It is called
1828 * by gadget driver
1829 * @usbep: Reference to the USB endpoint structure
1830 * @usbreq: Reference to the USB request
1831 * @gfp: Flag to be used while mapping the data buffer
1832 *
1833 * Return codes:
1834 * 0: Success
1835 * linux error number: Failure
1836 */
1837 static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1838 gfp_t gfp)
1839 {
1840 int retval = 0;
1841 struct pch_udc_ep *ep;
1842 struct pch_udc_dev *dev;
1843 struct pch_udc_request *req;
1844 unsigned long iflags;
1845
1846 if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1847 return -EINVAL;
1848 ep = container_of(usbep, struct pch_udc_ep, ep);
1849 dev = ep->dev;
1850 if (!ep->ep.desc && ep->num)
1851 return -EINVAL;
1852 req = container_of(usbreq, struct pch_udc_request, req);
1853 if (!list_empty(&req->queue))
1854 return -EINVAL;
1855 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1856 return -ESHUTDOWN;
1857 spin_lock_irqsave(&dev->lock, iflags);
1858 /* map the buffer for dma */
1859 if (usbreq->length &&
1860 ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
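		/*
		 * The data descriptors appear to require 4-byte aligned
		 * buffers: aligned buffers are DMA-mapped in place, while
		 * unaligned ones are copied into a driver-allocated bounce
		 * buffer (req->buf) and that copy is mapped instead.
		 */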
1861 if (!((unsigned long)(usbreq->buf) & 0x03)) {
1862 if (ep->in)
1863 usbreq->dma = dma_map_single(&dev->pdev->dev,
1864 usbreq->buf,
1865 usbreq->length,
1866 DMA_TO_DEVICE);
1867 else
1868 usbreq->dma = dma_map_single(&dev->pdev->dev,
1869 usbreq->buf,
1870 usbreq->length,
1871 DMA_FROM_DEVICE);
1872 } else {
1873 req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
1874 if (!req->buf) {
1875 retval = -ENOMEM;
1876 goto probe_end;
1877 }
1878 if (ep->in) {
1879 memcpy(req->buf, usbreq->buf, usbreq->length);
1880 req->dma = dma_map_single(&dev->pdev->dev,
1881 req->buf,
1882 usbreq->length,
1883 DMA_TO_DEVICE);
1884 } else
1885 req->dma = dma_map_single(&dev->pdev->dev,
1886 req->buf,
1887 usbreq->length,
1888 DMA_FROM_DEVICE);
1889 }
1890 req->dma_mapped = 1;
1891 }
1892 if (usbreq->length > 0) {
1893 retval = prepare_dma(ep, req, GFP_ATOMIC);
1894 if (retval)
1895 goto probe_end;
1896 }
1897 usbreq->actual = 0;
1898 usbreq->status = -EINPROGRESS;
1899 req->dma_done = 0;
1900 if (list_empty(&ep->queue) && !ep->halted) {
1901 /* no pending transfer, so start this req */
1902 if (!usbreq->length) {
1903 process_zlp(ep, req);
1904 retval = 0;
1905 goto probe_end;
1906 }
1907 if (!ep->in) {
1908 pch_udc_start_rxrequest(ep, req);
1909 } else {
1910 /*
1911 			 * For IN transfers the descriptors will be programmed
1912 			 * and the P bit will be set when
1913 			 * we get an IN token
1914 */
1915 pch_udc_wait_ep_stall(ep);
1916 pch_udc_ep_clear_nak(ep);
1917 pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1918 }
1919 }
1920 /* Now add this request to the ep's pending requests */
1921 if (req != NULL)
1922 list_add_tail(&req->queue, &ep->queue);
1923
1924 probe_end:
1925 spin_unlock_irqrestore(&dev->lock, iflags);
1926 return retval;
1927 }
1928
1929 /**
1930  * pch_udc_pcd_dequeue() - This function de-queues (cancels) a request packet.
1931  *				It is called by the gadget driver
1932 * @usbep: Reference to the USB endpoint structure
1933 * @usbreq: Reference to the USB request
1934 *
1935 * Return codes:
1936 * 0: Success
1937 * linux error number: Failure
1938 */
1939 static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1940 struct usb_request *usbreq)
1941 {
1942 struct pch_udc_ep *ep;
1943 struct pch_udc_request *req;
1944 struct pch_udc_dev *dev;
1945 unsigned long flags;
1946 int ret = -EINVAL;
1947
1948 	ep = container_of(usbep, struct pch_udc_ep, ep);
1949 	if (!usbep || !usbreq || (!ep->ep.desc && ep->num))
1950 		return ret;
1951 	dev = ep->dev;
1952 req = container_of(usbreq, struct pch_udc_request, req);
1953 spin_lock_irqsave(&ep->dev->lock, flags);
1954 /* make sure it's still queued on this endpoint */
1955 list_for_each_entry(req, &ep->queue, queue) {
1956 if (&req->req == usbreq) {
1957 pch_udc_ep_set_nak(ep);
1958 if (!list_empty(&req->queue))
1959 complete_req(ep, req, -ECONNRESET);
1960 ret = 0;
1961 break;
1962 }
1963 }
1964 spin_unlock_irqrestore(&ep->dev->lock, flags);
1965 return ret;
1966 }
1967
1968 /**
1969  * pch_udc_pcd_set_halt() - This function sets or clears the endpoint halt
1970  *				feature
1971 * @usbep: Reference to the USB endpoint structure
1972 * @halt: Specifies whether to set or clear the feature
1973 *
1974 * Return codes:
1975 * 0: Success
1976 * linux error number: Failure
1977 */
1978 static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
1979 {
1980 struct pch_udc_ep *ep;
1981 struct pch_udc_dev *dev;
1982 unsigned long iflags;
1983 int ret;
1984
1985 if (!usbep)
1986 return -EINVAL;
1987 ep = container_of(usbep, struct pch_udc_ep, ep);
1988 dev = ep->dev;
1989 if (!ep->ep.desc && !ep->num)
1990 return -EINVAL;
1991 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1992 return -ESHUTDOWN;
1993 spin_lock_irqsave(&udc_stall_spinlock, iflags);
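	/* A halt may only be changed while no requests are queued on the
	 * endpoint; otherwise -EAGAIN is returned to the caller. */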
1994 if (list_empty(&ep->queue)) {
1995 if (halt) {
1996 if (ep->num == PCH_UDC_EP0)
1997 ep->dev->stall = 1;
1998 pch_udc_ep_set_stall(ep);
1999 pch_udc_enable_ep_interrupts(ep->dev,
2000 PCH_UDC_EPINT(ep->in,
2001 ep->num));
2002 } else {
2003 pch_udc_ep_clear_stall(ep);
2004 }
2005 ret = 0;
2006 } else {
2007 ret = -EAGAIN;
2008 }
2009 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2010 return ret;
2011 }
2012
2013 /**
2014  * pch_udc_pcd_set_wedge() - This function sets the endpoint wedge feature
2015  *				(a halt that is not cleared by a clear-halt request alone)
2016  * @usbep: Reference to the USB endpoint structure
2018 *
2019 * Return codes:
2020 * 0: Success
2021 * linux error number: Failure
2022 */
2023 static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
2024 {
2025 struct pch_udc_ep *ep;
2026 struct pch_udc_dev *dev;
2027 unsigned long iflags;
2028 int ret;
2029
2030 if (!usbep)
2031 return -EINVAL;
2032 ep = container_of(usbep, struct pch_udc_ep, ep);
2033 dev = ep->dev;
2034 if (!ep->ep.desc && !ep->num)
2035 return -EINVAL;
2036 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
2037 return -ESHUTDOWN;
2038 spin_lock_irqsave(&udc_stall_spinlock, iflags);
2039 if (!list_empty(&ep->queue)) {
2040 ret = -EAGAIN;
2041 } else {
2042 if (ep->num == PCH_UDC_EP0)
2043 ep->dev->stall = 1;
2044 pch_udc_ep_set_stall(ep);
2045 pch_udc_enable_ep_interrupts(ep->dev,
2046 PCH_UDC_EPINT(ep->in, ep->num));
2047 ep->dev->prot_stall = 1;
2048 ret = 0;
2049 }
2050 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2051 return ret;
2052 }
2053
2054 /**
2055  * pch_udc_pcd_fifo_flush() - This function flushes the FIFO of the specified endpoint
2056 * @usbep: Reference to the USB endpoint structure
2057 */
2058 static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
2059 {
2060 struct pch_udc_ep *ep;
2061
2062 if (!usbep)
2063 return;
2064
2065 ep = container_of(usbep, struct pch_udc_ep, ep);
2066 if (ep->ep.desc || !ep->num)
2067 pch_udc_ep_fifo_flush(ep, ep->in);
2068 }
2069
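/*
 * Endpoint operations exported to the gadget core. A function driver reaches
 * these through the generic usb_ep_*() wrappers, roughly (illustrative
 * sketch, names as in the gadget API):
 *
 *	req = usb_ep_alloc_request(ep, GFP_ATOMIC);	=> pch_udc_alloc_request()
 *	req->buf = buf; req->length = len; req->complete = done;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);		=> pch_udc_pcd_queue()
 *	usb_ep_dequeue(ep, req);			=> pch_udc_pcd_dequeue()
 */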
2070 static const struct usb_ep_ops pch_udc_ep_ops = {
2071 .enable = pch_udc_pcd_ep_enable,
2072 .disable = pch_udc_pcd_ep_disable,
2073 .alloc_request = pch_udc_alloc_request,
2074 .free_request = pch_udc_free_request,
2075 .queue = pch_udc_pcd_queue,
2076 .dequeue = pch_udc_pcd_dequeue,
2077 .set_halt = pch_udc_pcd_set_halt,
2078 .set_wedge = pch_udc_pcd_set_wedge,
2079 .fifo_status = NULL,
2080 .fifo_flush = pch_udc_pcd_fifo_flush,
2081 };
2082
2083 /**
2084 * pch_udc_init_setup_buff() - This function initializes the SETUP buffer
2085  * @td_stp: Reference to the SETUP buffer structure
2086 */
2087 static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
2088 {
2089 static u32 pky_marker;
2090
2091 if (!td_stp)
2092 return;
2093 td_stp->reserved = ++pky_marker;
2094 memset(&td_stp->request, 0xFF, sizeof td_stp->request);
2095 td_stp->status = PCH_UDC_BS_HST_RDY;
2096 }
2097
2098 /**
2099  * pch_udc_start_next_txrequest() - This function starts
2100  *					the next queued transmit (IN) request
2101 * @ep: Reference to the endpoint structure
2102 */
2103 static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
2104 {
2105 struct pch_udc_request *req;
2106 struct pch_udc_data_dma_desc *td_data;
2107
2108 if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
2109 return;
2110
2111 if (list_empty(&ep->queue))
2112 return;
2113
2114 /* next request */
2115 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2116 if (req->dma_going)
2117 return;
2118 if (!req->td_data)
2119 return;
2120 pch_udc_wait_ep_stall(ep);
2121 req->dma_going = 1;
2122 pch_udc_ep_set_ddptr(ep, 0);
2123 td_data = req->td_data;
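	/* Hand every descriptor in the chain back to the hardware (HOST_RDY)
	 * before programming the descriptor pointer and enabling TX DMA. */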
2124 while (1) {
2125 td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
2126 PCH_UDC_BS_HST_RDY;
2127 if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
2128 break;
2129 td_data = phys_to_virt(td_data->next);
2130 }
2131 pch_udc_ep_set_ddptr(ep, req->td_data_phys);
2132 pch_udc_set_dma(ep->dev, DMA_DIR_TX);
2133 pch_udc_ep_set_pd(ep);
2134 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2135 pch_udc_ep_clear_nak(ep);
2136 }
2137
2138 /**
2139 * pch_udc_complete_transfer() - This function completes a transfer
2140 * @ep: Reference to the endpoint structure
2141 */
2142 static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
2143 {
2144 struct pch_udc_request *req;
2145 struct pch_udc_dev *dev = ep->dev;
2146
2147 if (list_empty(&ep->queue))
2148 return;
2149 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2150 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2151 PCH_UDC_BS_DMA_DONE)
2152 return;
2153 if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
2154 PCH_UDC_RTS_SUCC) {
2155 dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
2156 "epstatus=0x%08x\n",
2157 (req->td_data_last->status & PCH_UDC_RXTX_STS),
2158 (int)(ep->epsts));
2159 return;
2160 }
2161
2162 req->req.actual = req->req.length;
2163 req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2164 req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2165 complete_req(ep, req, 0);
2166 req->dma_going = 0;
2167 if (!list_empty(&ep->queue)) {
2168 pch_udc_wait_ep_stall(ep);
2169 pch_udc_ep_clear_nak(ep);
2170 pch_udc_enable_ep_interrupts(ep->dev,
2171 PCH_UDC_EPINT(ep->in, ep->num));
2172 } else {
2173 pch_udc_disable_ep_interrupts(ep->dev,
2174 PCH_UDC_EPINT(ep->in, ep->num));
2175 }
2176 }
2177
2178 /**
2179  * pch_udc_complete_receiver() - This function completes a receive (OUT) transfer
2180 * @ep: Reference to the endpoint structure
2181 */
2182 static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
2183 {
2184 struct pch_udc_request *req;
2185 struct pch_udc_dev *dev = ep->dev;
2186 unsigned int count;
2187 struct pch_udc_data_dma_desc *td;
2188 dma_addr_t addr;
2189
2190 if (list_empty(&ep->queue))
2191 return;
2192 /* next request */
2193 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2194 pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
2195 pch_udc_ep_set_ddptr(ep, 0);
2196 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
2197 PCH_UDC_BS_DMA_DONE)
2198 td = req->td_data_last;
2199 else
2200 td = req->td_data;
2201
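	/* Walk the descriptor chain until the descriptor flagged DMA_LAST is
	 * found; its RXTX_BYTES field carries the received byte count. */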
2202 while (1) {
2203 if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
2204 dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
2205 "epstatus=0x%08x\n",
2206 (req->td_data->status & PCH_UDC_RXTX_STS),
2207 (int)(ep->epsts));
2208 return;
2209 }
2210 if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
2211 if (td->status & PCH_UDC_DMA_LAST) {
2212 count = td->status & PCH_UDC_RXTX_BYTES;
2213 break;
2214 }
2215 if (td == req->td_data_last) {
2216 dev_err(&dev->pdev->dev, "Not complete RX descriptor");
2217 return;
2218 }
2219 addr = (dma_addr_t)td->next;
2220 td = phys_to_virt(addr);
2221 }
2222 /* on 64k packets the RXBYTES field is zero */
2223 if (!count && (req->req.length == UDC_DMA_MAXPACKET))
2224 count = UDC_DMA_MAXPACKET;
2225 req->td_data->status |= PCH_UDC_DMA_LAST;
2226 td->status |= PCH_UDC_BS_HST_BSY;
2227
2228 req->dma_going = 0;
2229 req->req.actual = count;
2230 complete_req(ep, req, 0);
2231 	/* If there is another pending request, start it now */
2232 if (!list_empty(&ep->queue)) {
2233 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2234 pch_udc_start_rxrequest(ep, req);
2235 }
2236 }
2237
2238 /**
2239  * pch_udc_svc_data_in() - This function processes endpoint interrupts
2240  *				for IN endpoints
2241 * @dev: Reference to the device structure
2242 * @ep_num: Endpoint that generated the interrupt
2243 */
2244 static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
2245 {
2246 u32 epsts;
2247 struct pch_udc_ep *ep;
2248
2249 ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2250 epsts = ep->epsts;
2251 ep->epsts = 0;
2252
2253 if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2254 UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2255 UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
2256 return;
2257 if ((epsts & UDC_EPSTS_BNA))
2258 return;
2259 if (epsts & UDC_EPSTS_HE)
2260 return;
2261 if (epsts & UDC_EPSTS_RSS) {
2262 pch_udc_ep_set_stall(ep);
2263 pch_udc_enable_ep_interrupts(ep->dev,
2264 PCH_UDC_EPINT(ep->in, ep->num));
2265 }
2266 if (epsts & UDC_EPSTS_RCS) {
2267 if (!dev->prot_stall) {
2268 pch_udc_ep_clear_stall(ep);
2269 } else {
2270 pch_udc_ep_set_stall(ep);
2271 pch_udc_enable_ep_interrupts(ep->dev,
2272 PCH_UDC_EPINT(ep->in, ep->num));
2273 }
2274 }
2275 if (epsts & UDC_EPSTS_TDC)
2276 pch_udc_complete_transfer(ep);
2277 /* On IN interrupt, provide data if we have any */
2278 if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
2279 !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
2280 pch_udc_start_next_txrequest(ep);
2281 }
2282
2283 /**
2284 * pch_udc_svc_data_out() - Handles interrupts from OUT endpoint
2285 * @dev: Reference to the device structure
2286 * @ep_num: Endpoint that generated the interrupt
2287 */
2288 static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
2289 {
2290 u32 epsts;
2291 struct pch_udc_ep *ep;
2292 struct pch_udc_request *req = NULL;
2293
2294 ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
2295 epsts = ep->epsts;
2296 ep->epsts = 0;
2297
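	/* Buffer-Not-Available: a request is queued but its descriptor chain
	 * has not been completed by the DMA engine yet, so (re)start
	 * reception for it instead of completing it. */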
2298 if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
2299 /* next request */
2300 req = list_entry(ep->queue.next, struct pch_udc_request,
2301 queue);
2302 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2303 PCH_UDC_BS_DMA_DONE) {
2304 if (!req->dma_going)
2305 pch_udc_start_rxrequest(ep, req);
2306 return;
2307 }
2308 }
2309 if (epsts & UDC_EPSTS_HE)
2310 return;
2311 if (epsts & UDC_EPSTS_RSS) {
2312 pch_udc_ep_set_stall(ep);
2313 pch_udc_enable_ep_interrupts(ep->dev,
2314 PCH_UDC_EPINT(ep->in, ep->num));
2315 }
2316 if (epsts & UDC_EPSTS_RCS) {
2317 if (!dev->prot_stall) {
2318 pch_udc_ep_clear_stall(ep);
2319 } else {
2320 pch_udc_ep_set_stall(ep);
2321 pch_udc_enable_ep_interrupts(ep->dev,
2322 PCH_UDC_EPINT(ep->in, ep->num));
2323 }
2324 }
2325 if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2326 UDC_EPSTS_OUT_DATA) {
2327 if (ep->dev->prot_stall == 1) {
2328 pch_udc_ep_set_stall(ep);
2329 pch_udc_enable_ep_interrupts(ep->dev,
2330 PCH_UDC_EPINT(ep->in, ep->num));
2331 } else {
2332 pch_udc_complete_receiver(ep);
2333 }
2334 }
2335 if (list_empty(&ep->queue))
2336 pch_udc_set_dma(dev, DMA_DIR_RX);
2337 }
2338
2339 /**
2340 * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
2341 * @dev: Reference to the device structure
2342 */
2343 static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
2344 {
2345 u32 epsts;
2346 struct pch_udc_ep *ep;
2347 struct pch_udc_ep *ep_out;
2348
2349 ep = &dev->ep[UDC_EP0IN_IDX];
2350 ep_out = &dev->ep[UDC_EP0OUT_IDX];
2351 epsts = ep->epsts;
2352 ep->epsts = 0;
2353
2354 if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2355 UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2356 UDC_EPSTS_XFERDONE)))
2357 return;
2358 if ((epsts & UDC_EPSTS_BNA))
2359 return;
2360 if (epsts & UDC_EPSTS_HE)
2361 return;
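	/* Transmit-DMA-complete on ep0 IN: finish the request and re-arm
	 * ep0 OUT (descriptor back to HOST_RDY, RX DMA on, RRDY set) so the
	 * status stage or the next SETUP can be received. */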
2362 if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
2363 pch_udc_complete_transfer(ep);
2364 pch_udc_clear_dma(dev, DMA_DIR_RX);
2365 ep_out->td_data->status = (ep_out->td_data->status &
2366 ~PCH_UDC_BUFF_STS) |
2367 PCH_UDC_BS_HST_RDY;
2368 pch_udc_ep_clear_nak(ep_out);
2369 pch_udc_set_dma(dev, DMA_DIR_RX);
2370 pch_udc_ep_set_rrdy(ep_out);
2371 }
2372 /* On IN interrupt, provide data if we have any */
2373 if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
2374 !(epsts & UDC_EPSTS_TXEMPTY))
2375 pch_udc_start_next_txrequest(ep);
2376 }
2377
2378 /**
2379  * pch_udc_svc_control_out() - Routine that handles Control
2380  *					OUT endpoint interrupts
2381 * @dev: Reference to the device structure
2382 */
2383 static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2384 {
2385 u32 stat;
2386 int setup_supported;
2387 struct pch_udc_ep *ep;
2388
2389 ep = &dev->ep[UDC_EP0OUT_IDX];
2390 stat = ep->epsts;
2391 ep->epsts = 0;
2392
2393 /* If setup data */
2394 if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2395 UDC_EPSTS_OUT_SETUP) {
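		/*
		 * SETUP packet: clear stall state, snapshot the setup data,
		 * re-arm the setup buffer, pick the ep0 direction for the
		 * data stage, and only then call the gadget's setup()
		 * callback with the lock dropped.
		 */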
2396 dev->stall = 0;
2397 dev->ep[UDC_EP0IN_IDX].halted = 0;
2398 dev->ep[UDC_EP0OUT_IDX].halted = 0;
2399 dev->setup_data = ep->td_stp->request;
2400 pch_udc_init_setup_buff(ep->td_stp);
2401 pch_udc_clear_dma(dev, DMA_DIR_RX);
2402 pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
2403 dev->ep[UDC_EP0IN_IDX].in);
2404 if ((dev->setup_data.bRequestType & USB_DIR_IN))
2405 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2406 else /* OUT */
2407 dev->gadget.ep0 = &ep->ep;
2408 spin_unlock(&dev->lock);
2409 		/* Mass Storage class Bulk-Only Reset request */
2410 if ((dev->setup_data.bRequestType == 0x21) &&
2411 (dev->setup_data.bRequest == 0xFF))
2412 dev->prot_stall = 0;
2413 /* call gadget with setup data received */
2414 setup_supported = dev->driver->setup(&dev->gadget,
2415 &dev->setup_data);
2416 spin_lock(&dev->lock);
2417
2418 if (dev->setup_data.bRequestType & USB_DIR_IN) {
2419 ep->td_data->status = (ep->td_data->status &
2420 ~PCH_UDC_BUFF_STS) |
2421 PCH_UDC_BS_HST_RDY;
2422 pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2423 }
2424 /* ep0 in returns data on IN phase */
2425 if (setup_supported >= 0 && setup_supported <
2426 UDC_EP0IN_MAX_PKT_SIZE) {
2427 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
2428 /* Gadget would have queued a request when
2429 * we called the setup */
2430 if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
2431 pch_udc_set_dma(dev, DMA_DIR_RX);
2432 pch_udc_ep_clear_nak(ep);
2433 }
2434 } else if (setup_supported < 0) {
2435 /* if unsupported request, then stall */
2436 pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
2437 pch_udc_enable_ep_interrupts(ep->dev,
2438 PCH_UDC_EPINT(ep->in, ep->num));
2439 dev->stall = 0;
2440 pch_udc_set_dma(dev, DMA_DIR_RX);
2441 } else {
2442 dev->waiting_zlp_ack = 1;
2443 }
2444 } else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2445 UDC_EPSTS_OUT_DATA) && !dev->stall) {
2446 pch_udc_clear_dma(dev, DMA_DIR_RX);
2447 pch_udc_ep_set_ddptr(ep, 0);
2448 if (!list_empty(&ep->queue)) {
2449 ep->epsts = stat;
2450 pch_udc_svc_data_out(dev, PCH_UDC_EP0);
2451 }
2452 pch_udc_set_dma(dev, DMA_DIR_RX);
2453 }
2454 pch_udc_ep_set_rrdy(ep);
2455 }
2456
2457
2458 /**
2459  * pch_udc_postsvc_epinters() - This function enables endpoint interrupts
2460 * and clears NAK status
2461 * @dev: Reference to the device structure
2462 * @ep_num: End point number
2463 */
2464 static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2465 {
2466 struct pch_udc_ep *ep;
2467 struct pch_udc_request *req;
2468
2469 ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2470 if (!list_empty(&ep->queue)) {
2471 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2472 pch_udc_enable_ep_interrupts(ep->dev,
2473 PCH_UDC_EPINT(ep->in, ep->num));
2474 pch_udc_ep_clear_nak(ep);
2475 }
2476 }
2477
2478 /**
2479  * pch_udc_read_all_epstatus() - This function reads the status of all endpoints
2480 * @dev: Reference to the device structure
2481 * @ep_intr: Status of endpoint interrupt
2482 */
2483 static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2484 {
2485 int i;
2486 struct pch_udc_ep *ep;
2487
2488 for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2489 /* IN */
2490 if (ep_intr & (0x1 << i)) {
2491 ep = &dev->ep[UDC_EPIN_IDX(i)];
2492 ep->epsts = pch_udc_read_ep_status(ep);
2493 pch_udc_clear_ep_status(ep, ep->epsts);
2494 }
2495 /* OUT */
2496 if (ep_intr & (0x10000 << i)) {
2497 ep = &dev->ep[UDC_EPOUT_IDX(i)];
2498 ep->epsts = pch_udc_read_ep_status(ep);
2499 pch_udc_clear_ep_status(ep, ep->epsts);
2500 }
2501 }
2502 }
2503
2504 /**
2505 * pch_udc_activate_control_ep() - This function enables the control endpoints
2506 * for traffic after a reset
2507 * @dev: Reference to the device structure
2508 */
2509 static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
2510 {
2511 struct pch_udc_ep *ep;
2512 u32 val;
2513
2514 /* Setup the IN endpoint */
2515 ep = &dev->ep[UDC_EP0IN_IDX];
2516 pch_udc_clear_ep_control(ep);
2517 pch_udc_ep_fifo_flush(ep, ep->in);
2518 pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
2519 pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
2520 /* Initialize the IN EP Descriptor */
2521 ep->td_data = NULL;
2522 ep->td_stp = NULL;
2523 ep->td_data_phys = 0;
2524 ep->td_stp_phys = 0;
2525
2526 /* Setup the OUT endpoint */
2527 ep = &dev->ep[UDC_EP0OUT_IDX];
2528 pch_udc_clear_ep_control(ep);
2529 pch_udc_ep_fifo_flush(ep, ep->in);
2530 pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
2531 pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
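	/* Advertise the ep0 OUT max packet size in the UDC's endpoint
	 * configuration (NE/CSR) register as well. */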
2532 val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
2533 pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
2534
2535 /* Initialize the SETUP buffer */
2536 pch_udc_init_setup_buff(ep->td_stp);
2537 /* Write the pointer address of dma descriptor */
2538 pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
2539 /* Write the pointer address of Setup descriptor */
2540 pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2541
2542 /* Initialize the dma descriptor */
2543 ep->td_data->status = PCH_UDC_DMA_LAST;
2544 ep->td_data->dataptr = dev->dma_addr;
2545 ep->td_data->next = ep->td_data_phys;
2546
2547 pch_udc_ep_clear_nak(ep);
2548 }
2549
2550
2551 /**
2552 * pch_udc_svc_ur_interrupt() - This function handles a USB reset interrupt
2553 * @dev: Reference to driver structure
2554 */
2555 static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
2556 {
2557 struct pch_udc_ep *ep;
2558 int i;
2559
2560 pch_udc_clear_dma(dev, DMA_DIR_TX);
2561 pch_udc_clear_dma(dev, DMA_DIR_RX);
2562 /* Mask all endpoint interrupts */
2563 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2564 /* clear all endpoint interrupts */
2565 pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2566
2567 for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2568 ep = &dev->ep[i];
2569 pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
2570 pch_udc_clear_ep_control(ep);
2571 pch_udc_ep_set_ddptr(ep, 0);
2572 pch_udc_write_csr(ep->dev, 0x00, i);
2573 }
2574 dev->stall = 0;
2575 dev->prot_stall = 0;
2576 dev->waiting_zlp_ack = 0;
2577 dev->set_cfg_not_acked = 0;
2578
2579 	/* set NAK, flush the FIFO and empty the request queue of each used endpoint */
2580 for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
2581 ep = &dev->ep[i];
2582 pch_udc_ep_set_nak(ep);
2583 pch_udc_ep_fifo_flush(ep, ep->in);
2584 /* Complete request queue */
2585 empty_req_queue(ep);
2586 }
2587 if (dev->driver && dev->driver->disconnect) {
2588 spin_unlock(&dev->lock);
2589 dev->driver->disconnect(&dev->gadget);
2590 spin_lock(&dev->lock);
2591 }
2592 }
2593
2594 /**
2595 * pch_udc_svc_enum_interrupt() - This function handles a USB speed enumeration
2596 * done interrupt
2597 * @dev: Reference to driver structure
2598 */
2599 static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
2600 {
2601 u32 dev_stat, dev_speed;
2602 u32 speed = USB_SPEED_FULL;
2603
2604 dev_stat = pch_udc_read_device_status(dev);
2605 dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
2606 UDC_DEVSTS_ENUM_SPEED_SHIFT;
2607 switch (dev_speed) {
2608 case UDC_DEVSTS_ENUM_SPEED_HIGH:
2609 speed = USB_SPEED_HIGH;
2610 break;
2611 case UDC_DEVSTS_ENUM_SPEED_FULL:
2612 speed = USB_SPEED_FULL;
2613 break;
2614 case UDC_DEVSTS_ENUM_SPEED_LOW:
2615 speed = USB_SPEED_LOW;
2616 break;
2617 default:
2618 BUG();
2619 }
2620 dev->gadget.speed = speed;
2621 pch_udc_activate_control_ep(dev);
2622 pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
2623 pch_udc_set_dma(dev, DMA_DIR_TX);
2624 pch_udc_set_dma(dev, DMA_DIR_RX);
2625 pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
2626
2627 /* enable device interrupts */
2628 pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2629 UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2630 UDC_DEVINT_SI | UDC_DEVINT_SC);
2631 }
2632
2633 /**
2634 * pch_udc_svc_intf_interrupt() - This function handles a set interface
2635 * interrupt
2636 * @dev: Reference to driver structure
2637 */
2638 static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
2639 {
2640 u32 reg, dev_stat = 0;
2641 int i, ret;
2642
2643 dev_stat = pch_udc_read_device_status(dev);
2644 dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
2645 UDC_DEVSTS_INTF_SHIFT;
2646 dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
2647 UDC_DEVSTS_ALT_SHIFT;
2648 dev->set_cfg_not_acked = 1;
2649 /* Construct the usb request for gadget driver and inform it */
2650 memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2651 dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
2652 dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
2653 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
2654 dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
2655 	/* program the Endpoint Cfg registers */
2656 /* Only one end point cfg register */
2657 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2658 reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
2659 (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
2660 reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
2661 (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
2662 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2663 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2664 /* clear stall bits */
2665 pch_udc_ep_clear_stall(&(dev->ep[i]));
2666 dev->ep[i].halted = 0;
2667 }
2668 dev->stall = 0;
2669 spin_unlock(&dev->lock);
2670 ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
2671 spin_lock(&dev->lock);
2672 }
2673
2674 /**
2675 * pch_udc_svc_cfg_interrupt() - This function handles a set configuration
2676 * interrupt
2677 * @dev: Reference to driver structure
2678 */
2679 static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
2680 {
2681 int i, ret;
2682 u32 reg, dev_stat = 0;
2683
2684 dev_stat = pch_udc_read_device_status(dev);
2685 dev->set_cfg_not_acked = 1;
2686 dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
2687 UDC_DEVSTS_CFG_SHIFT;
2688 /* make usb request for gadget driver */
2689 memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2690 dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
2691 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
2692 /* program the NE registers */
2693 /* Only one end point cfg register */
2694 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2695 reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
2696 (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
2697 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2698 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2699 /* clear stall bits */
2700 pch_udc_ep_clear_stall(&(dev->ep[i]));
2701 dev->ep[i].halted = 0;
2702 }
2703 dev->stall = 0;
2704
2705 /* call gadget zero with setup data received */
2706 spin_unlock(&dev->lock);
2707 ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
2708 spin_lock(&dev->lock);
2709 }
2710
2711 /**
2712 * pch_udc_dev_isr() - This function services device interrupts
2713 * by invoking appropriate routines.
2714 * @dev: Reference to the device structure
2715 * @dev_intr: The Device interrupt status.
2716 */
2717 static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
2718 {
2719 int vbus;
2720
2721 /* USB Reset Interrupt */
2722 if (dev_intr & UDC_DEVINT_UR) {
2723 pch_udc_svc_ur_interrupt(dev);
2724 dev_dbg(&dev->pdev->dev, "USB_RESET\n");
2725 }
2726 /* Enumeration Done Interrupt */
2727 if (dev_intr & UDC_DEVINT_ENUM) {
2728 pch_udc_svc_enum_interrupt(dev);
2729 dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
2730 }
2731 /* Set Interface Interrupt */
2732 if (dev_intr & UDC_DEVINT_SI)
2733 pch_udc_svc_intf_interrupt(dev);
2734 /* Set Config Interrupt */
2735 if (dev_intr & UDC_DEVINT_SC)
2736 pch_udc_svc_cfg_interrupt(dev);
2737 /* USB Suspend interrupt */
2738 if (dev_intr & UDC_DEVINT_US) {
2739 if (dev->driver
2740 && dev->driver->suspend) {
2741 spin_unlock(&dev->lock);
2742 dev->driver->suspend(&dev->gadget);
2743 spin_lock(&dev->lock);
2744 }
2745
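		/*
		 * Use the VBUS GPIO (when available) to tell a genuine cable
		 * disconnect from a plain bus suspend: with no VBUS session
		 * and VBUS not present, report a disconnect to the gadget
		 * driver and reconnect the controller; otherwise defer to
		 * the VBUS polling worker.
		 */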
2746 vbus = pch_vbus_gpio_get_value(dev);
2747 if ((dev->vbus_session == 0)
2748 && (vbus != 1)) {
2749 if (dev->driver && dev->driver->disconnect) {
2750 spin_unlock(&dev->lock);
2751 dev->driver->disconnect(&dev->gadget);
2752 spin_lock(&dev->lock);
2753 }
2754 pch_udc_reconnect(dev);
2755 } else if ((dev->vbus_session == 0)
2756 && (vbus == 1)
2757 && !dev->vbus_gpio.intr)
2758 schedule_work(&dev->vbus_gpio.irq_work_fall);
2759
2760 dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
2761 }
2762 /* Clear the SOF interrupt, if enabled */
2763 if (dev_intr & UDC_DEVINT_SOF)
2764 dev_dbg(&dev->pdev->dev, "SOF\n");
2765 /* ES interrupt, IDLE > 3ms on the USB */
2766 if (dev_intr & UDC_DEVINT_ES)
2767 dev_dbg(&dev->pdev->dev, "ES\n");
2768 /* RWKP interrupt */
2769 if (dev_intr & UDC_DEVINT_RWKP)
2770 dev_dbg(&dev->pdev->dev, "RWKP\n");
2771 }
2772
2773 /**
2774 * pch_udc_isr() - This function handles interrupts from the PCH USB Device
2775 * @irq: Interrupt request number
2776  * @pdev:	Reference to the device structure passed to request_irq()
2777 */
2778 static irqreturn_t pch_udc_isr(int irq, void *pdev)
2779 {
2780 struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
2781 u32 dev_intr, ep_intr;
2782 int i;
2783
2784 dev_intr = pch_udc_read_device_interrupts(dev);
2785 ep_intr = pch_udc_read_ep_interrupts(dev);
2786
2787 	/* On hot plug, detect whether the controller is hung up. */
2788 if (dev_intr == ep_intr)
2789 if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
2790 dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
2791 /* The controller is reset */
2792 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
2793 return IRQ_HANDLED;
2794 }
2795 if (dev_intr)
2796 /* Clear device interrupts */
2797 pch_udc_write_device_interrupts(dev, dev_intr);
2798 if (ep_intr)
2799 /* Clear ep interrupts */
2800 pch_udc_write_ep_interrupts(dev, ep_intr);
2801 if (!dev_intr && !ep_intr)
2802 return IRQ_NONE;
2803 spin_lock(&dev->lock);
2804 if (dev_intr)
2805 pch_udc_dev_isr(dev, dev_intr);
2806 if (ep_intr) {
2807 pch_udc_read_all_epstatus(dev, ep_intr);
2808 /* Process Control In interrupts, if present */
2809 if (ep_intr & UDC_EPINT_IN_EP0) {
2810 pch_udc_svc_control_in(dev);
2811 pch_udc_postsvc_epinters(dev, 0);
2812 }
2813 /* Process Control Out interrupts, if present */
2814 if (ep_intr & UDC_EPINT_OUT_EP0)
2815 pch_udc_svc_control_out(dev);
2816 /* Process data in end point interrupts */
2817 for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
2818 if (ep_intr & (1 << i)) {
2819 pch_udc_svc_data_in(dev, i);
2820 pch_udc_postsvc_epinters(dev, i);
2821 }
2822 }
2823 /* Process data out end point interrupts */
2824 for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
2825 PCH_UDC_USED_EP_NUM); i++)
2826 if (ep_intr & (1 << i))
2827 pch_udc_svc_data_out(dev, i -
2828 UDC_EPINT_OUT_SHIFT);
2829 }
2830 spin_unlock(&dev->lock);
2831 return IRQ_HANDLED;
2832 }
2833
2834 /**
2835 * pch_udc_setup_ep0() - This function enables control endpoint for traffic
2836 * @dev: Reference to the device structure
2837 */
2838 static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
2839 {
2840 /* enable ep0 interrupts */
2841 pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
2842 UDC_EPINT_OUT_EP0);
2843 /* enable device interrupts */
2844 pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2845 UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2846 UDC_DEVINT_SI | UDC_DEVINT_SC);
2847 }
2848
2849 /**
2850 * gadget_release() - Free the gadget driver private data
2851  * @pdev:	Reference to the gadget's device structure
2852 */
2853 static void gadget_release(struct device *pdev)
2854 {
2855 struct pch_udc_dev *dev = dev_get_drvdata(pdev);
2856
2857 kfree(dev);
2858 }
2859
2860 /**
2861 * pch_udc_pcd_reinit() - This API initializes the endpoint structures
2862 * @dev: Reference to the driver structure
2863 */
2864 static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
2865 {
2866 const char *const ep_string[] = {
2867 ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
2868 "ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
2869 "ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
2870 "ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
2871 "ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
2872 "ep15in", "ep15out",
2873 };
2874 int i;
2875
2876 dev->gadget.speed = USB_SPEED_UNKNOWN;
2877 INIT_LIST_HEAD(&dev->gadget.ep_list);
2878
2879 /* Initialize the endpoints structures */
2880 memset(dev->ep, 0, sizeof dev->ep);
2881 for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2882 struct pch_udc_ep *ep = &dev->ep[i];
2883 ep->dev = dev;
2884 ep->halted = 1;
2885 ep->num = i / 2;
2886 ep->in = ~i & 1;
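		/* dev->ep[] layout: even indices are IN endpoints, odd are
		 * OUT, with ep->num = i / 2 (ep0in, ep0out, ep1in, ...). */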
2887 ep->ep.name = ep_string[i];
2888 ep->ep.ops = &pch_udc_ep_ops;
2889 if (ep->in)
2890 ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
2891 else
2892 ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
2893 UDC_EP_REG_SHIFT;
2894 /* need to set ep->ep.maxpacket and set Default Configuration?*/
2895 ep->ep.maxpacket = UDC_BULK_MAX_PKT_SIZE;
2896 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2897 INIT_LIST_HEAD(&ep->queue);
2898 }
2899 dev->ep[UDC_EP0IN_IDX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
2900 dev->ep[UDC_EP0OUT_IDX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
2901
2902 	/* remove ep0 in and out from the list. They have their own pointers */
2903 list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
2904 list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
2905
2906 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2907 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2908 }
2909
2910 /**
2911 * pch_udc_pcd_init() - This API initializes the driver structure
2912 * @dev: Reference to the driver structure
2913 *
2914 * Return codes:
2915 * 0: Success
2916 */
2917 static int pch_udc_pcd_init(struct pch_udc_dev *dev)
2918 {
2919 pch_udc_init(dev);
2920 pch_udc_pcd_reinit(dev);
2921 pch_vbus_gpio_init(dev, vbus_gpio_port);
2922 return 0;
2923 }
2924
2925 /**
2926 * init_dma_pools() - create dma pools during initialization
2927  * @dev:	Reference to the driver structure
2928 */
2929 static int init_dma_pools(struct pch_udc_dev *dev)
2930 {
2931 struct pch_udc_stp_dma_desc *td_stp;
2932 struct pch_udc_data_dma_desc *td_data;
2933
2934 /* DMA setup */
2935 dev->data_requests = pci_pool_create("data_requests", dev->pdev,
2936 sizeof(struct pch_udc_data_dma_desc), 0, 0);
2937 if (!dev->data_requests) {
2938 dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2939 __func__);
2940 return -ENOMEM;
2941 }
2942
2943 /* dma desc for setup data */
2944 dev->stp_requests = pci_pool_create("setup requests", dev->pdev,
2945 sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2946 if (!dev->stp_requests) {
2947 dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2948 __func__);
2949 return -ENOMEM;
2950 }
2951 /* setup */
2952 td_stp = pci_pool_alloc(dev->stp_requests, GFP_KERNEL,
2953 &dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2954 if (!td_stp) {
2955 dev_err(&dev->pdev->dev,
2956 "%s: can't allocate setup dma descriptor\n", __func__);
2957 return -ENOMEM;
2958 }
2959 dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
2960
2961 /* data: 0 packets !? */
2962 td_data = pci_pool_alloc(dev->data_requests, GFP_KERNEL,
2963 &dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2964 if (!td_data) {
2965 dev_err(&dev->pdev->dev,
2966 "%s: can't allocate data dma descriptor\n", __func__);
2967 return -ENOMEM;
2968 }
2969 dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2970 dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2971 dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2972 dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2973 dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2974
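	/* ep0 OUT bounce buffer, sized for four max-size buffers and mapped
	 * once here; its bus address (dev->dma_addr) is what the ep0 OUT
	 * data descriptor points at in pch_udc_activate_control_ep(). */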
2975 dev->ep0out_buf = kzalloc(UDC_EP0OUT_BUFF_SIZE * 4, GFP_KERNEL);
2976 if (!dev->ep0out_buf)
2977 return -ENOMEM;
2978 dev->dma_addr = dma_map_single(&dev->pdev->dev, dev->ep0out_buf,
2979 UDC_EP0OUT_BUFF_SIZE * 4,
2980 DMA_FROM_DEVICE);
2981 return 0;
2982 }
2983
2984 static int pch_udc_start(struct usb_gadget_driver *driver,
2985 int (*bind)(struct usb_gadget *))
2986 {
2987 struct pch_udc_dev *dev = pch_udc;
2988 int retval;
2989
2990 	if (!dev)
2991 		return -ENODEV;
2992 
2993 	if (!driver || (driver->max_speed == USB_SPEED_UNKNOWN) || !bind ||
2994 	    !driver->setup || !driver->unbind || !driver->disconnect) {
2995 		dev_err(&dev->pdev->dev,
2996 			"%s: invalid driver parameter\n", __func__);
2997 		return -EINVAL;
2998 	}
2999
3000 if (dev->driver) {
3001 dev_err(&dev->pdev->dev, "%s: already bound\n", __func__);
3002 return -EBUSY;
3003 }
3004 driver->driver.bus = NULL;
3005 dev->driver = driver;
3006 dev->gadget.dev.driver = &driver->driver;
3007
3008 /* Invoke the bind routine of the gadget driver */
3009 retval = bind(&dev->gadget);
3010
3011 if (retval) {
3012 dev_err(&dev->pdev->dev, "%s: binding to %s returning %d\n",
3013 __func__, driver->driver.name, retval);
3014 dev->driver = NULL;
3015 dev->gadget.dev.driver = NULL;
3016 return retval;
3017 }
3018 /* get ready for ep0 traffic */
3019 pch_udc_setup_ep0(dev);
3020
3021 /* clear SD */
3022 if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
3023 pch_udc_clear_disconnect(dev);
3024
3025 dev->connected = 1;
3026 return 0;
3027 }
3028
3029 static int pch_udc_stop(struct usb_gadget_driver *driver)
3030 {
3031 struct pch_udc_dev *dev = pch_udc;
3032
3033 if (!dev)
3034 return -ENODEV;
3035
3036 if (!driver || (driver != dev->driver)) {
3037 dev_err(&dev->pdev->dev,
3038 "%s: invalid driver parameter\n", __func__);
3039 return -EINVAL;
3040 }
3041
3042 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3043
3044 	/* Ensure that there are no pending requests with this driver */
3045 driver->disconnect(&dev->gadget);
3046 driver->unbind(&dev->gadget);
3047 dev->gadget.dev.driver = NULL;
3048 dev->driver = NULL;
3049 dev->connected = 0;
3050
3051 /* set SD */
3052 pch_udc_set_disconnect(dev);
3053 return 0;
3054 }
3055
3056 static void pch_udc_shutdown(struct pci_dev *pdev)
3057 {
3058 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3059
3060 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3061 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3062
3063 /* disable the pullup so the host will think we're gone */
3064 pch_udc_set_disconnect(dev);
3065 }
3066
3067 static void pch_udc_remove(struct pci_dev *pdev)
3068 {
3069 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3070
3071 usb_del_gadget_udc(&dev->gadget);
3072
3073 /* gadget driver must not be registered */
3074 if (dev->driver)
3075 dev_err(&pdev->dev,
3076 "%s: gadget driver still bound!!!\n", __func__);
3077 /* dma pool cleanup */
3078 if (dev->data_requests)
3079 pci_pool_destroy(dev->data_requests);
3080
3081 if (dev->stp_requests) {
3082 /* cleanup DMA desc's for ep0in */
3083 if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
3084 pci_pool_free(dev->stp_requests,
3085 dev->ep[UDC_EP0OUT_IDX].td_stp,
3086 dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
3087 }
3088 if (dev->ep[UDC_EP0OUT_IDX].td_data) {
3089 pci_pool_free(dev->stp_requests,
3090 dev->ep[UDC_EP0OUT_IDX].td_data,
3091 dev->ep[UDC_EP0OUT_IDX].td_data_phys);
3092 }
3093 pci_pool_destroy(dev->stp_requests);
3094 }
3095
3096 if (dev->dma_addr)
3097 dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
3098 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
3099 kfree(dev->ep0out_buf);
3100
3101 pch_vbus_gpio_free(dev);
3102
3103 pch_udc_exit(dev);
3104
3105 if (dev->irq_registered)
3106 free_irq(pdev->irq, dev);
3107 if (dev->base_addr)
3108 iounmap(dev->base_addr);
3109 if (dev->mem_region)
3110 release_mem_region(dev->phys_addr,
3111 pci_resource_len(pdev, PCH_UDC_PCI_BAR));
3112 if (dev->active)
3113 pci_disable_device(pdev);
3114 if (dev->registered)
3115 device_unregister(&dev->gadget.dev);
3116 kfree(dev);
3117 pci_set_drvdata(pdev, NULL);
3118 }
3119
3120 #ifdef CONFIG_PM
3121 static int pch_udc_suspend(struct pci_dev *pdev, pm_message_t state)
3122 {
3123 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3124
3125 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3126 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3127
3128 pci_disable_device(pdev);
3129 pci_enable_wake(pdev, PCI_D3hot, 0);
3130
3131 if (pci_save_state(pdev)) {
3132 dev_err(&pdev->dev,
3133 "%s: could not save PCI config state\n", __func__);
3134 return -ENOMEM;
3135 }
3136 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3137 return 0;
3138 }
3139
3140 static int pch_udc_resume(struct pci_dev *pdev)
3141 {
3142 int ret;
3143
3144 pci_set_power_state(pdev, PCI_D0);
3145 pci_restore_state(pdev);
3146 ret = pci_enable_device(pdev);
3147 if (ret) {
3148 dev_err(&pdev->dev, "%s: pci_enable_device failed\n", __func__);
3149 return ret;
3150 }
3151 pci_enable_wake(pdev, PCI_D3hot, 0);
3152 return 0;
3153 }
3154 #else
3155 #define pch_udc_suspend NULL
3156 #define pch_udc_resume NULL
3157 #endif /* CONFIG_PM */
3158
3159 static int pch_udc_probe(struct pci_dev *pdev,
3160 const struct pci_device_id *id)
3161 {
3162 unsigned long resource;
3163 unsigned long len;
3164 int retval;
3165 struct pch_udc_dev *dev;
3166
3167 /* one udc only */
3168 if (pch_udc) {
3169 pr_err("%s: already probed\n", __func__);
3170 return -EBUSY;
3171 }
3172 /* init */
3173 dev = kzalloc(sizeof *dev, GFP_KERNEL);
3174 if (!dev) {
3175 pr_err("%s: no memory for device structure\n", __func__);
3176 return -ENOMEM;
3177 }
3178 /* pci setup */
3179 if (pci_enable_device(pdev) < 0) {
3180 kfree(dev);
3181 pr_err("%s: pci_enable_device failed\n", __func__);
3182 return -ENODEV;
3183 }
3184 dev->active = 1;
3185 pci_set_drvdata(pdev, dev);
3186
3187 /* PCI resource allocation */
3188 resource = pci_resource_start(pdev, 1);
3189 len = pci_resource_len(pdev, 1);
3190
3191 if (!request_mem_region(resource, len, KBUILD_MODNAME)) {
3192 dev_err(&pdev->dev, "%s: pci device used already\n", __func__);
3193 retval = -EBUSY;
3194 goto finished;
3195 }
3196 dev->phys_addr = resource;
3197 dev->mem_region = 1;
3198
3199 dev->base_addr = ioremap_nocache(resource, len);
3200 if (!dev->base_addr) {
3201 pr_err("%s: device memory cannot be mapped\n", __func__);
3202 retval = -ENOMEM;
3203 goto finished;
3204 }
3205 if (!pdev->irq) {
3206 dev_err(&pdev->dev, "%s: irq not set\n", __func__);
3207 retval = -ENODEV;
3208 goto finished;
3209 }
3210 pch_udc = dev;
3211 /* initialize the hardware */
3212 if (pch_udc_pcd_init(dev)) {
3213 retval = -ENODEV;
3214 goto finished;
3215 }
3216 if (request_irq(pdev->irq, pch_udc_isr, IRQF_SHARED, KBUILD_MODNAME,
3217 dev)) {
3218 dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
3219 pdev->irq);
3220 retval = -ENODEV;
3221 goto finished;
3222 }
3223 dev->irq = pdev->irq;
3224 dev->irq_registered = 1;
3225
3226 pci_set_master(pdev);
3227 pci_try_set_mwi(pdev);
3228
3229 /* device struct setup */
3230 spin_lock_init(&dev->lock);
3231 dev->pdev = pdev;
3232 dev->gadget.ops = &pch_udc_ops;
3233
3234 retval = init_dma_pools(dev);
3235 if (retval)
3236 goto finished;
3237
3238 dev_set_name(&dev->gadget.dev, "gadget");
3239 dev->gadget.dev.parent = &pdev->dev;
3240 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
3241 dev->gadget.dev.release = gadget_release;
3242 dev->gadget.name = KBUILD_MODNAME;
3243 dev->gadget.max_speed = USB_SPEED_HIGH;
3244
3245 retval = device_register(&dev->gadget.dev);
3246 if (retval)
3247 goto finished;
3248 dev->registered = 1;
3249
3250 /* Put the device in disconnected state till a driver is bound */
3251 pch_udc_set_disconnect(dev);
3252 retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
3253 if (retval)
3254 goto finished;
3255 return 0;
3256
3257 finished:
3258 pch_udc_remove(pdev);
3259 return retval;
3260 }
3261
3262 static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
3263 {
3264 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
3265 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3266 .class_mask = 0xffffffff,
3267 },
3268 {
3269 PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
3270 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3271 .class_mask = 0xffffffff,
3272 },
3273 {
3274 PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
3275 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3276 .class_mask = 0xffffffff,
3277 },
3278 { 0 },
3279 };
3280
3281 MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
3282
3283 static struct pci_driver pch_udc_driver = {
3284 .name = KBUILD_MODNAME,
3285 .id_table = pch_udc_pcidev_id,
3286 .probe = pch_udc_probe,
3287 .remove = pch_udc_remove,
3288 .suspend = pch_udc_suspend,
3289 .resume = pch_udc_resume,
3290 .shutdown = pch_udc_shutdown,
3291 };
3292
3293 module_pci_driver(pch_udc_driver);
3294
3295 MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
3296 MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
3297 MODULE_LICENSE("GPL");