Merge branch 'for-linus-4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/mason...
[deliverable/linux.git] / drivers / usb / gadget / udc / bcm63xx_udc.c
CommitLineData
613065e5
KC
1/*
2 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
3 *
4 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
5 * Copyright (C) 2012 Broadcom Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#include <linux/bitops.h>
14#include <linux/bug.h>
15#include <linux/clk.h>
16#include <linux/compiler.h>
17#include <linux/debugfs.h>
18#include <linux/delay.h>
19#include <linux/device.h>
20#include <linux/dma-mapping.h>
21#include <linux/errno.h>
613065e5
KC
22#include <linux/interrupt.h>
23#include <linux/ioport.h>
24#include <linux/kconfig.h>
25#include <linux/kernel.h>
26#include <linux/list.h>
27#include <linux/module.h>
28#include <linux/moduleparam.h>
29#include <linux/platform_device.h>
30#include <linux/sched.h>
31#include <linux/seq_file.h>
32#include <linux/slab.h>
33#include <linux/timer.h>
34#include <linux/usb/ch9.h>
35#include <linux/usb/gadget.h>
36#include <linux/workqueue.h>
37
38#include <bcm63xx_cpu.h>
39#include <bcm63xx_iudma.h>
40#include <bcm63xx_dev_usb_usbd.h>
41#include <bcm63xx_io.h>
42#include <bcm63xx_regs.h>
43
44#define DRV_MODULE_NAME "bcm63xx_udc"
45
46static const char bcm63xx_ep0name[] = "ep0";
1b0ba527
RB
47
/* Endpoint names/capabilities; index i corresponds to udc->bep[i]. */
static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} bcm63xx_ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO(bcm63xx_ep0name,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep1in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4out-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};
71
/* NOTE(review): forces full-speed-only operation -- consumed elsewhere in
 * this file; confirm against the probe/strap logic. */
static bool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");

/*
 * RX IRQ coalescing options:
 *
 * false (default) - one IRQ per DATAx packet.  Slow but reliable.  The
 * driver is able to pass the "testusb" suite and recover from conditions like:
 *
 *   1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
 *   2) Host sends 512 bytes of data
 *   3) Host decides to reconfigure the device and sends SET_INTERFACE
 *   4) Device shuts down the endpoint and cancels the RX transaction
 *
 * true - one IRQ per transfer, for transfers <= 2048B.  Generates
 * considerably fewer IRQs, but error recovery is less robust.  Does not
 * reliably pass "testusb".
 *
 * TX always uses coalescing, because we can cancel partially complete TX
 * transfers by repeatedly flushing the FIFO.  The hardware doesn't allow
 * this on RX.
 */
static bool irq_coalesce;
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
98
/* These counts match the bep[] and iudma[] array sizes in struct bcm63xx_udc */
#define BCM63XX_NUM_EP		5
#define BCM63XX_NUM_IUDMA	6
#define BCM63XX_NUM_FIFO_PAIRS	3

#define IUDMA_RESET_TIMEOUT_US	10000

/* ep0 is bidirectional and therefore owns two IUDMA channels */
#define IUDMA_EP0_RXCHAN	0
#define IUDMA_EP0_TXCHAN	1

/* largest data fragment queued on a single BD (see iudma_write()) */
#define IUDMA_MAX_FRAGMENT	2048
#define BCM63XX_MAX_CTRL_PKT	64

/* endpoint type codes programmed into the USBD typemap/CSR registers */
#define BCMEP_CTRL		0x00
#define BCMEP_ISOC		0x01
#define BCMEP_BULK		0x02
#define BCMEP_INTR		0x03

#define BCMEP_OUT		0x00
#define BCMEP_IN		0x01

/* speed strap values (see bcm63xx_init_udc_hw()) */
#define BCM63XX_SPD_FULL	1
#define BCM63XX_SPD_HIGH	0

/* offsets of the DMA channel config/state blocks within iudma_regs */
#define IUDMA_DMAC_OFFSET	0x200
#define IUDMA_DMAS_OFFSET	0x400
124
/* ep0 state machine states; kept in sync with bcm63xx_ep0_state_names[] */
enum bcm63xx_ep0_state {
	EP0_REQUEUE,
	EP0_IDLE,
	EP0_IN_DATA_PHASE_SETUP,
	EP0_IN_DATA_PHASE_COMPLETE,
	EP0_OUT_DATA_PHASE_SETUP,
	EP0_OUT_DATA_PHASE_COMPLETE,
	EP0_OUT_STATUS_PHASE,
	EP0_IN_FAKE_STATUS_PHASE,
	EP0_SHUTDOWN,
};
136
/* debug strings; order must match enum bcm63xx_ep0_state above */
static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
	"REQUEUE",
	"IDLE",
	"IN_DATA_PHASE_SETUP",
	"IN_DATA_PHASE_COMPLETE",
	"OUT_DATA_PHASE_SETUP",
	"OUT_DATA_PHASE_COMPLETE",
	"OUT_STATUS_PHASE",
	"IN_FAKE_STATUS_PHASE",
	"SHUTDOWN",
};
148
149/**
150 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
151 * @ep_num: USB endpoint number.
152 * @n_bds: Number of buffer descriptors in the ring.
153 * @ep_type: Endpoint type (control, bulk, interrupt).
154 * @dir: Direction (in, out).
155 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
156 * @max_pkt_hs: Maximum packet size in high speed mode.
157 * @max_pkt_fs: Maximum packet size in full speed mode.
158 */
159struct iudma_ch_cfg {
160 int ep_num;
161 int n_bds;
162 int ep_type;
163 int dir;
164 int n_fifo_slots;
165 int max_pkt_hs;
166 int max_pkt_fs;
167};
168
169static const struct iudma_ch_cfg iudma_defaults[] = {
170
171 /* This controller was designed to support a CDC/RNDIS application.
172 It may be possible to reconfigure some of the endpoints, but
173 the hardware limitations (FIFO sizing and number of DMA channels)
174 may significantly impact flexibility and/or stability. Change
175 these values at your own risk.
176
177 ep_num ep_type n_fifo_slots max_pkt_fs
178 idx | n_bds | dir | max_pkt_hs |
179 | | | | | | | | */
180 [0] = { -1, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
181 [1] = { 0, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
182 [2] = { 2, 16, BCMEP_BULK, BCMEP_OUT, 128, 512, 64 },
183 [3] = { 1, 16, BCMEP_BULK, BCMEP_IN, 128, 512, 64 },
184 [4] = { 4, 4, BCMEP_INTR, BCMEP_OUT, 32, 64, 64 },
185 [5] = { 3, 4, BCMEP_INTR, BCMEP_IN, 32, 64, 64 },
186};
187
188struct bcm63xx_udc;
189
190/**
191 * struct iudma_ch - Represents the current state of a single IUDMA channel.
192 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
193 * @ep_num: USB endpoint number. -1 for ep0 RX.
194 * @enabled: Whether bcm63xx_ep_enable() has been called.
195 * @max_pkt: "Chunk size" on the USB interface. Based on interface speed.
196 * @is_tx: true for TX, false for RX.
197 * @bep: Pointer to the associated endpoint. NULL for ep0 RX.
198 * @udc: Reference to the device controller.
199 * @read_bd: Next buffer descriptor to reap from the hardware.
200 * @write_bd: Next BD available for a new packet.
201 * @end_bd: Points to the final BD in the ring.
202 * @n_bds_used: Number of BD entries currently occupied.
203 * @bd_ring: Base pointer to the BD ring.
204 * @bd_ring_dma: Physical (DMA) address of bd_ring.
205 * @n_bds: Total number of BDs in the ring.
206 *
207 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
208 * bidirectional. The "struct usb_ep" associated with ep0 is for TX (IN)
209 * only.
210 *
211 * Each bulk/intr endpoint has a single IUDMA channel and a single
212 * struct usb_ep.
213 */
214struct iudma_ch {
215 unsigned int ch_idx;
216 int ep_num;
217 bool enabled;
218 int max_pkt;
219 bool is_tx;
220 struct bcm63xx_ep *bep;
221 struct bcm63xx_udc *udc;
222
223 struct bcm_enet_desc *read_bd;
224 struct bcm_enet_desc *write_bd;
225 struct bcm_enet_desc *end_bd;
226 int n_bds_used;
227
228 struct bcm_enet_desc *bd_ring;
229 dma_addr_t bd_ring_dma;
230 unsigned int n_bds;
231};
232
233/**
234 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
235 * @ep_num: USB endpoint number.
236 * @iudma: Pointer to IUDMA channel state.
237 * @ep: USB gadget layer representation of the EP.
238 * @udc: Reference to the device controller.
239 * @queue: Linked list of outstanding requests for this EP.
240 * @halted: 1 if the EP is stalled; 0 otherwise.
241 */
242struct bcm63xx_ep {
243 unsigned int ep_num;
244 struct iudma_ch *iudma;
245 struct usb_ep ep;
246 struct bcm63xx_udc *udc;
247 struct list_head queue;
248 unsigned halted:1;
249};
250
251/**
252 * struct bcm63xx_req - Internal (driver) state of a single request.
253 * @queue: Links back to the EP's request list.
254 * @req: USB gadget layer representation of the request.
255 * @offset: Current byte offset into the data buffer (next byte to queue).
256 * @bd_bytes: Number of data bytes in outstanding BD entries.
257 * @iudma: IUDMA channel used for the request.
258 */
259struct bcm63xx_req {
260 struct list_head queue; /* ep's requests */
261 struct usb_request req;
262 unsigned int offset;
263 unsigned int bd_bytes;
264 struct iudma_ch *iudma;
265};
266
/**
 * struct bcm63xx_udc - Driver/hardware private context.
 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
 * @dev: Generic Linux device structure.
 * @pd: Platform data (board/port info).
 * @usbd_clk: Clock descriptor for the USB device block.
 * @usbh_clk: Clock descriptor for the USB host block.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 * @usbd_regs: Base address of the USBD/USB20D block.
 * @iudma_regs: Base address of the USBD's associated IUDMA block.
 * @bep: Array of endpoints, including ep0.
 * @iudma: Array of all IUDMA channels used by this controller.
 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
 * @iface: USB interface number, from SET_INTERFACE wIndex.
 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
 * @ep0state: Current state of the ep0 state machine.
 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
 * @wedgemap: Bitmap of wedged endpoints.
 * @ep0_req_reset: USB reset is pending.
 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
 * @ep0_reply: Pending reply from gadget driver.
 * @ep0_request: Outstanding ep0 request.
 * @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
 * @debugfs_usbd: debugfs file "usbd" for controller state.
 * @debugfs_iudma: debugfs file "iudma" for IUDMA state.
 */
struct bcm63xx_udc {
	spinlock_t lock;

	struct device *dev;
	struct bcm63xx_usbd_platform_data *pd;
	struct clk *usbd_clk;
	struct clk *usbh_clk;

	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;

	void __iomem *usbd_regs;
	void __iomem *iudma_regs;

	struct bcm63xx_ep bep[BCM63XX_NUM_EP];
	struct iudma_ch iudma[BCM63XX_NUM_IUDMA];

	int cfg;
	int iface;
	int alt_iface;

	struct bcm63xx_req ep0_ctrl_req;
	u8 *ep0_ctrl_buf;

	int ep0state;
	struct work_struct ep0_wq;

	unsigned long wedgemap;

	unsigned ep0_req_reset:1;
	unsigned ep0_req_set_cfg:1;
	unsigned ep0_req_set_iface:1;
	unsigned ep0_req_shutdown:1;

	unsigned ep0_req_completed:1;
	struct usb_request *ep0_reply;
	struct usb_request *ep0_request;

	struct dentry *debugfs_root;
	struct dentry *debugfs_usbd;
	struct dentry *debugfs_iudma;
};
341
342static const struct usb_ep_ops bcm63xx_udc_ep_ops;
343
344/***********************************************************************
345 * Convenience functions
346 ***********************************************************************/
347
/* Map a gadget-layer usb_gadget back to its containing bcm63xx_udc. */
static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
{
	return container_of(g, struct bcm63xx_udc, gadget);
}
352
/* Map a gadget-layer usb_ep back to the driver's bcm63xx_ep. */
static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct bcm63xx_ep, ep);
}
357
/* Map a gadget-layer usb_request back to the driver's bcm63xx_req. */
static inline struct bcm63xx_req *our_req(struct usb_request *req)
{
	return container_of(req, struct bcm63xx_req, req);
}
362
/* Read a 32-bit register in the USBD core block. */
static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->usbd_regs + off);
}
367
/* Write a 32-bit register in the USBD core block. */
static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->usbd_regs + off);
}
372
/* Read a 32-bit register in the shared (global) IUDMA block. */
static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + off);
}
377
/* Write a 32-bit register in the shared (global) IUDMA block. */
static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + off);
}
382
/* Read a per-channel IUDMA config (DMAC) register for channel @chan. */
static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}
388
2d1f7af3
FF
389static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
390 int chan)
613065e5 391{
2d1f7af3
FF
392 bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
393 (ENETDMA_CHAN_WIDTH * chan));
613065e5
KC
394}
395
/* Read a per-channel IUDMA state RAM (DMAS) register for channel @chan. */
static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}
401
2d1f7af3
FF
402static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
403 int chan)
613065e5 404{
2d1f7af3
FF
405 bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
406 (ENETDMA_CHAN_WIDTH * chan));
613065e5
KC
407}
408
/*
 * Gate both USB block clocks on or off.  Enable order is host then device;
 * disable is the reverse.  The 10us delay gives the hardware time to settle
 * before register access (NOTE(review): delay value presumably inherited
 * from vendor code -- confirm against the datasheet).
 */
static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
{
	if (is_enabled) {
		clk_enable(udc->usbh_clk);
		clk_enable(udc->usbd_clk);
		udelay(10);
	} else {
		clk_disable(udc->usbd_clk);
		clk_disable(udc->usbh_clk);
	}
}
420
421/***********************************************************************
422 * Low-level IUDMA / FIFO operations
423 ***********************************************************************/
424
/**
 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
 * @udc: Reference to the device controller.
 * @idx: Desired init_sel value.
 *
 * The "init_sel" signal is used as a selection index for both endpoints
 * and IUDMA channels.  Since these do not map 1:1, the use of this signal
 * depends on the context.
 */
static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
{
	u32 val = usbd_readl(udc, USBD_CONTROL_REG);

	/* read-modify-write: only the INIT_SEL field changes */
	val &= ~USBD_CONTROL_INIT_SEL_MASK;
	val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
	usbd_writel(udc, val, USBD_CONTROL_REG);
}
442
443/**
444 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
445 * @udc: Reference to the device controller.
446 * @bep: Endpoint on which to operate.
447 * @is_stalled: true to enable stall, false to disable.
448 *
449 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
450 * halt/stall conditions.
451 */
452static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
453 bool is_stalled)
454{
455 u32 val;
456
457 val = USBD_STALL_UPDATE_MASK |
458 (is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
459 (bep->ep_num << USBD_STALL_EPNUM_SHIFT);
460 usbd_writel(udc, val, USBD_STALL_REG);
461}
462
/**
 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
 * @udc: Reference to the device controller.
 *
 * These parameters depend on the USB link speed.  Settings are
 * per-IUDMA-channel-pair.
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
	u32 i, val, rx_fifo_slot, tx_fifo_slot;

	/* set up FIFO boundaries and packet sizes; this is done in pairs */
	rx_fifo_slot = tx_fifo_slot = 0;
	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

		bcm63xx_ep_dma_select(udc, i >> 1);

		/* FIFO slots are handed out contiguously, pair by pair */
		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
			((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
			 USBD_RXFIFO_CONFIG_END_SHIFT);
		rx_fifo_slot += rx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
			    USBD_RXFIFO_EPSIZE_REG);

		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
			((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
			 USBD_TXFIFO_CONFIG_END_SHIFT);
		tx_fifo_slot += tx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
			    USBD_TXFIFO_EPSIZE_REG);

		/* NOTE(review): readback presumably posts the writes; confirm */
		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
	}
}
504
/**
 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
 * @udc: Reference to the device controller.
 * @ep_num: Endpoint number.
 */
static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
{
	u32 val;

	bcm63xx_ep_dma_select(udc, ep_num);

	val = usbd_readl(udc, USBD_CONTROL_REG);
	val |= USBD_CONTROL_FIFO_RESET_MASK;
	usbd_writel(udc, val, USBD_CONTROL_REG);
	/* NOTE(review): readback presumably posts the write; confirm */
	usbd_readl(udc, USBD_CONTROL_REG);
}
521
522/**
523 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
524 * @udc: Reference to the device controller.
525 */
526static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
527{
528 int i;
529
530 for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
531 bcm63xx_fifo_reset_ep(udc, i);
532}
533
/**
 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
 * @udc: Reference to the device controller.
 *
 * Programs the type and DMA channel pair mapping for each endpoint.
 */
static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
{
	u32 i, val;

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];

		/* ep_num < 0 is the ep0 RX channel; no typemap entry for it */
		if (cfg->ep_num < 0)
			continue;

		bcm63xx_ep_dma_select(udc, cfg->ep_num);
		/* channels come in RX/TX pairs, so the pair index is i / 2 */
		val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
			((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
		usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
	}
}
554
/**
 * bcm63xx_ep_setup - Configure per-endpoint settings.
 * @udc: Reference to the device controller.
 *
 * This needs to be rerun if the speed/cfg/intf/altintf changes.
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
	u32 val, i;

	usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
		/* packet size depends on the negotiated link speed */
		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
			      cfg->max_pkt_hs : cfg->max_pkt_fs;
		int idx = cfg->ep_num;

		udc->iudma[i].max_pkt = max_pkt;

		/* idx < 0 is the ep0 RX channel; it has no usb_ep / CSR slot */
		if (idx < 0)
			continue;
		usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);

		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
		      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
		      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
		      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
		      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
		      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
		      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
	}
}
589
/**
 * iudma_write - Queue a single IUDMA transaction.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 * @breq: Request containing the transaction data.
 *
 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
 * So iudma_write() may be called several times to fulfill a single
 * usb_request.
 *
 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
	struct bcm63xx_req *breq)
{
	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
	unsigned int bytes_left = breq->req.length - breq->offset;
	/* non-coalesced RX takes one IRQ per packet -> cap each BD at max_pkt */
	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
		iudma->max_pkt : IUDMA_MAX_FRAGMENT;

	iudma->n_bds_used = 0;
	breq->bd_bytes = 0;
	breq->iudma = iudma;

	/* gadget requested a terminating ZLP on a packet-aligned transfer */
	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
		extra_zero_pkt = 1;

	do {
		struct bcm_enet_desc *d = iudma->write_bd;
		u32 dmaflags = 0;
		unsigned int n_bytes;

		/* advance write_bd, wrapping at the end of the ring */
		if (d == iudma->end_bd) {
			dmaflags |= DMADESC_WRAP_MASK;
			iudma->write_bd = iudma->bd_ring;
		} else {
			iudma->write_bd++;
		}
		iudma->n_bds_used++;

		n_bytes = min_t(int, bytes_left, max_bd_bytes);
		if (n_bytes)
			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
		else
			/* zero-length packet descriptor */
			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
				    DMADESC_USB_ZERO_MASK;

		dmaflags |= DMADESC_OWNER_MASK;
		if (first_bd) {
			dmaflags |= DMADESC_SOP_MASK;
			first_bd = 0;
		}

		/*
		 * extra_zero_pkt forces one more iteration through the loop
		 * after all data is queued up, to send the zero packet
		 */
		if (extra_zero_pkt && !bytes_left)
			extra_zero_pkt = 0;

		/* RX always stops after one BD; TX stops at ring/data end */
		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
		    (n_bytes == bytes_left && !extra_zero_pkt)) {
			last_bd = 1;
			dmaflags |= DMADESC_EOP_MASK;
		}

		d->address = breq->req.dma + breq->offset;
		mb();	/* address must be visible before ownership flips */
		d->len_stat = dmaflags;

		breq->offset += n_bytes;
		breq->bd_bytes += n_bytes;
		bytes_left -= n_bytes;
	} while (!last_bd);

	/* kick the channel */
	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
			ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}
669
/**
 * iudma_read - Check for IUDMA buffer completion.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 *
 * This checks to see if ALL of the outstanding BDs on the DMA channel
 * have been filled.  If so, it returns the actual transfer length;
 * otherwise it returns -EBUSY.
 */
static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int i, actual_len = 0;
	struct bcm_enet_desc *d = iudma->read_bd;

	/* no BDs outstanding on this channel */
	if (!iudma->n_bds_used)
		return -EINVAL;

	for (i = 0; i < iudma->n_bds_used; i++) {
		u32 dmaflags;

		dmaflags = d->len_stat;

		/* hardware still owns this BD -> transfer not complete yet */
		if (dmaflags & DMADESC_OWNER_MASK)
			return -EBUSY;

		actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
			      DMADESC_LENGTH_SHIFT;
		/* walk the ring, wrapping at the final BD */
		if (d == iudma->end_bd)
			d = iudma->bd_ring;
		else
			d++;
	}

	/* all BDs reaped; reset bookkeeping for the next transaction */
	iudma->read_bd = d;
	iudma->n_bds_used = 0;
	return actual_len;
}
707
/**
 * iudma_reset_channel - Stop DMA on a single channel.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to reset.
 *
 * Disables the channel, waits (up to IUDMA_RESET_TIMEOUT_US) for the
 * hardware to quiesce, then clears the BD ring and reprograms the
 * channel's IRQ mask, burst size, and ring base address.
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int timeout = IUDMA_RESET_TIMEOUT_US;
	struct bcm_enet_desc *d;
	int ch_idx = iudma->ch_idx;

	/* for RX, flush the endpoint FIFO first (ep0 RX uses ep_num == -1) */
	if (!iudma->is_tx)
		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

	/* stop DMA, then wait for the hardware to wrap up */
	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);

	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
				ENETDMAC_CHANCFG_EN_MASK) {
		udelay(1);

		/* repeatedly flush the FIFO data until the BD completes */
		if (iudma->is_tx && iudma->ep_num >= 0)
			bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

		if (!timeout--) {
			dev_err(udc->dev, "can't reset IUDMA channel %d\n",
				ch_idx);
			break;
		}
		/* halfway through the timeout, escalate to a forced halt */
		if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
			dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
				 ch_idx);
			usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
					ENETDMAC_CHANCFG_REG, ch_idx);
		}
	}
	/* clear all pending per-channel interrupts */
	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);

	/* don't leave "live" HW-owned entries for the next guy to step on */
	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
		d->len_stat = 0;
	mb();

	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
	iudma->n_bds_used = 0;

	/* set up IRQs, UBUS burst size, and BD base for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IRMASK_REG, ch_idx);
	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);

	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}
763
/**
 * iudma_init_channel - One-time IUDMA channel initialization.
 * @udc: Reference to the device controller.
 * @ch_idx: Channel to initialize.
 *
 * Return: 0 on success, -ENOMEM if the BD ring cannot be allocated.
 */
static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
{
	struct iudma_ch *iudma = &udc->iudma[ch_idx];
	const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
	unsigned int n_bds = cfg->n_bds;
	struct bcm63xx_ep *bep = NULL;

	iudma->ep_num = cfg->ep_num;
	iudma->ch_idx = ch_idx;
	/* odd channel indices are TX (see the iudma_defaults[] table) */
	iudma->is_tx = !!(ch_idx & 0x01);
	if (iudma->ep_num >= 0) {
		bep = &udc->bep[iudma->ep_num];
		bep->iudma = iudma;
		INIT_LIST_HEAD(&bep->queue);
	}

	iudma->bep = bep;
	iudma->udc = udc;

	/* ep0 is always active; others are controlled by the gadget driver */
	if (iudma->ep_num <= 0)
		iudma->enabled = true;

	iudma->n_bds = n_bds;
	/* device-managed coherent memory: freed automatically on detach */
	iudma->bd_ring = dmam_alloc_coherent(udc->dev,
		n_bds * sizeof(struct bcm_enet_desc),
		&iudma->bd_ring_dma, GFP_KERNEL);
	if (!iudma->bd_ring)
		return -ENOMEM;
	iudma->end_bd = &iudma->bd_ring[n_bds - 1];

	return 0;
}
802
/**
 * iudma_init - One-time initialization of all IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Enable DMA, flush channels, and enable global IUDMA IRQs.
 *
 * Return: 0 on success, negative errno if a channel fails to initialize.
 */
static int iudma_init(struct bcm63xx_udc *udc)
{
	int i, rc;

	usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		rc = iudma_init_channel(udc, i);
		if (rc)
			return rc;
		iudma_reset_channel(udc, &udc->iudma[i]);
	}

	/* unmask the global IRQ for all channels: bits [0..NUM_IUDMA-1] */
	usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
	return 0;
}
825
/**
 * iudma_uninit - Uninitialize IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Kill global IUDMA IRQs, flush channels, and kill DMA.
 */
static void iudma_uninit(struct bcm63xx_udc *udc)
{
	int i;

	/* mask first so a channel reset cannot raise a late interrupt */
	usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
		iudma_reset_channel(udc, &udc->iudma[i]);

	usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
}
843
844/***********************************************************************
845 * Other low-level USBD operations
846 ***********************************************************************/
847
/**
 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
 * @udc: Reference to the device controller.
 * @enable_irqs: true to enable, false to disable.
 */
static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
{
	u32 val;

	usbd_writel(udc, 0, USBD_STATUS_REG);

	val = BIT(USBD_EVENT_IRQ_USB_RESET) |
	      BIT(USBD_EVENT_IRQ_SETUP) |
	      BIT(USBD_EVENT_IRQ_SETCFG) |
	      BIT(USBD_EVENT_IRQ_SETINTF) |
	      BIT(USBD_EVENT_IRQ_USB_LINK);
	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
	/* NOTE(review): presumably write-1-to-clear, acking any stale events
	 * in the managed set -- confirm against the USBD register docs */
	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
}
867
/**
 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 * @udc: Reference to the device controller.
 * @is_device: true for device, false for host.
 *
 * This should probably be reworked to use the drivers/usb/otg
 * infrastructure.
 *
 * By default, the AFE/pullups are disabled in device mode, until
 * bcm63xx_select_pullup() is called.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	if (BCMCPU_IS_6328()) {
		/* configure pinmux to sense VBUS signal */
		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
			   GPIO_PINMUX_OTHR_6328_USB_HOST;
		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
	}

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_device) {
		/* NODRIV keeps the pullup off until bcm63xx_select_pullup() */
		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	} else {
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	}
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
	if (is_device)
		val |= USBH_PRIV_SWAP_USBD_MASK;
	else
		val &= ~USBH_PRIV_SWAP_USBD_MASK;
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}
909
910/**
911 * bcm63xx_select_pullup - Enable/disable the pullup on D+
912 * @udc: Reference to the device controller.
913 * @is_on: true to enable the pullup, false to disable.
914 *
915 * If the pullup is active, the host will sense a FS/HS device connected to
916 * the port. If the pullup is inactive, the host will think the USB
917 * device has been disconnected.
918 */
919static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
920{
921 u32 val, portmask = BIT(udc->pd->port_no);
922
923 val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
924 if (is_on)
925 val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
926 else
927 val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
928 bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
929}
930
/**
 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
 * @udc: Reference to the device controller.
 *
 * This just masks the IUDMA IRQs and releases the clocks.  It is assumed
 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
 */
static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
{
	/* clocks must be running while iudma_uninit() touches registers */
	set_clocks(udc, true);
	iudma_uninit(udc);
	set_clocks(udc, false);

	clk_put(udc->usbd_clk);
	clk_put(udc->usbh_clk);
}
947
948/**
949 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
950 * @udc: Reference to the device controller.
951 */
952static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
953{
954 int i, rc = 0;
955 u32 val;
956
957 udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
958 GFP_KERNEL);
959 if (!udc->ep0_ctrl_buf)
960 return -ENOMEM;
961
962 INIT_LIST_HEAD(&udc->gadget.ep_list);
963 for (i = 0; i < BCM63XX_NUM_EP; i++) {
964 struct bcm63xx_ep *bep = &udc->bep[i];
965
1b0ba527
RB
966 bep->ep.name = bcm63xx_ep_info[i].name;
967 bep->ep.caps = bcm63xx_ep_info[i].caps;
613065e5
KC
968 bep->ep_num = i;
969 bep->ep.ops = &bcm63xx_udc_ep_ops;
970 list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
971 bep->halted = 0;
e117e742 972 usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
613065e5
KC
973 bep->udc = udc;
974 bep->ep.desc = NULL;
975 INIT_LIST_HEAD(&bep->queue);
976 }
977
978 udc->gadget.ep0 = &udc->bep[0].ep;
979 list_del(&udc->bep[0].ep.ep_list);
980
981 udc->gadget.speed = USB_SPEED_UNKNOWN;
982 udc->ep0state = EP0_SHUTDOWN;
983
984 udc->usbh_clk = clk_get(udc->dev, "usbh");
985 if (IS_ERR(udc->usbh_clk))
986 return -EIO;
987
988 udc->usbd_clk = clk_get(udc->dev, "usbd");
989 if (IS_ERR(udc->usbd_clk)) {
990 clk_put(udc->usbh_clk);
991 return -EIO;
992 }
993
994 set_clocks(udc, true);
995
996 val = USBD_CONTROL_AUTO_CSRS_MASK |
997 USBD_CONTROL_DONE_CSRS_MASK |
998 (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
999 usbd_writel(udc, val, USBD_CONTROL_REG);
1000
1001 val = USBD_STRAPS_APP_SELF_PWR_MASK |
1002 USBD_STRAPS_APP_RAM_IF_MASK |
1003 USBD_STRAPS_APP_CSRPRGSUP_MASK |
1004 USBD_STRAPS_APP_8BITPHY_MASK |
1005 USBD_STRAPS_APP_RMTWKUP_MASK;
1006
1007 if (udc->gadget.max_speed == USB_SPEED_HIGH)
1008 val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
1009 else
1010 val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
1011 usbd_writel(udc, val, USBD_STRAPS_REG);
1012
1013 bcm63xx_set_ctrl_irqs(udc, false);
1014
1015 usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);
1016
1017 val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
1018 USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
1019 usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);
1020
1021 rc = iudma_init(udc);
1022 set_clocks(udc, false);
1023 if (rc)
1024 bcm63xx_uninit_udc_hw(udc);
1025
1026 return 0;
1027}
1028
1029/***********************************************************************
1030 * Standard EP gadget operations
1031 ***********************************************************************/
1032
1033/**
1034 * bcm63xx_ep_enable - Enable one endpoint.
1035 * @ep: Endpoint to enable.
1036 * @desc: Contains max packet, direction, etc.
1037 *
1038 * Most of the endpoint parameters are fixed in this controller, so there
1039 * isn't much for this function to do.
1040 */
1041static int bcm63xx_ep_enable(struct usb_ep *ep,
1042 const struct usb_endpoint_descriptor *desc)
1043{
1044 struct bcm63xx_ep *bep = our_ep(ep);
1045 struct bcm63xx_udc *udc = bep->udc;
1046 struct iudma_ch *iudma = bep->iudma;
1047 unsigned long flags;
1048
1049 if (!ep || !desc || ep->name == bcm63xx_ep0name)
1050 return -EINVAL;
1051
1052 if (!udc->driver)
1053 return -ESHUTDOWN;
1054
1055 spin_lock_irqsave(&udc->lock, flags);
1056 if (iudma->enabled) {
1057 spin_unlock_irqrestore(&udc->lock, flags);
1058 return -EINVAL;
1059 }
1060
1061 iudma->enabled = true;
1062 BUG_ON(!list_empty(&bep->queue));
1063
1064 iudma_reset_channel(udc, iudma);
1065
1066 bep->halted = 0;
1067 bcm63xx_set_stall(udc, bep, false);
1068 clear_bit(bep->ep_num, &udc->wedgemap);
1069
1070 ep->desc = desc;
1071 ep->maxpacket = usb_endpoint_maxp(desc);
1072
1073 spin_unlock_irqrestore(&udc->lock, flags);
1074 return 0;
1075}
1076
1077/**
1078 * bcm63xx_ep_disable - Disable one endpoint.
1079 * @ep: Endpoint to disable.
1080 */
1081static int bcm63xx_ep_disable(struct usb_ep *ep)
1082{
1083 struct bcm63xx_ep *bep = our_ep(ep);
1084 struct bcm63xx_udc *udc = bep->udc;
1085 struct iudma_ch *iudma = bep->iudma;
1086 struct list_head *pos, *n;
1087 unsigned long flags;
1088
1089 if (!ep || !ep->desc)
1090 return -EINVAL;
1091
1092 spin_lock_irqsave(&udc->lock, flags);
1093 if (!iudma->enabled) {
1094 spin_unlock_irqrestore(&udc->lock, flags);
1095 return -EINVAL;
1096 }
1097 iudma->enabled = false;
1098
1099 iudma_reset_channel(udc, iudma);
1100
1101 if (!list_empty(&bep->queue)) {
1102 list_for_each_safe(pos, n, &bep->queue) {
1103 struct bcm63xx_req *breq =
1104 list_entry(pos, struct bcm63xx_req, queue);
1105
1106 usb_gadget_unmap_request(&udc->gadget, &breq->req,
1107 iudma->is_tx);
1108 list_del(&breq->queue);
1109 breq->req.status = -ESHUTDOWN;
1110
1111 spin_unlock_irqrestore(&udc->lock, flags);
304f7e5e 1112 usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
613065e5
KC
1113 spin_lock_irqsave(&udc->lock, flags);
1114 }
1115 }
1116 ep->desc = NULL;
1117
1118 spin_unlock_irqrestore(&udc->lock, flags);
1119 return 0;
1120}
1121
1122/**
1123 * bcm63xx_udc_alloc_request - Allocate a new request.
1124 * @ep: Endpoint associated with the request.
1125 * @mem_flags: Flags to pass to kzalloc().
1126 */
1127static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
1128 gfp_t mem_flags)
1129{
1130 struct bcm63xx_req *breq;
1131
1132 breq = kzalloc(sizeof(*breq), mem_flags);
1133 if (!breq)
1134 return NULL;
1135 return &breq->req;
1136}
1137
1138/**
1139 * bcm63xx_udc_free_request - Free a request.
1140 * @ep: Endpoint associated with the request.
1141 * @req: Request to free.
1142 */
/*
 * bcm63xx_udc_free_request - Free a request.
 * @ep: Endpoint associated with the request.
 * @req: Request to free (allocated by bcm63xx_udc_alloc_request()).
 */
static void bcm63xx_udc_free_request(struct usb_ep *ep,
	struct usb_request *req)
{
	kfree(our_req(req));
}
1149
1150/**
1151 * bcm63xx_udc_queue - Queue up a new request.
1152 * @ep: Endpoint associated with the request.
1153 * @req: Request to add.
1154 * @mem_flags: Unused.
1155 *
1156 * If the queue is empty, start this request immediately. Otherwise, add
1157 * it to the list.
1158 *
1159 * ep0 replies are sent through this function from the gadget driver, but
1160 * they are treated differently because they need to be handled by the ep0
1161 * state machine. (Sometimes they are replies to control requests that
1162 * were spoofed by this driver, and so they shouldn't be transmitted at all.)
1163 */
static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
	gfp_t mem_flags)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req);
	unsigned long flags;
	int rc = 0;

	if (unlikely(!req || !req->complete || !req->buf || !ep))
		return -EINVAL;

	req->actual = 0;
	req->status = 0;
	breq->offset = 0;

	/* ep0 requests are handed to the ep0 worker/state machine instead
	 * of being queued on an IUDMA channel directly */
	if (bep == &udc->bep[0]) {
		/* only one reply per request, please */
		if (udc->ep0_reply)
			return -EINVAL;

		udc->ep0_reply = req;
		schedule_work(&udc->ep0_wq);
		return 0;
	}

	spin_lock_irqsave(&udc->lock, flags);
	if (!bep->iudma->enabled) {
		rc = -ESHUTDOWN;
		goto out;
	}

	rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
	if (rc == 0) {
		list_add_tail(&breq->queue, &bep->queue);
		/* an idle channel is kicked immediately; otherwise the new
		 * request waits behind the current head of the queue */
		if (list_is_singular(&bep->queue))
			iudma_write(udc, bep->iudma, breq);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}
1207
1208/**
1209 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
1210 * @ep: Endpoint associated with the request.
1211 * @req: Request to remove.
1212 *
1213 * If the request is not at the head of the queue, this is easy - just nuke
1214 * it. If the request is at the head of the queue, we'll need to stop the
1215 * DMA transaction and then queue up the successor.
1216 */
static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req), *cur;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&udc->lock, flags);
	if (list_empty(&bep->queue)) {
		rc = -EINVAL;
		goto out;
	}

	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);

	if (breq == cur) {
		/* request is in flight: stop the DMA, then kick off the
		 * successor (if any) */
		iudma_reset_channel(udc, bep->iudma);
		list_del(&breq->queue);

		if (!list_empty(&bep->queue)) {
			struct bcm63xx_req *next;

			next = list_first_entry(&bep->queue,
				struct bcm63xx_req, queue);
			iudma_write(udc, bep->iudma, next);
		}
	} else {
		/* not at the head: just unlink it */
		list_del(&breq->queue);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);

	/*
	 * NOTE(review): the completion callback is invoked even when the
	 * queue was empty (rc == -EINVAL) and req was never unlinked —
	 * confirm that callers expect this.
	 */
	req->status = -ESHUTDOWN;
	req->complete(ep, req);

	return rc;
}
1257
1258/**
1259 * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
1260 * @ep: Endpoint to halt.
1261 * @value: Zero to clear halt; nonzero to set halt.
1262 *
1263 * See comments in bcm63xx_update_wedge().
1264 */
1265static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
1266{
1267 struct bcm63xx_ep *bep = our_ep(ep);
1268 struct bcm63xx_udc *udc = bep->udc;
1269 unsigned long flags;
1270
1271 spin_lock_irqsave(&udc->lock, flags);
1272 bcm63xx_set_stall(udc, bep, !!value);
1273 bep->halted = value;
1274 spin_unlock_irqrestore(&udc->lock, flags);
1275
1276 return 0;
1277}
1278
1279/**
1280 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
1281 * @ep: Endpoint to wedge.
1282 *
1283 * See comments in bcm63xx_update_wedge().
1284 */
1285static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
1286{
1287 struct bcm63xx_ep *bep = our_ep(ep);
1288 struct bcm63xx_udc *udc = bep->udc;
1289 unsigned long flags;
1290
1291 spin_lock_irqsave(&udc->lock, flags);
1292 set_bit(bep->ep_num, &udc->wedgemap);
1293 bcm63xx_set_stall(udc, bep, true);
1294 spin_unlock_irqrestore(&udc->lock, flags);
1295
1296 return 0;
1297}
1298
/* Endpoint operations exposed to the gadget core */
static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
	.enable		= bcm63xx_ep_enable,
	.disable	= bcm63xx_ep_disable,

	.alloc_request	= bcm63xx_udc_alloc_request,
	.free_request	= bcm63xx_udc_free_request,

	.queue		= bcm63xx_udc_queue,
	.dequeue	= bcm63xx_udc_dequeue,

	.set_halt	= bcm63xx_udc_set_halt,
	.set_wedge	= bcm63xx_udc_set_wedge,
};
1312
1313/***********************************************************************
1314 * EP0 handling
1315 ***********************************************************************/
1316
1317/**
1318 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
1319 * @udc: Reference to the device controller.
1320 * @ctrl: 8-byte SETUP request.
1321 */
static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
	struct usb_ctrlrequest *ctrl)
{
	int rc;

	/* the gadget driver's ->setup callback must run unlocked */
	spin_unlock_irq(&udc->lock);
	rc = udc->driver->setup(&udc->gadget, ctrl);
	spin_lock_irq(&udc->lock);
	return rc;
}
1332
1333/**
1334 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
1335 * @udc: Reference to the device controller.
1336 *
1337 * Many standard requests are handled automatically in the hardware, but
1338 * we still need to pass them to the gadget driver so that it can
1339 * reconfigure the interfaces/endpoints if necessary.
1340 *
1341 * Unfortunately we are not able to send a STALL response if the host
1342 * requests an invalid configuration. If this happens, we'll have to be
1343 * content with printing a warning.
1344 */
1345static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
1346{
1347 struct usb_ctrlrequest ctrl;
1348 int rc;
1349
1350 ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
1351 ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
1352 ctrl.wValue = cpu_to_le16(udc->cfg);
1353 ctrl.wIndex = 0;
1354 ctrl.wLength = 0;
1355
1356 rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1357 if (rc < 0) {
1358 dev_warn_ratelimited(udc->dev,
1359 "hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
1360 udc->cfg);
1361 }
1362 return rc;
1363}
1364
1365/**
1366 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
1367 * @udc: Reference to the device controller.
1368 */
1369static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
1370{
1371 struct usb_ctrlrequest ctrl;
1372 int rc;
1373
1374 ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
1375 ctrl.bRequest = USB_REQ_SET_INTERFACE;
1376 ctrl.wValue = cpu_to_le16(udc->alt_iface);
1377 ctrl.wIndex = cpu_to_le16(udc->iface);
1378 ctrl.wLength = 0;
1379
1380 rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1381 if (rc < 0) {
1382 dev_warn_ratelimited(udc->dev,
1383 "hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
1384 udc->iface, udc->alt_iface);
1385 }
1386 return rc;
1387}
1388
1389/**
1390 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
1391 * @udc: Reference to the device controller.
1392 * @ch_idx: IUDMA channel number.
1393 * @req: USB gadget layer representation of the request.
1394 */
static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	struct iudma_ch *iudma = &udc->iudma[ch_idx];

	/* only one ep0 request may be outstanding at a time */
	BUG_ON(udc->ep0_request);
	udc->ep0_request = req;

	req->actual = 0;
	breq->offset = 0;
	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
	iudma_write(udc, iudma, breq);
}
1409
1410/**
1411 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
1412 * @udc: Reference to the device controller.
1413 * @req: USB gadget layer representation of the request.
1414 * @status: Status to return to the gadget driver.
1415 */
static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
	struct usb_request *req, int status)
{
	req->status = status;
	/* a failed request reports zero bytes transferred */
	if (status)
		req->actual = 0;
	if (req->complete) {
		/* drop the lock around the gadget driver callback */
		spin_unlock_irq(&udc->lock);
		req->complete(&udc->bep[0].ep, req);
		spin_lock_irq(&udc->lock);
	}
}
1428
1429/**
1430 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
1431 * reset/shutdown.
1432 * @udc: Reference to the device controller.
1433 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
1434 */
static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
{
	struct usb_request *req = udc->ep0_reply;

	udc->ep0_reply = NULL;
	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
	/* if the reply was already submitted to the HW, forget that too */
	if (udc->ep0_request == req) {
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
	}
	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
}
1447
1448/**
1449 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
1450 * transfer len.
1451 * @udc: Reference to the device controller.
1452 */
static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
{
	struct usb_request *req = udc->ep0_request;

	udc->ep0_req_completed = 0;
	udc->ep0_request = NULL;

	/*
	 * NOTE(review): req->actual is a byte count and never negative,
	 * so the rc < 0 check in bcm63xx_ep0_do_setup() appears to be a
	 * dead branch — confirm.
	 */
	return req->actual;
}
1462
1463/**
1464 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
1465 * @udc: Reference to the device controller.
1466 * @ch_idx: IUDMA channel number.
1467 * @length: Number of bytes to TX/RX.
1468 *
1469 * Used for simple transfers performed by the ep0 worker. This will always
1470 * use ep0_ctrl_req / ep0_ctrl_buf.
1471 */
1472static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
1473 int length)
1474{
1475 struct usb_request *req = &udc->ep0_ctrl_req.req;
1476
1477 req->buf = udc->ep0_ctrl_buf;
1478 req->length = length;
1479 req->complete = NULL;
1480
1481 bcm63xx_ep0_map_write(udc, ch_idx, req);
1482}
1483
1484/**
1485 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
1486 * @udc: Reference to the device controller.
1487 *
1488 * EP0_IDLE probably shouldn't ever happen. EP0_REQUEUE means we're ready
1489 * for the next packet. Anything else means the transaction requires multiple
1490 * stages of handling.
1491 */
static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
{
	int rc;
	/* the SETUP bytes were received into the shared ep0 control buffer */
	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;

	rc = bcm63xx_ep0_read_complete(udc);

	if (rc < 0) {
		dev_err(udc->dev, "missing SETUP packet\n");
		return EP0_IDLE;
	}

	/*
	 * Handle 0-byte IN STATUS acknowledgement.  The hardware doesn't
	 * ALWAYS deliver these 100% of the time, so if we happen to see one,
	 * just throw it away.
	 */
	if (rc == 0)
		return EP0_REQUEUE;

	/* Drop malformed SETUP packets */
	if (rc != sizeof(*ctrl)) {
		dev_warn_ratelimited(udc->dev,
			"malformed SETUP packet (%d bytes)\n", rc);
		return EP0_REQUEUE;
	}

	/* Process new SETUP packet arriving on ep0 */
	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
	if (rc < 0) {
		/* gadget driver rejected the request: stall ep0 */
		bcm63xx_set_stall(udc, &udc->bep[0], true);
		return EP0_REQUEUE;
	}

	/* no data stage -> done; otherwise pick the direction of the
	 * data phase from the request type */
	if (!ctrl->wLength)
		return EP0_REQUEUE;
	else if (ctrl->bRequestType & USB_DIR_IN)
		return EP0_IN_DATA_PHASE_SETUP;
	else
		return EP0_OUT_DATA_PHASE_SETUP;
}
1533
1534/**
1535 * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
1536 * @udc: Reference to the device controller.
1537 *
1538 * In state EP0_IDLE, the RX descriptor is either pending, or has been
1539 * filled with a SETUP packet from the host. This function handles new
1540 * SETUP packets, control IRQ events (which can generate fake SETUP packets),
1541 * and reset/shutdown events.
1542 *
1543 * Returns 0 if work was done; -EAGAIN if nothing to do.
1544 */
static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
{
	/* event flags are checked in priority order; exactly one is
	 * consumed per call */
	if (udc->ep0_req_reset) {
		udc->ep0_req_reset = 0;
	} else if (udc->ep0_req_set_cfg) {
		udc->ep0_req_set_cfg = 0;
		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_set_iface) {
		udc->ep0_req_set_iface = 0;
		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_completed) {
		/* a SETUP packet (or ack) arrived; parse it */
		udc->ep0state = bcm63xx_ep0_do_setup(udc);
		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
	} else if (udc->ep0_req_shutdown) {
		udc->ep0_req_shutdown = 0;
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
		usb_gadget_unmap_request(&udc->gadget,
			&udc->ep0_ctrl_req.req, 0);

		/* bcm63xx_udc_pullup() is waiting for this */
		mb();
		udc->ep0state = EP0_SHUTDOWN;
	} else if (udc->ep0_reply) {
		/*
		 * This could happen if a USB RESET shows up during an ep0
		 * transaction (especially if a laggy driver like gadgetfs
		 * is in use).
		 */
		dev_warn(udc->dev, "nuking unexpected reply\n");
		bcm63xx_ep0_nuke_reply(udc, 0);
	} else {
		return -EAGAIN;
	}

	return 0;
}
1585
1586/**
1587 * bcm63xx_ep0_one_round - Handle the current ep0 state.
1588 * @udc: Reference to the device controller.
1589 *
1590 * Returns 0 if work was done; -EAGAIN if nothing to do.
1591 */
static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
{
	/* runs with udc->lock held (see bcm63xx_ep0_process) */
	enum bcm63xx_ep0_state ep0state = udc->ep0state;
	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;

	switch (udc->ep0state) {
	case EP0_REQUEUE:
		/* set up descriptor to receive SETUP packet */
		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
					     BCM63XX_MAX_CTRL_PKT);
		ep0state = EP0_IDLE;
		break;
	case EP0_IDLE:
		return bcm63xx_ep0_do_idle(udc);
	case EP0_IN_DATA_PHASE_SETUP:
		/*
		 * Normal case: TX request is in ep0_reply (queued by the
		 * callback), or will be queued shortly.  When it's here,
		 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
		 *
		 * Shutdown case: Stop waiting for the reply.  Just
		 * REQUEUE->IDLE.  The gadget driver is NOT expected to
		 * queue anything else now.
		 */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_IN_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_DATA_PHASE_COMPLETE: {
		/*
		 * Normal case: TX packet (ep0_reply) is in flight; wait for
		 * it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
		 * completion to the gadget driver, then REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);
			/*
			 * the "ack" sometimes gets eaten (see
			 * bcm63xx_ep0_do_idle)
			 */
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 1);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_DATA_PHASE_SETUP:
		/* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_OUT_DATA_PHASE_COMPLETE: {
		/* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);

			/* send 0-byte ack to host */
			bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
			ep0state = EP0_OUT_STATUS_PHASE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 0);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_STATUS_PHASE:
		/*
		 * Normal case: 0-byte OUT ack packet is in flight; wait
		 * for it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: just cancel the transmission.  Don't bother
		 * calling the completion, because it originated from this
		 * function anyway.  Then go back to REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			bcm63xx_ep0_read_complete(udc);
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			udc->ep0_request = NULL;
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_FAKE_STATUS_PHASE: {
		/*
		 * Normal case: we spoofed a SETUP packet and are now
		 * waiting for the gadget driver to send a 0-byte reply.
		 * This doesn't actually get sent to the HW because the
		 * HW has already sent its own reply.  Once we get the
		 * response, return to IDLE.
		 *
		 * Shutdown case: return to IDLE immediately.
		 *
		 * Note that the ep0 RX descriptor has remained queued
		 * (and possibly unfilled) during this entire transaction.
		 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
		 * or SET_INTERFACE transactions.
		 */
		struct usb_request *r = udc->ep0_reply;

		if (!r) {
			if (shutdown)
				ep0state = EP0_IDLE;
			break;
		}

		bcm63xx_ep0_complete(udc, r, 0);
		udc->ep0_reply = NULL;
		ep0state = EP0_IDLE;
		break;
	}
	case EP0_SHUTDOWN:
		break;
	}

	/* no state change means nothing was done: tell the caller to stop */
	if (udc->ep0state == ep0state)
		return -EAGAIN;

	udc->ep0state = ep0state;
	return 0;
}
1729
1730/**
1731 * bcm63xx_ep0_process - ep0 worker thread / state machine.
1732 * @w: Workqueue struct.
1733 *
1734 * bcm63xx_ep0_process is triggered any time an event occurs on ep0. It
1735 * is used to synchronize ep0 events and ensure that both HW and SW events
1736 * occur in a well-defined order. When the ep0 IUDMA queues are idle, it may
1737 * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
1738 * by the USBD hardware.
1739 *
1740 * The worker function will continue iterating around the state machine
1741 * until there is nothing left to do. Usually "nothing left to do" means
1742 * that we're waiting for a new event from the hardware.
1743 */
1744static void bcm63xx_ep0_process(struct work_struct *w)
1745{
1746 struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
1747 spin_lock_irq(&udc->lock);
1748 while (bcm63xx_ep0_one_round(udc) == 0)
1749 ;
1750 spin_unlock_irq(&udc->lock);
1751}
1752
1753/***********************************************************************
1754 * Standard UDC gadget operations
1755 ***********************************************************************/
1756
1757/**
1758 * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
1759 * @gadget: USB slave device.
1760 */
1761static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
1762{
1763 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1764
1765 return (usbd_readl(udc, USBD_STATUS_REG) &
1766 USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
1767}
1768
1769/**
1770 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
1771 * @gadget: USB slave device.
1772 * @is_on: 0 to disable pullup, 1 to enable.
1773 *
1774 * See notes in bcm63xx_select_pullup().
1775 */
static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;
	int i, rc = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
		/* bring the controller out of shutdown and connect */
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		udc->ep0state = EP0_REQUEUE;
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		/* start from a clean slate: no wedges, no stalls */
		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
		for (i = 0; i < BCM63XX_NUM_EP; i++)
			bcm63xx_set_stall(udc, &udc->bep[i], false);

		bcm63xx_set_ctrl_irqs(udc, true);
		bcm63xx_select_pullup(gadget_to_udc(gadget), true);
		rc = 0;
	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
		bcm63xx_select_pullup(gadget_to_udc(gadget), false);

		/* ask the ep0 worker to wind itself down */
		udc->ep0_req_shutdown = 1;
		spin_unlock_irqrestore(&udc->lock, flags);

		/*
		 * Poll until the worker reaches EP0_SHUTDOWN (it issues an
		 * mb() before the final state store — see
		 * bcm63xx_ep0_do_idle()).  NOTE(review): ep0state is read
		 * here without holding udc->lock — confirm this is
		 * intentional.
		 */
		while (1) {
			schedule_work(&udc->ep0_wq);
			if (udc->ep0state == EP0_SHUTDOWN)
				break;
			msleep(50);
		}
		bcm63xx_set_ctrl_irqs(udc, false);
		cancel_work_sync(&udc->ep0_wq);
		return 0;
	}

	/* rc stays -EINVAL when the requested state matches the current one */
	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}
1817
1818/**
1819 * bcm63xx_udc_start - Start the controller.
1820 * @gadget: USB slave device.
1821 * @driver: Driver for USB slave devices.
1822 */
static int bcm63xx_udc_start(struct usb_gadget *gadget,
	struct usb_gadget_driver *driver)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;

	/* require a high-speed-capable driver with a ->setup handler */
	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
	    !driver->setup)
		return -EINVAL;
	if (!udc)
		return -ENODEV;
	/* only one gadget driver may be bound at a time */
	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	/* power up and bring the datapath to a known state */
	set_clocks(udc, true);
	bcm63xx_fifo_setup(udc);
	bcm63xx_ep_init(udc);
	bcm63xx_ep_setup(udc);
	bcm63xx_fifo_reset(udc);
	bcm63xx_select_phy_mode(udc, true);

	udc->driver = driver;
	driver->driver.bus = NULL;
	udc->gadget.dev.of_node = udc->dev->of_node;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
1854
1855/**
1856 * bcm63xx_udc_stop - Shut down the controller.
1857 * @gadget: USB slave device.
1858 * @driver: Driver for USB slave devices.
1859 */
22835b80 1860static int bcm63xx_udc_stop(struct usb_gadget *gadget)
613065e5
KC
1861{
1862 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1863 unsigned long flags;
1864
1865 spin_lock_irqsave(&udc->lock, flags);
1866
1867 udc->driver = NULL;
613065e5
KC
1868
1869 /*
1870 * If we switch the PHY too abruptly after dropping D+, the host
1871 * will often complain:
1872 *
1873 * hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
1874 */
1875 msleep(100);
1876
1877 bcm63xx_select_phy_mode(udc, false);
1878 set_clocks(udc, false);
1879
1880 spin_unlock_irqrestore(&udc->lock, flags);
1881
1882 return 0;
1883}
1884
/* Controller-level operations exposed to the gadget core */
static const struct usb_gadget_ops bcm63xx_udc_ops = {
	.get_frame	= bcm63xx_udc_get_frame,
	.pullup		= bcm63xx_udc_pullup,
	.udc_start	= bcm63xx_udc_start,
	.udc_stop	= bcm63xx_udc_stop,
};
1891
1892/***********************************************************************
1893 * IRQ handling
1894 ***********************************************************************/
1895
1896/**
1897 * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
1898 * @udc: Reference to the device controller.
1899 *
1900 * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
1901 * The driver never sees the raw control packets coming in on the ep0
1902 * IUDMA channel, but at least we get an interrupt event to tell us that
1903 * new values are waiting in the USBD_STATUS register.
1904 */
1905static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
1906{
1907 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1908
1909 udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
1910 udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
1911 udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
1912 USBD_STATUS_ALTINTF_SHIFT;
1913 bcm63xx_ep_setup(udc);
1914}
1915
1916/**
1917 * bcm63xx_update_link_speed - Check to see if the link speed has changed.
1918 * @udc: Reference to the device controller.
1919 *
1920 * The link speed update coincides with a SETUP IRQ. Returns 1 if the
1921 * speed has changed, so that the caller can update the endpoint settings.
1922 */
1923static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
1924{
1925 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1926 enum usb_device_speed oldspeed = udc->gadget.speed;
1927
1928 switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
1929 case BCM63XX_SPD_HIGH:
1930 udc->gadget.speed = USB_SPEED_HIGH;
1931 break;
1932 case BCM63XX_SPD_FULL:
1933 udc->gadget.speed = USB_SPEED_FULL;
1934 break;
1935 default:
1936 /* this should never happen */
1937 udc->gadget.speed = USB_SPEED_UNKNOWN;
1938 dev_err(udc->dev,
1939 "received SETUP packet with invalid link speed\n");
1940 return 0;
1941 }
1942
1943 if (udc->gadget.speed != oldspeed) {
1944 dev_info(udc->dev, "link up, %s-speed mode\n",
1945 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
1946 return 1;
1947 } else {
1948 return 0;
1949 }
1950}
1951
1952/**
1953 * bcm63xx_update_wedge - Iterate through wedged endpoints.
1954 * @udc: Reference to the device controller.
1955 * @new_status: true to "refresh" wedge status; false to clear it.
1956 *
1957 * On a SETUP interrupt, we need to manually "refresh" the wedge status
1958 * because the controller hardware is designed to automatically clear
1959 * stalls in response to a CLEAR_FEATURE request from the host.
1960 *
1961 * On a RESET interrupt, we do want to restore all wedged endpoints.
1962 */
static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
{
	int i;

	/* walk only the endpoints currently marked as wedged */
	for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
		bcm63xx_set_stall(udc, &udc->bep[i], new_status);
		/* clearing (reset case) also forgets the wedge */
		if (!new_status)
			clear_bit(i, &udc->wedgemap);
	}
}
1973
1974/**
1975 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
1976 * @irq: IRQ number (unused).
1977 * @dev_id: Reference to the device controller.
1978 *
1979 * This is where we handle link (VBUS) down, USB reset, speed changes,
1980 * SET_CONFIGURATION, and SET_INTERFACE events.
1981 */
static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
{
	struct bcm63xx_udc *udc = dev_id;
	u32 stat;
	bool disconnected = false, bus_reset = false;

	/* only handle events that are both raised and unmasked */
	stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
	       usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);

	/* ack the events we are about to handle */
	usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);

	spin_lock(&udc->lock);
	if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
		/* VBUS toggled */

		if (!(usbd_readl(udc, USBD_EVENTS_REG) &
		      USBD_EVENTS_USB_LINK_MASK) &&
		    udc->gadget.speed != USB_SPEED_UNKNOWN)
			dev_info(udc->dev, "link down\n");

		udc->gadget.speed = USB_SPEED_UNKNOWN;
		disconnected = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		/* a bus reset un-wedges all endpoints */
		bcm63xx_update_wedge(udc, false);

		udc->ep0_req_reset = 1;
		schedule_work(&udc->ep0_wq);
		bus_reset = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
		if (bcm63xx_update_link_speed(udc)) {
			bcm63xx_fifo_setup(udc);
			bcm63xx_ep_setup(udc);
		}
		/* re-assert stalls the HW may have auto-cleared */
		bcm63xx_update_wedge(udc, true);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_cfg = 1;
		schedule_work(&udc->ep0_wq);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_iface = 1;
		schedule_work(&udc->ep0_wq);
	}
	spin_unlock(&udc->lock);

	/* notify the gadget driver outside of udc->lock */
	if (disconnected && udc->driver)
		udc->driver->disconnect(&udc->gadget);
	else if (bus_reset && udc->driver)
		usb_gadget_udc_reset(&udc->gadget, udc->driver);

	return IRQ_HANDLED;
}
2042
2043/**
2044 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
2045 * @irq: IRQ number (unused).
2046 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
2047 *
2048 * For the two ep0 channels, we have special handling that triggers the
2049 * ep0 worker thread. For normal bulk/intr channels, either queue up
2050 * the next buffer descriptor for the transaction (incomplete transaction),
2051 * or invoke the completion callback (complete transactions).
2052 */
static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
{
	struct iudma_ch *iudma = dev_id;
	struct bcm63xx_udc *udc = iudma->udc;
	struct bcm63xx_ep *bep;
	struct usb_request *req = NULL;
	struct bcm63xx_req *breq = NULL;
	int rc;
	bool is_done = false;

	spin_lock(&udc->lock);

	/* Ack the "buffer done" interrupt for this channel. */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IR_REG, iudma->ch_idx);
	bep = iudma->bep;
	rc = iudma_read(udc, iudma);	/* bytes completed, or < 0 on error */

	/* special handling for EP0 RX (0) and TX (1) */
	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
	    iudma->ch_idx == IUDMA_EP0_TXCHAN) {
		req = udc->ep0_request;
		breq = our_req(req);

		/* a single request could require multiple submissions */
		if (rc >= 0) {
			req->actual += rc;

			/*
			 * Done when the request is filled, or when the BD
			 * completed short (fewer bytes than it asked for).
			 */
			if (req->actual >= req->length || breq->bd_bytes > rc) {
				udc->ep0_req_completed = 1;
				is_done = true;
				schedule_work(&udc->ep0_wq);

				/* "actual" on a ZLP is 1 byte */
				req->actual = min(req->actual, req->length);
			} else {
				/* queue up the next BD (same request) */
				iudma_write(udc, iudma, breq);
			}
		}
	} else if (!list_empty(&bep->queue)) {
		/* Normal bulk/intr channel: work on the head request. */
		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
		req = &breq->req;

		if (rc >= 0) {
			req->actual += rc;

			if (req->actual >= req->length || breq->bd_bytes > rc) {
				/*
				 * Transaction complete: retire it and start
				 * the next queued request, if any.
				 */
				is_done = true;
				list_del(&breq->queue);

				req->actual = min(req->actual, req->length);

				if (!list_empty(&bep->queue)) {
					struct bcm63xx_req *next;

					next = list_first_entry(&bep->queue,
						struct bcm63xx_req, queue);
					iudma_write(udc, iudma, next);
				}
			} else {
				/* More BDs needed for the same request. */
				iudma_write(udc, iudma, breq);
			}
		}
	}
	spin_unlock(&udc->lock);

	/* Run the completion callback without holding udc->lock. */
	if (is_done) {
		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
		if (req->complete)
			req->complete(&bep->ep, req);
	}

	return IRQ_HANDLED;
}
2127
2128/***********************************************************************
2129 * Debug filesystem
2130 ***********************************************************************/
2131
2132/*
2133 * bcm63xx_usbd_dbg_show - Show USBD controller state.
2134 * @s: seq_file to which the information will be written.
2135 * @p: Unused.
2136 *
2137 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
2138 */
2139static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
2140{
2141 struct bcm63xx_udc *udc = s->private;
2142
2143 if (!udc->driver)
2144 return -ENODEV;
2145
2146 seq_printf(s, "ep0 state: %s\n",
2147 bcm63xx_ep0_state_names[udc->ep0state]);
2148 seq_printf(s, " pending requests: %s%s%s%s%s%s%s\n",
2149 udc->ep0_req_reset ? "reset " : "",
2150 udc->ep0_req_set_cfg ? "set_cfg " : "",
2151 udc->ep0_req_set_iface ? "set_iface " : "",
2152 udc->ep0_req_shutdown ? "shutdown " : "",
2153 udc->ep0_request ? "pending " : "",
2154 udc->ep0_req_completed ? "completed " : "",
2155 udc->ep0_reply ? "reply " : "");
2156 seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
2157 udc->cfg, udc->iface, udc->alt_iface);
2158 seq_printf(s, "regs:\n");
2159 seq_printf(s, " control: %08x; straps: %08x; status: %08x\n",
2160 usbd_readl(udc, USBD_CONTROL_REG),
2161 usbd_readl(udc, USBD_STRAPS_REG),
2162 usbd_readl(udc, USBD_STATUS_REG));
2163 seq_printf(s, " events: %08x; stall: %08x\n",
2164 usbd_readl(udc, USBD_EVENTS_REG),
2165 usbd_readl(udc, USBD_STALL_REG));
2166
2167 return 0;
2168}
2169
2170/*
2171 * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
2172 * @s: seq_file to which the information will be written.
2173 * @p: Unused.
2174 *
2175 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
2176 */
2177static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2178{
2179 struct bcm63xx_udc *udc = s->private;
2180 int ch_idx, i;
2181 u32 sram2, sram3;
2182
2183 if (!udc->driver)
2184 return -ENODEV;
2185
2186 for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
2187 struct iudma_ch *iudma = &udc->iudma[ch_idx];
2188 struct list_head *pos;
2189
2190 seq_printf(s, "IUDMA channel %d -- ", ch_idx);
2191 switch (iudma_defaults[ch_idx].ep_type) {
2192 case BCMEP_CTRL:
2193 seq_printf(s, "control");
2194 break;
2195 case BCMEP_BULK:
2196 seq_printf(s, "bulk");
2197 break;
2198 case BCMEP_INTR:
2199 seq_printf(s, "interrupt");
2200 break;
2201 }
2202 seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
2203 seq_printf(s, " [ep%d]:\n",
2204 max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2205 seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2d1f7af3
FF
2206 usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
2207 usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
2208 usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
2209 usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
613065e5 2210
2d1f7af3
FF
2211 sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
2212 sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
613065e5 2213 seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2d1f7af3 2214 usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
613065e5
KC
2215 sram2 >> 16, sram2 & 0xffff,
2216 sram3 >> 16, sram3 & 0xffff,
2d1f7af3 2217 usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
613065e5
KC
2218 seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
2219 iudma->n_bds);
2220
2221 if (iudma->bep) {
2222 i = 0;
2223 list_for_each(pos, &iudma->bep->queue)
2224 i++;
2225 seq_printf(s, "; %d queued\n", i);
2226 } else {
2227 seq_printf(s, "\n");
2228 }
2229
2230 for (i = 0; i < iudma->n_bds; i++) {
2231 struct bcm_enet_desc *d = &iudma->bd_ring[i];
2232
2233 seq_printf(s, " %03x (%02x): len_stat: %04x_%04x; pa %08x",
2234 i * sizeof(*d), i,
2235 d->len_stat >> 16, d->len_stat & 0xffff,
2236 d->address);
2237 if (d == iudma->read_bd)
2238 seq_printf(s, " <<RD");
2239 if (d == iudma->write_bd)
2240 seq_printf(s, " <<WR");
2241 seq_printf(s, "\n");
2242 }
2243
2244 seq_printf(s, "\n");
2245 }
2246
2247 return 0;
2248}
2249
/* debugfs open hook: bind bcm63xx_usbd_dbg_show() to this seq_file */
static int bcm63xx_usbd_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, bcm63xx_usbd_dbg_show, inode->i_private);
}
2254
/* debugfs open hook: bind bcm63xx_iudma_dbg_show() to this seq_file */
static int bcm63xx_iudma_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, bcm63xx_iudma_dbg_show, inode->i_private);
}
2259
/* file_operations for the read-only "usbd" debugfs entry */
static const struct file_operations usbd_dbg_fops = {
	.owner		= THIS_MODULE,
	.open		= bcm63xx_usbd_dbg_open,
	.llseek		= seq_lseek,
	.read		= seq_read,
	.release	= single_release,
};

/* file_operations for the read-only "iudma" debugfs entry */
static const struct file_operations iudma_dbg_fops = {
	.owner		= THIS_MODULE,
	.open		= bcm63xx_iudma_dbg_open,
	.llseek		= seq_lseek,
	.read		= seq_read,
	.release	= single_release,
};
2275
2276
2277/**
2278 * bcm63xx_udc_init_debugfs - Create debugfs entries.
2279 * @udc: Reference to the device controller.
2280 */
2281static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
2282{
2283 struct dentry *root, *usbd, *iudma;
2284
2285 if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
2286 return;
2287
2288 root = debugfs_create_dir(udc->gadget.name, NULL);
2289 if (IS_ERR(root) || !root)
2290 goto err_root;
2291
2292 usbd = debugfs_create_file("usbd", 0400, root, udc,
2293 &usbd_dbg_fops);
2294 if (!usbd)
2295 goto err_usbd;
2296 iudma = debugfs_create_file("iudma", 0400, root, udc,
2297 &iudma_dbg_fops);
2298 if (!iudma)
2299 goto err_iudma;
2300
2301 udc->debugfs_root = root;
2302 udc->debugfs_usbd = usbd;
2303 udc->debugfs_iudma = iudma;
2304 return;
2305err_iudma:
2306 debugfs_remove(usbd);
2307err_usbd:
2308 debugfs_remove(root);
2309err_root:
2310 dev_err(udc->dev, "debugfs is not available\n");
2311}
2312
2313/**
2314 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
2315 * @udc: Reference to the device controller.
2316 *
2317 * debugfs_remove() is safe to call with a NULL argument.
2318 */
static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
{
	/* remove the files before their parent directory */
	debugfs_remove(udc->debugfs_iudma);
	debugfs_remove(udc->debugfs_usbd);
	debugfs_remove(udc->debugfs_root);
	/* clear the cached dentries so a second call is a safe no-op */
	udc->debugfs_iudma = NULL;
	udc->debugfs_usbd = NULL;
	udc->debugfs_root = NULL;
}
2328
2329/***********************************************************************
2330 * Driver init/exit
2331 ***********************************************************************/
2332
613065e5
KC
2333/**
2334 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
2335 * @pdev: Platform device struct from the bcm63xx BSP code.
2336 *
2337 * Note that platform data is required, because pd.port_no varies from chip
2338 * to chip and is used to switch the correct USB port to device mode.
2339 */
41ac7b3a 2340static int bcm63xx_udc_probe(struct platform_device *pdev)
613065e5
KC
2341{
2342 struct device *dev = &pdev->dev;
e01ee9f5 2343 struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
613065e5
KC
2344 struct bcm63xx_udc *udc;
2345 struct resource *res;
2346 int rc = -ENOMEM, i, irq;
2347
2348 udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
0590c4bf 2349 if (!udc)
613065e5 2350 return -ENOMEM;
613065e5
KC
2351
2352 platform_set_drvdata(pdev, udc);
2353 udc->dev = dev;
2354 udc->pd = pd;
2355
2356 if (!pd) {
2357 dev_err(dev, "missing platform data\n");
2358 return -EINVAL;
2359 }
2360
2361 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
148e1134
TR
2362 udc->usbd_regs = devm_ioremap_resource(dev, res);
2363 if (IS_ERR(udc->usbd_regs))
2364 return PTR_ERR(udc->usbd_regs);
613065e5
KC
2365
2366 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
148e1134
TR
2367 udc->iudma_regs = devm_ioremap_resource(dev, res);
2368 if (IS_ERR(udc->iudma_regs))
2369 return PTR_ERR(udc->iudma_regs);
613065e5
KC
2370
2371 spin_lock_init(&udc->lock);
2372 INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);
613065e5
KC
2373
2374 udc->gadget.ops = &bcm63xx_udc_ops;
2375 udc->gadget.name = dev_name(dev);
613065e5
KC
2376
2377 if (!pd->use_fullspeed && !use_fullspeed)
2378 udc->gadget.max_speed = USB_SPEED_HIGH;
2379 else
2380 udc->gadget.max_speed = USB_SPEED_FULL;
2381
2382 /* request clocks, allocate buffers, and clear any pending IRQs */
2383 rc = bcm63xx_init_udc_hw(udc);
2384 if (rc)
2385 return rc;
2386
2387 rc = -ENXIO;
2388
2389 /* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
2390 irq = platform_get_irq(pdev, 0);
2391 if (irq < 0) {
2392 dev_err(dev, "missing IRQ resource #0\n");
2393 goto out_uninit;
2394 }
2395 if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
2396 dev_name(dev), udc) < 0) {
2397 dev_err(dev, "error requesting IRQ #%d\n", irq);
2398 goto out_uninit;
2399 }
2400
2401 /* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
2402 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
2403 irq = platform_get_irq(pdev, i + 1);
2404 if (irq < 0) {
2405 dev_err(dev, "missing IRQ resource #%d\n", i + 1);
2406 goto out_uninit;
2407 }
2408 if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
2409 dev_name(dev), &udc->iudma[i]) < 0) {
2410 dev_err(dev, "error requesting IRQ #%d\n", irq);
2411 goto out_uninit;
2412 }
2413 }
2414
613065e5
KC
2415 bcm63xx_udc_init_debugfs(udc);
2416 rc = usb_add_gadget_udc(dev, &udc->gadget);
2417 if (!rc)
2418 return 0;
2419
2420 bcm63xx_udc_cleanup_debugfs(udc);
613065e5
KC
2421out_uninit:
2422 bcm63xx_uninit_udc_hw(udc);
2423 return rc;
2424}
2425
/**
 * bcm63xx_udc_remove - Remove the device from the system.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 *
 * Returns 0 (platform_driver remove convention).
 */
static int bcm63xx_udc_remove(struct platform_device *pdev)
{
	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);

	bcm63xx_udc_cleanup_debugfs(udc);
	usb_del_gadget_udc(&udc->gadget);
	/* usb_del_gadget_udc() must have unbound any gadget driver */
	BUG_ON(udc->driver);

	bcm63xx_uninit_udc_hw(udc);

	return 0;
}
2442
/* Binds to platform devices registered as DRV_MODULE_NAME by the BSP. */
static struct platform_driver bcm63xx_udc_driver = {
	.probe		= bcm63xx_udc_probe,
	.remove		= bcm63xx_udc_remove,
	.driver		= {
		.name	= DRV_MODULE_NAME,
	},
};
module_platform_driver(bcm63xx_udc_driver);

MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_MODULE_NAME);
This page took 0.3427 seconds and 5 git commands to generate.