staging: most: hdm-usb: Use setup_timer
drivers/staging/most/hdm-usb/hdm_usb.c
1 /*
2 * hdm_usb.c - Hardware dependent module for USB
3 *
4 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * This file is licensed under GPLv2.
12 */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/module.h>
16 #include <linux/fs.h>
17 #include <linux/usb.h>
18 #include <linux/slab.h>
19 #include <linux/init.h>
20 #include <linux/cdev.h>
21 #include <linux/device.h>
22 #include <linux/list.h>
23 #include <linux/completion.h>
24 #include <linux/mutex.h>
25 #include <linux/spinlock.h>
26 #include <linux/interrupt.h>
27 #include <linux/workqueue.h>
28 #include <linux/sysfs.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/etherdevice.h>
31 #include <linux/uaccess.h>
32 #include "mostcore.h"
33 #include "networking.h"
34
35 #define USB_MTU 512
36 #define NO_ISOCHRONOUS_URB 0
37 #define AV_PACKETS_PER_XACT 2
38 #define BUF_CHAIN_SIZE 0xFFFF
39 #define MAX_NUM_ENDPOINTS 30
40 #define MAX_SUFFIX_LEN 10
41 #define MAX_STRING_LEN 80
42 #define MAX_BUF_SIZE 0xFFFF
43 #define CEILING(x, y) (((x) + (y) - 1) / (y))
44
45 #define USB_VENDOR_ID_SMSC 0x0424 /* VID: SMSC */
46 #define USB_DEV_ID_BRDG 0xC001 /* PID: USB Bridge */
47 #define USB_DEV_ID_INIC 0xCF18 /* PID: USB INIC */
48 #define HW_RESYNC 0x0000
49 /* DRCI Addresses */
50 #define DRCI_REG_NI_STATE 0x0100
51 #define DRCI_REG_PACKET_BW 0x0101
52 #define DRCI_REG_NODE_ADDR 0x0102
53 #define DRCI_REG_NODE_POS 0x0103
54 #define DRCI_REG_MEP_FILTER 0x0140
55 #define DRCI_REG_HASH_TBL0 0x0141
56 #define DRCI_REG_HASH_TBL1 0x0142
57 #define DRCI_REG_HASH_TBL2 0x0143
58 #define DRCI_REG_HASH_TBL3 0x0144
59 #define DRCI_REG_HW_ADDR_HI 0x0145
60 #define DRCI_REG_HW_ADDR_MI 0x0146
61 #define DRCI_REG_HW_ADDR_LO 0x0147
62 #define DRCI_REG_BASE 0x1100
63 #define DRCI_COMMAND 0x02
64 #define DRCI_READ_REQ 0xA0
65 #define DRCI_WRITE_REQ 0xA1
66
67 /**
68 * struct buf_anchor - used to create a list of pending URBs
69 * @urb: pointer to USB request block
70 * @clear_work_obj: work object used to clear a halted endpoint
71 * @list: list head used to link the anchor into a channel's anchor_list
72 * @urb_compl: completion used to wait for an unlinked URB to finish
73 */
74 struct buf_anchor {
75 struct urb *urb;
76 struct work_struct clear_work_obj;
77 struct list_head list;
78 struct completion urb_compl;
79 };
80
81 #define to_buf_anchor(w) container_of(w, struct buf_anchor, clear_work_obj)
82
83 /**
84 * struct most_dci_obj - Direct Communication Interface
85 * @kobj: position in sysfs
86 * @usb_device: pointer to the usb device
87 */
88 struct most_dci_obj {
89 struct kobject kobj;
90 struct usb_device *usb_device;
91 };
92
93 #define to_dci_obj(p) container_of(p, struct most_dci_obj, kobj)
94
95 /**
96 * struct most_dev - holds all usb interface specific stuff
97 * @parent: parent object in sysfs
98 * @usb_device: pointer to usb device
99 * @iface: hardware interface
100 * @cap: channel capabilities
101 * @conf: channel configuration
102 * @dci: direct communication interface of hardware
103 * @hw_addr: MAC address of hardware
104 * @ep_address: endpoint address table
105 * @link_stat: link status of hardware
106 * @description: device description
107 * @suffix: suffix for channel name
108 * @anchor_list_lock: locks list access
109 * @padding_active: indicates channel uses padding
110 * @is_channel_healthy: health status table of each channel
111 * @anchor_list: list of anchored items
112 * @io_mutex: synchronize I/O with disconnect
113 * @link_stat_timer: timer for link status reports
114 * @poll_work_obj: work for polling link status
115 */
116 struct most_dev {
117 struct kobject *parent;
118 struct usb_device *usb_device;
119 struct most_interface iface;
120 struct most_channel_capability *cap;
121 struct most_channel_config *conf;
122 struct most_dci_obj *dci;
123 u8 hw_addr[6];
124 u8 *ep_address;
125 u16 link_stat;
126 char description[MAX_STRING_LEN];
127 char suffix[MAX_NUM_ENDPOINTS][MAX_SUFFIX_LEN];
128 spinlock_t anchor_list_lock[MAX_NUM_ENDPOINTS];
129 bool padding_active[MAX_NUM_ENDPOINTS];
130 bool is_channel_healthy[MAX_NUM_ENDPOINTS];
131 struct list_head *anchor_list;
132 struct mutex io_mutex;
133 struct timer_list link_stat_timer;
134 struct work_struct poll_work_obj;
135 };
136
137 #define to_mdev(d) container_of(d, struct most_dev, iface)
138 #define to_mdev_from_work(w) container_of(w, struct most_dev, poll_work_obj)
139
140 static struct workqueue_struct *schedule_usb_work;
141 static void wq_clear_halt(struct work_struct *wq_obj);
142 static void wq_netinfo(struct work_struct *wq_obj);
143
144 /**
145 * drci_rd_reg - read a DCI register
146 * @dev: usb device
147 * @reg: register address
148 * @buf: buffer to store data
149 *
150 * This reads data from the INIC's direct register communication interface.
151 */
152 static inline int drci_rd_reg(struct usb_device *dev, u16 reg, u16 *buf)
153 {
154 int retval;
155 u16 *dma_buf = kzalloc(sizeof(u16), GFP_KERNEL);
156 u8 req_type = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
157
158 if (!dma_buf)
159 return -ENOMEM;
160
161 retval = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
162 DRCI_READ_REQ, req_type,
163 0x0000,
164 reg, dma_buf, sizeof(u16), 5 * HZ);
165 *buf = le16_to_cpu(*dma_buf);
166 kfree(dma_buf);
167
168 return retval;
169 }
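/*
 * Note: usb_control_msg() requires a DMA-capable transfer buffer, so the
 * register value is read into a kmalloc'd bounce buffer and copied to the
 * caller's location after converting from the device's little-endian byte
 * order. A minimal usage sketch, mirroring the call made later in
 * hdm_update_netinfo() (assuming a struct most_dev *mdev in scope):
 *
 *	u16 link;
 *
 *	if (drci_rd_reg(mdev->usb_device, DRCI_REG_NI_STATE, &link) < 0)
 *		dev_err(&mdev->usb_device->dev, "NI state read failed\n");
 */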
170
171 /**
172 * drci_wr_reg - write a DCI register
173 * @dev: usb device
174 * @reg: register address
175 * @data: data to write
176 *
177 * This writes data to the INIC's direct register communication interface.
178 */
179 static inline int drci_wr_reg(struct usb_device *dev, u16 reg, u16 data)
180 {
181 return usb_control_msg(dev,
182 usb_sndctrlpipe(dev, 0),
183 DRCI_WRITE_REQ,
184 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
185 data,
186 reg,
187 NULL,
188 0,
189 5 * HZ);
190 }
191
192 /**
193 * free_anchored_buffers - free device's anchored items
194 * @mdev: the device
195 * @channel: channel ID
196 */
197 static void free_anchored_buffers(struct most_dev *mdev, unsigned int channel)
198 {
199 struct mbo *mbo;
200 struct buf_anchor *anchor, *tmp;
201 unsigned long flags;
202
203 spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
204 list_for_each_entry_safe(anchor, tmp, &mdev->anchor_list[channel],
205 list) {
206 struct urb *urb = anchor->urb;
207
208 spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
209 if (likely(urb)) {
210 mbo = urb->context;
211 if (!irqs_disabled()) {
212 usb_kill_urb(urb);
213 } else {
214 usb_unlink_urb(urb);
215 wait_for_completion(&anchor->urb_compl);
216 }
217 if ((mbo) && (mbo->complete)) {
218 mbo->status = MBO_E_CLOSE;
219 mbo->processed_length = 0;
220 mbo->complete(mbo);
221 }
222 usb_free_urb(urb);
223 }
224 spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
225 list_del(&anchor->list);
226 kfree(anchor);
227 }
228 spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
229 }
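/*
 * Locking note: the anchor_list spinlock cannot be held while an URB is
 * killed or while waiting for its completion, since both may block. The
 * loop therefore drops the lock around the per-URB work and re-takes it
 * before unlinking and freeing the anchor.
 */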
230
231 /**
232 * get_stream_frame_size - calculate frame size of current configuration
233 * @cfg: channel configuration
234 */
235 static unsigned int get_stream_frame_size(struct most_channel_config *cfg)
236 {
237 unsigned int frame_size = 0;
238 unsigned int sub_size = cfg->subbuffer_size;
239
240 if (!sub_size) {
241 pr_warn("Misconfig: Subbuffer size zero.\n");
242 return frame_size;
243 }
244 switch (cfg->data_type) {
245 case MOST_CH_ISOC_AVP:
246 frame_size = AV_PACKETS_PER_XACT * sub_size;
247 break;
248 case MOST_CH_SYNC:
249 if (cfg->packets_per_xact == 0) {
250 pr_warn("Misconfig: Packets per XACT zero\n");
251 frame_size = 0;
252 } else if (cfg->packets_per_xact == 0xFF) {
253 frame_size = (USB_MTU / sub_size) * sub_size;
254 } else {
255 frame_size = cfg->packets_per_xact * sub_size;
256 }
257 break;
258 default:
259 pr_warn("Query frame size of non-streaming channel\n");
260 break;
261 }
262 return frame_size;
263 }
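/*
 * Worked example (sync channel): with subbuffer_size = 12 and
 * packets_per_xact = 0xFF the frame size becomes
 * (USB_MTU / 12) * 12 = 42 * 12 = 504 bytes, i.e. as many complete
 * subbuffers as fit into one 512-byte USB transaction.
 */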
264
265 /**
266 * hdm_poison_channel - mark buffers of this channel as invalid
267 * @iface: pointer to the interface
268 * @channel: channel ID
269 *
270 * This unlinks all URBs submitted to the HCD,
271 * calls the associated completion function of the core and removes
272 * them from the list.
273 *
274 * Returns 0 on success or error code otherwise.
275 */
276 static int hdm_poison_channel(struct most_interface *iface, int channel)
277 {
278 struct most_dev *mdev;
279
280 if (unlikely(!iface)) {
281 pr_warn("Poison: Bad interface.\n");
282 return -EIO;
283 }
284 mdev = to_mdev(iface);
285 if (unlikely((channel < 0) || (channel >= iface->num_channels))) {
286 dev_warn(&mdev->usb_device->dev, "Channel ID out of range.\n");
287 return -ECHRNG;
288 }
289
290 mdev->is_channel_healthy[channel] = false;
291
292 mutex_lock(&mdev->io_mutex);
293 free_anchored_buffers(mdev, channel);
294 if (mdev->padding_active[channel])
295 mdev->padding_active[channel] = false;
296
297 if (mdev->conf[channel].data_type == MOST_CH_ASYNC) {
298 del_timer_sync(&mdev->link_stat_timer);
299 cancel_work_sync(&mdev->poll_work_obj);
300 }
301 mutex_unlock(&mdev->io_mutex);
302 return 0;
303 }
304
305 /**
306 * hdm_add_padding - add padding bytes
307 * @mdev: most device
308 * @channel: channel ID
309 * @mbo: buffer object
310 *
311 * This inserts the INIC hardware specific padding bytes into a streaming
312 * channel's buffer
313 */
314 static int hdm_add_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
315 {
316 struct most_channel_config *conf = &mdev->conf[channel];
317 unsigned int j, num_frames, frame_size;
318 u16 rd_addr, wr_addr;
319
320 frame_size = get_stream_frame_size(conf);
321 if (!frame_size)
322 return -EIO;
323 num_frames = mbo->buffer_length / frame_size;
324
325 if (num_frames < 1) {
326 dev_err(&mdev->usb_device->dev,
327 "Missed minimal transfer unit.\n");
328 return -EIO;
329 }
330
331 for (j = 1; j < num_frames; j++) {
332 wr_addr = (num_frames - j) * USB_MTU;
333 rd_addr = (num_frames - j) * frame_size;
334 memmove(mbo->virt_address + wr_addr,
335 mbo->virt_address + rd_addr,
336 frame_size);
337 }
338 mbo->buffer_length = num_frames * USB_MTU;
339 return 0;
340 }
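/*
 * The loop above expands the buffer in place: frame j, stored at
 * j * frame_size, is moved to a USB_MTU-aligned slot at j * USB_MTU.
 * Frames are moved starting with the last one so that data that has not
 * been relocated yet is never overwritten. E.g. with frame_size = 504,
 * four frames grow from 2016 bytes of payload to 4 * 512 = 2048 bytes
 * on the bus.
 */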
341
342 /**
343 * hdm_remove_padding - remove padding bytes
344 * @mdev: most device
345 * @channel: channel ID
346 * @mbo: buffer object
347 *
348 * This takes the INIC hardware specific padding bytes off a streaming
349 * channel's buffer.
350 */
351 static int hdm_remove_padding(struct most_dev *mdev, int channel,
352 struct mbo *mbo)
353 {
354 unsigned int j, num_frames, frame_size;
355 struct most_channel_config *const conf = &mdev->conf[channel];
356
357 frame_size = get_stream_frame_size(conf);
358 if (!frame_size)
359 return -EIO;
360 num_frames = mbo->processed_length / USB_MTU;
361
362 for (j = 1; j < num_frames; j++)
363 memmove(mbo->virt_address + frame_size * j,
364 mbo->virt_address + USB_MTU * j,
365 frame_size);
366
367 mbo->processed_length = frame_size * num_frames;
368 return 0;
369 }
370
371 /**
372 * hdm_write_completion - completion function for submitted Tx URBs
373 * @urb: the URB that has been completed
374 *
375 * This checks the status of the completed URB. In case the URB has been
376 * unlinked before, it is immediately freed. On any other error the MBO
377 * transfer flag is set. On success it frees allocated resources and calls
378 * the completion function.
379 *
380 * Context: interrupt!
381 */
382 static void hdm_write_completion(struct urb *urb)
383 {
384 struct mbo *mbo;
385 struct buf_anchor *anchor;
386 struct most_dev *mdev;
387 struct device *dev;
388 unsigned int channel;
389 unsigned long flags;
390
391 mbo = urb->context;
392 anchor = mbo->priv;
393 mdev = to_mdev(mbo->ifp);
394 channel = mbo->hdm_channel_id;
395 dev = &mdev->usb_device->dev;
396
397 if ((urb->status == -ENOENT) || (urb->status == -ECONNRESET) ||
398 (!mdev->is_channel_healthy[channel])) {
399 complete(&anchor->urb_compl);
400 return;
401 }
402
403 if (unlikely(urb->status && !(urb->status == -ENOENT ||
404 urb->status == -ECONNRESET ||
405 urb->status == -ESHUTDOWN))) {
406 mbo->processed_length = 0;
407 switch (urb->status) {
408 case -EPIPE:
409 dev_warn(dev, "Broken OUT pipe detected\n");
410 most_stop_enqueue(&mdev->iface, channel);
411 mbo->status = MBO_E_INVAL;
412 usb_unlink_urb(urb);
413 INIT_WORK(&anchor->clear_work_obj, wq_clear_halt);
414 queue_work(schedule_usb_work, &anchor->clear_work_obj);
415 return;
416 case -ENODEV:
417 case -EPROTO:
418 mbo->status = MBO_E_CLOSE;
419 break;
420 default:
421 mbo->status = MBO_E_INVAL;
422 break;
423 }
424 } else {
425 mbo->status = MBO_SUCCESS;
426 mbo->processed_length = urb->actual_length;
427 }
428
429 spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
430 list_del(&anchor->list);
431 spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
432 kfree(anchor);
433
434 if (likely(mbo->complete))
435 mbo->complete(mbo);
436 usb_free_urb(urb);
437 }
438
439 /**
440 * hdm_read_completion - completion function for submitted Rx URBs
441 * @urb: the URB that has been completed
442 *
443 * This checks the status of the completed URB. In case the URB has been
444 * unlinked before it is immediately freed. On any other error the MBO transfer
445 * flag is set. On success it frees allocated resources, removes
446 * padding bytes, if necessary, and calls the completion function.
447 *
448 * Context: interrupt!
449 *
450 * **************************************************************************
451 * Error codes returned in urb->status
452 * or in iso_frame_desc[n].status (for ISO)
453 * *************************************************************************
454 *
455 * USB device drivers may only test urb status values in completion handlers.
456 * This is because otherwise there would be a race between HCDs updating
457 * these values on one CPU, and device drivers testing them on another CPU.
458 *
459 * A transfer's actual_length may be positive even when an error has been
460 * reported. That's because transfers often involve several packets, so that
461 * one or more packets could finish before an error stops further endpoint I/O.
462 *
463 * For isochronous URBs, the urb status value is non-zero only if the URB is
464 * unlinked, the device is removed, the host controller is disabled or the total
465 * transferred length is less than the requested length and the URB_SHORT_NOT_OK
466 * flag is set. Completion handlers for isochronous URBs should only see
467 * urb->status set to zero, -ENOENT, -ECONNRESET, -ESHUTDOWN, or -EREMOTEIO.
468 * Individual frame descriptor status fields may report more status codes.
469 *
470 *
471 * 0 Transfer completed successfully
472 *
473 * -ENOENT URB was synchronously unlinked by usb_unlink_urb
474 *
475 * -EINPROGRESS URB still pending, no results yet
476 * (That is, if drivers see this it's a bug.)
477 *
478 * -EPROTO (*, **) a) bitstuff error
479 * b) no response packet received within the
480 * prescribed bus turn-around time
481 * c) unknown USB error
482 *
483 * -EILSEQ (*, **) a) CRC mismatch
484 * b) no response packet received within the
485 * prescribed bus turn-around time
486 * c) unknown USB error
487 *
488 * Note that often the controller hardware does not
489 * distinguish among cases a), b), and c), so a
490 * driver cannot tell whether there was a protocol
491 * error, a failure to respond (often caused by
492 * device disconnect), or some other fault.
493 *
494 * -ETIME (**) No response packet received within the prescribed
495 * bus turn-around time. This error may instead be
496 * reported as -EPROTO or -EILSEQ.
497 *
498 * -ETIMEDOUT Synchronous USB message functions use this code
499 * to indicate timeout expired before the transfer
500 * completed, and no other error was reported by HC.
501 *
502 * -EPIPE (**) Endpoint stalled. For non-control endpoints,
503 * reset this status with usb_clear_halt().
504 *
505 * -ECOMM During an IN transfer, the host controller
506 * received data from an endpoint faster than it
507 * could be written to system memory
508 *
509 * -ENOSR During an OUT transfer, the host controller
510 * could not retrieve data from system memory fast
511 * enough to keep up with the USB data rate
512 *
513 * -EOVERFLOW (*) The amount of data returned by the endpoint was
514 * greater than either the max packet size of the
515 * endpoint or the remaining buffer size. "Babble".
516 *
517 * -EREMOTEIO The data read from the endpoint did not fill the
518 * specified buffer, and URB_SHORT_NOT_OK was set in
519 * urb->transfer_flags.
520 *
521 * -ENODEV Device was removed. Often preceded by a burst of
522 * other errors, since the hub driver doesn't detect
523 * device removal events immediately.
524 *
525 * -EXDEV ISO transfer only partially completed
526 * (only set in iso_frame_desc[n].status, not urb->status)
527 *
528 * -EINVAL ISO madness, if this happens: Log off and go home
529 *
530 * -ECONNRESET URB was asynchronously unlinked by usb_unlink_urb
531 *
532 * -ESHUTDOWN The device or host controller has been disabled due
533 * to some problem that could not be worked around,
534 * such as a physical disconnect.
535 *
536 *
537 * (*) Error codes like -EPROTO, -EILSEQ and -EOVERFLOW normally indicate
538 * hardware problems such as bad devices (including firmware) or cables.
539 *
540 * (**) This is also one of several codes that different kinds of host
541 * controller use to indicate a transfer has failed because of device
542 * disconnect. In the interval before the hub driver starts disconnect
543 * processing, devices may receive such fault reports for every request.
544 *
545 * See <https://www.kernel.org/doc/Documentation/usb/error-codes.txt>
546 */
547 static void hdm_read_completion(struct urb *urb)
548 {
549 struct mbo *mbo;
550 struct buf_anchor *anchor;
551 struct most_dev *mdev;
552 struct device *dev;
553 unsigned long flags;
554 unsigned int channel;
555
556 mbo = urb->context;
557 anchor = mbo->priv;
558 mdev = to_mdev(mbo->ifp);
559 channel = mbo->hdm_channel_id;
560 dev = &mdev->usb_device->dev;
561
562 if ((urb->status == -ENOENT) || (urb->status == -ECONNRESET) ||
563 (!mdev->is_channel_healthy[channel])) {
564 complete(&anchor->urb_compl);
565 return;
566 }
567
568 if (unlikely(urb->status && !(urb->status == -ENOENT ||
569 urb->status == -ECONNRESET ||
570 urb->status == -ESHUTDOWN))) {
571 mbo->processed_length = 0;
572 switch (urb->status) {
573 case -EPIPE:
574 dev_warn(dev, "Broken IN pipe detected\n");
575 mbo->status = MBO_E_INVAL;
576 usb_unlink_urb(urb);
577 INIT_WORK(&anchor->clear_work_obj, wq_clear_halt);
578 queue_work(schedule_usb_work, &anchor->clear_work_obj);
579 return;
580 case -ENODEV:
581 case -EPROTO:
582 mbo->status = MBO_E_CLOSE;
583 break;
584 case -EOVERFLOW:
585 dev_warn(dev, "Babble on IN pipe detected\n");
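/* fall through - treat babble like any other fault */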
586 default:
587 mbo->status = MBO_E_INVAL;
588 break;
589 }
590 } else {
591 mbo->processed_length = urb->actual_length;
592 if (!mdev->padding_active[channel]) {
593 mbo->status = MBO_SUCCESS;
594 } else {
595 if (hdm_remove_padding(mdev, channel, mbo)) {
596 mbo->processed_length = 0;
597 mbo->status = MBO_E_INVAL;
598 } else {
599 mbo->status = MBO_SUCCESS;
600 }
601 }
602 }
603 spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
604 list_del(&anchor->list);
605 spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
606 kfree(anchor);
607
608 if (likely(mbo->complete))
609 mbo->complete(mbo);
610 usb_free_urb(urb);
611 }
612
613 /**
614 * hdm_enqueue - receive a buffer to be used for data transfer
615 * @iface: interface to enqueue to
616 * @channel: ID of the channel
617 * @mbo: pointer to the buffer object
618 *
619 * This allocates a new URB and fills it according to the channel
620 * that is being used for transmission of data. Before the URB is
621 * submitted it is stored in the private anchor list.
622 *
623 * Returns 0 on success. On any error the URB is freed and an error code
624 * is returned.
625 *
626 * Context: Could in _some_ cases be interrupt!
627 */
628 static int hdm_enqueue(struct most_interface *iface, int channel,
629 struct mbo *mbo)
630 {
631 struct most_dev *mdev;
632 struct buf_anchor *anchor;
633 struct most_channel_config *conf;
634 struct device *dev;
635 int retval = 0;
636 struct urb *urb;
637 unsigned long flags;
638 unsigned long length;
639 void *virt_address;
640
641 if (unlikely(!iface || !mbo))
642 return -EIO;
643 if (unlikely((channel < 0) || (iface->num_channels <= channel)))
644 return -ECHRNG;
645
646 mdev = to_mdev(iface);
647 conf = &mdev->conf[channel];
648 if (!mdev->usb_device)
649 return -ENODEV;
650 
651 dev = &mdev->usb_device->dev;
652
653 urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_ATOMIC);
654 if (!urb) {
655 dev_err(dev, "Failed to allocate URB\n");
656 return -ENOMEM;
657 }
658
659 anchor = kzalloc(sizeof(*anchor), GFP_ATOMIC);
660 if (!anchor) {
661 retval = -ENOMEM;
662 goto _error;
663 }
664
665 anchor->urb = urb;
666 init_completion(&anchor->urb_compl);
667 mbo->priv = anchor;
668
669 spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
670 list_add_tail(&anchor->list, &mdev->anchor_list[channel]);
671 spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
672
673 if ((mdev->padding_active[channel]) &&
674 (conf->direction & MOST_CH_TX))
675 if (hdm_add_padding(mdev, channel, mbo)) {
676 retval = -EIO;
677 goto _error_1;
678 }
679
680 urb->transfer_dma = mbo->bus_address;
681 virt_address = mbo->virt_address;
682 length = mbo->buffer_length;
683
684 if (conf->direction & MOST_CH_TX) {
685 usb_fill_bulk_urb(urb, mdev->usb_device,
686 usb_sndbulkpipe(mdev->usb_device,
687 mdev->ep_address[channel]),
688 virt_address,
689 length,
690 hdm_write_completion,
691 mbo);
692 if (conf->data_type != MOST_CH_ISOC_AVP)
693 urb->transfer_flags |= URB_ZERO_PACKET;
694 } else {
695 usb_fill_bulk_urb(urb, mdev->usb_device,
696 usb_rcvbulkpipe(mdev->usb_device,
697 mdev->ep_address[channel]),
698 virt_address,
699 length + conf->extra_len,
700 hdm_read_completion,
701 mbo);
702 }
703 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
704
705 retval = usb_submit_urb(urb, GFP_KERNEL);
706 if (retval) {
707 dev_err(dev, "URB submit failed with error %d.\n", retval);
708 goto _error_1;
709 }
710 return 0;
711
712 _error_1:
713 spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
714 list_del(&anchor->list);
715 spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
716 kfree(anchor);
717 _error:
718 usb_free_urb(urb);
719 return retval;
720 }
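/*
 * Buffer handling note: the MBO already carries a DMA address in
 * mbo->bus_address, so the URB is given that address via urb->transfer_dma
 * and URB_NO_TRANSFER_DMA_MAP tells the USB core not to map virt_address
 * again. The anchor and URB allocations use GFP_ATOMIC because, as noted
 * above, enqueue may be called from atomic context.
 */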
721
722 /**
723 * hdm_configure_channel - receive channel configuration from core
724 * @iface: interface
725 * @channel: channel ID
726 * @conf: structure that holds the configuration information
727 */
728 static int hdm_configure_channel(struct most_interface *iface, int channel,
729 struct most_channel_config *conf)
730 {
731 unsigned int num_frames;
732 unsigned int frame_size;
733 unsigned int temp_size;
734 unsigned int tail_space;
735 struct most_dev *mdev;
736 struct device *dev;
737
738 if (unlikely(!iface || !conf)) {
739 pr_err("Bad interface or config pointer.\n");
740 return -EINVAL;
741 }
742 mdev = to_mdev(iface);
743 dev = &mdev->usb_device->dev;
744 if (unlikely((channel < 0) || (channel >= iface->num_channels))) {
745 dev_err(dev, "Channel ID out of range.\n");
746 return -EINVAL;
747 }
748 mdev->is_channel_healthy[channel] = true;
749 
750 if ((!conf->num_buffers) || (!conf->buffer_size)) {
751 dev_err(dev, "Misconfig: buffer size or #buffers zero.\n");
752 return -EINVAL;
753 }
754
755 if (!(conf->data_type == MOST_CH_SYNC) &&
756 !((conf->data_type == MOST_CH_ISOC_AVP) &&
757 (conf->packets_per_xact != 0xFF))) {
758 mdev->padding_active[channel] = false;
759 goto exit;
760 }
761
762 mdev->padding_active[channel] = true;
763 temp_size = conf->buffer_size;
764
765 frame_size = get_stream_frame_size(conf);
766 if ((frame_size == 0) || (frame_size > USB_MTU)) {
767 dev_warn(dev, "Misconfig: frame size wrong\n");
768 return -EINVAL;
769 }
770
771 if (conf->buffer_size % frame_size) {
772 u16 tmp_val;
773
774 tmp_val = conf->buffer_size / frame_size;
775 conf->buffer_size = tmp_val * frame_size;
776 dev_notice(dev,
777 "Channel %d - rounding buffer size to %d bytes, channel config says %d bytes\n",
778 channel,
779 conf->buffer_size,
780 temp_size);
781 }
782
783 num_frames = conf->buffer_size / frame_size;
784 tail_space = num_frames * (USB_MTU - frame_size);
785 temp_size += tail_space;
786
787 /* calculate extra length to comply w/ HW padding */
788 conf->extra_len = (CEILING(temp_size, USB_MTU) * USB_MTU)
789 - conf->buffer_size;
790 exit:
791 mdev->conf[channel] = *conf;
792 return 0;
793 }
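/*
 * For padded (streaming) channels the buffer size is first rounded down to
 * a whole number of frames; extra_len then reserves additional space so
 * that each frame_size frame can be expanded to a full USB_MTU slot on the
 * bus (see hdm_add_padding()/hdm_remove_padding()).
 */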
794
795 /**
796 * hdm_update_netinfo - retrieve latest networking information
797 * @mdev: device interface
798 *
799 * This triggers the USB vendor requests to read the hardware address and
800 * the current link status of the attached device.
801 */
802 static int hdm_update_netinfo(struct most_dev *mdev)
803 {
804 struct usb_device *usb_device = mdev->usb_device;
805 struct device *dev = &usb_device->dev;
806 u16 hi, mi, lo, link;
807
808 if (!is_valid_ether_addr(mdev->hw_addr)) {
809 if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi) < 0) {
810 dev_err(dev, "Vendor request \"hw_addr_hi\" failed\n");
811 return -1;
812 }
813
814 if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi) < 0) {
815 dev_err(dev, "Vendor request \"hw_addr_mid\" failed\n");
816 return -1;
817 }
818
819 if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo) < 0) {
820 dev_err(dev, "Vendor request \"hw_addr_low\" failed\n");
821 return -1;
822 }
823
824 mutex_lock(&mdev->io_mutex);
825 mdev->hw_addr[0] = hi >> 8;
826 mdev->hw_addr[1] = hi;
827 mdev->hw_addr[2] = mi >> 8;
828 mdev->hw_addr[3] = mi;
829 mdev->hw_addr[4] = lo >> 8;
830 mdev->hw_addr[5] = lo;
831 mutex_unlock(&mdev->io_mutex);
832 }
833
834 if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link) < 0) {
835 dev_err(dev, "Vendor request \"link status\" failed\n");
836 return -1;
837 }
838
839 mutex_lock(&mdev->io_mutex);
840 mdev->link_stat = link;
841 mutex_unlock(&mdev->io_mutex);
842 return 0;
843 }
844
845 /**
846 * hdm_request_netinfo - request network information
847 * @iface: pointer to interface
848 * @channel: channel ID
849 *
850 * This is used as a trigger to set up the link status timer that
851 * polls for the NI state of the INIC every 2 seconds.
852 *
853 */
854 static void hdm_request_netinfo(struct most_interface *iface, int channel)
855 {
856 struct most_dev *mdev;
857
858 BUG_ON(!iface);
859 mdev = to_mdev(iface);
860 mdev->link_stat_timer.expires = jiffies + HZ;
861 mod_timer(&mdev->link_stat_timer, mdev->link_stat_timer.expires);
862 }
863
864 /**
865 * link_stat_timer_handler - add work to link_stat work queue
866 * @data: pointer to USB device instance
867 *
868 * The handler runs in interrupt context. That's why we need to defer the
869 * tasks to a work queue.
870 */
871 static void link_stat_timer_handler(unsigned long data)
872 {
873 struct most_dev *mdev = (struct most_dev *)data;
874
875 queue_work(schedule_usb_work, &mdev->poll_work_obj);
876 mdev->link_stat_timer.expires = jiffies + (2 * HZ);
877 add_timer(&mdev->link_stat_timer);
878 }
879
880 /**
881 * wq_netinfo - work queue function
882 * @wq_obj: object that holds data for our deferred work to do
883 *
884 * This retrieves the network interface status of the USB INIC
885 * and compares it with the current status. If the status has
886 * changed, it updates the status of the core.
887 */
888 static void wq_netinfo(struct work_struct *wq_obj)
889 {
890 struct most_dev *mdev;
891 int i, prev_link_stat;
892 u8 prev_hw_addr[6];
893
894 mdev = to_mdev_from_work(wq_obj);
895 prev_link_stat = mdev->link_stat;
896
897 for (i = 0; i < 6; i++)
898 prev_hw_addr[i] = mdev->hw_addr[i];
899
900 if (hdm_update_netinfo(mdev) < 0)
901 return;
902 if ((prev_link_stat != mdev->link_stat) ||
903 (prev_hw_addr[0] != mdev->hw_addr[0]) ||
904 (prev_hw_addr[1] != mdev->hw_addr[1]) ||
905 (prev_hw_addr[2] != mdev->hw_addr[2]) ||
906 (prev_hw_addr[3] != mdev->hw_addr[3]) ||
907 (prev_hw_addr[4] != mdev->hw_addr[4]) ||
908 (prev_hw_addr[5] != mdev->hw_addr[5]))
909 most_deliver_netinfo(&mdev->iface, mdev->link_stat,
910 &mdev->hw_addr[0]);
911 }
912
913 /**
914 * wq_clear_halt - work queue function
915 * @wq_obj: work_struct object to execute
916 *
917 * This sends a clear_halt to the given USB pipe.
918 */
919 static void wq_clear_halt(struct work_struct *wq_obj)
920 {
921 struct buf_anchor *anchor;
922 struct most_dev *mdev;
923 struct mbo *mbo;
924 struct urb *urb;
925 unsigned int channel;
926 unsigned long flags;
927
928 anchor = to_buf_anchor(wq_obj);
929 urb = anchor->urb;
930 mbo = urb->context;
931 mdev = to_mdev(mbo->ifp);
932 channel = mbo->hdm_channel_id;
933
934 if (usb_clear_halt(urb->dev, urb->pipe))
935 dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");
936
937 usb_free_urb(urb);
938 spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
939 list_del(&anchor->list);
940 spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
941
942 if (likely(mbo->complete))
943 mbo->complete(mbo);
944 if (mdev->conf[channel].direction & MOST_CH_TX)
945 most_resume_enqueue(&mdev->iface, channel);
946
947 kfree(anchor);
948 }
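/*
 * This runs from the hdmu_work workqueue rather than from the URB
 * completion handlers because usb_clear_halt() issues a blocking control
 * transfer and therefore must not be called in interrupt context. For TX
 * channels the core is told to resume enqueueing once the endpoint has
 * been reset.
 */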
949
950 /**
951 * hdm_usb_fops - file operation table for USB driver
952 */
953 static const struct file_operations hdm_usb_fops = {
954 .owner = THIS_MODULE,
955 };
956
957 /**
958 * usb_device_id - ID table for HCD device probing
959 */
960 static struct usb_device_id usbid[] = {
961 { USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_BRDG), },
962 { USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_INIC), },
963 { } /* Terminating entry */
964 };
965
966 #define MOST_DCI_RO_ATTR(_name) \
967 struct most_dci_attribute most_dci_attr_##_name = \
968 __ATTR(_name, S_IRUGO, show_value, NULL)
969
970 #define MOST_DCI_ATTR(_name) \
971 struct most_dci_attribute most_dci_attr_##_name = \
972 __ATTR(_name, S_IRUGO | S_IWUSR, show_value, store_value)
973
974 /**
975 * struct most_dci_attribute - to access the attributes of a dci object
976 * @attr: attributes of a dci object
977 * @show: pointer to the show function
978 * @store: pointer to the store function
979 */
980 struct most_dci_attribute {
981 struct attribute attr;
982 ssize_t (*show)(struct most_dci_obj *d,
983 struct most_dci_attribute *attr,
984 char *buf);
985 ssize_t (*store)(struct most_dci_obj *d,
986 struct most_dci_attribute *attr,
987 const char *buf,
988 size_t count);
989 };
990
991 #define to_dci_attr(a) container_of(a, struct most_dci_attribute, attr)
992
993 /**
994 * dci_attr_show - show function for dci object
995 * @kobj: pointer to kobject
996 * @attr: pointer to attribute struct
997 * @buf: buffer
998 */
999 static ssize_t dci_attr_show(struct kobject *kobj, struct attribute *attr,
1000 char *buf)
1001 {
1002 struct most_dci_attribute *dci_attr = to_dci_attr(attr);
1003 struct most_dci_obj *dci_obj = to_dci_obj(kobj);
1004
1005 if (!dci_attr->show)
1006 return -EIO;
1007
1008 return dci_attr->show(dci_obj, dci_attr, buf);
1009 }
1010
1011 /**
1012 * dci_attr_store - store function for dci object
1013 * @kobj: pointer to kobject
1014 * @attr: pointer to attribute struct
1015 * @buf: buffer
1016 * @len: length of buffer
1017 */
1018 static ssize_t dci_attr_store(struct kobject *kobj,
1019 struct attribute *attr,
1020 const char *buf,
1021 size_t len)
1022 {
1023 struct most_dci_attribute *dci_attr = to_dci_attr(attr);
1024 struct most_dci_obj *dci_obj = to_dci_obj(kobj);
1025
1026 if (!dci_attr->store)
1027 return -EIO;
1028
1029 return dci_attr->store(dci_obj, dci_attr, buf, len);
1030 }
1031
1032 static const struct sysfs_ops most_dci_sysfs_ops = {
1033 .show = dci_attr_show,
1034 .store = dci_attr_store,
1035 };
1036
1037 /**
1038 * most_dci_release - release function for dci object
1039 * @kobj: pointer to kobject
1040 *
1041 * This frees the memory allocated for the dci object
1042 */
1043 static void most_dci_release(struct kobject *kobj)
1044 {
1045 struct most_dci_obj *dci_obj = to_dci_obj(kobj);
1046
1047 kfree(dci_obj);
1048 }
1049
1050 static ssize_t show_value(struct most_dci_obj *dci_obj,
1051 struct most_dci_attribute *attr, char *buf)
1052 {
1053 u16 tmp_val;
1054 u16 reg_addr;
1055 int err;
1056
1057 if (!strcmp(attr->attr.name, "ni_state"))
1058 reg_addr = DRCI_REG_NI_STATE;
1059 else if (!strcmp(attr->attr.name, "packet_bandwidth"))
1060 reg_addr = DRCI_REG_PACKET_BW;
1061 else if (!strcmp(attr->attr.name, "node_address"))
1062 reg_addr = DRCI_REG_NODE_ADDR;
1063 else if (!strcmp(attr->attr.name, "node_position"))
1064 reg_addr = DRCI_REG_NODE_POS;
1065 else if (!strcmp(attr->attr.name, "mep_filter"))
1066 reg_addr = DRCI_REG_MEP_FILTER;
1067 else if (!strcmp(attr->attr.name, "mep_hash0"))
1068 reg_addr = DRCI_REG_HASH_TBL0;
1069 else if (!strcmp(attr->attr.name, "mep_hash1"))
1070 reg_addr = DRCI_REG_HASH_TBL1;
1071 else if (!strcmp(attr->attr.name, "mep_hash2"))
1072 reg_addr = DRCI_REG_HASH_TBL2;
1073 else if (!strcmp(attr->attr.name, "mep_hash3"))
1074 reg_addr = DRCI_REG_HASH_TBL3;
1075 else if (!strcmp(attr->attr.name, "mep_eui48_hi"))
1076 reg_addr = DRCI_REG_HW_ADDR_HI;
1077 else if (!strcmp(attr->attr.name, "mep_eui48_mi"))
1078 reg_addr = DRCI_REG_HW_ADDR_MI;
1079 else if (!strcmp(attr->attr.name, "mep_eui48_lo"))
1080 reg_addr = DRCI_REG_HW_ADDR_LO;
1081 else
1082 return -EIO;
1083
1084 err = drci_rd_reg(dci_obj->usb_device, reg_addr, &tmp_val);
1085 if (err < 0)
1086 return err;
1087
1088 return snprintf(buf, PAGE_SIZE, "%04x\n", tmp_val);
1089 }
1090
1091 static ssize_t store_value(struct most_dci_obj *dci_obj,
1092 struct most_dci_attribute *attr,
1093 const char *buf, size_t count)
1094 {
1095 u16 val;
1096 u16 reg_addr;
1097 int err;
1098
1099 if (!strcmp(attr->attr.name, "mep_filter"))
1100 reg_addr = DRCI_REG_MEP_FILTER;
1101 else if (!strcmp(attr->attr.name, "mep_hash0"))
1102 reg_addr = DRCI_REG_HASH_TBL0;
1103 else if (!strcmp(attr->attr.name, "mep_hash1"))
1104 reg_addr = DRCI_REG_HASH_TBL1;
1105 else if (!strcmp(attr->attr.name, "mep_hash2"))
1106 reg_addr = DRCI_REG_HASH_TBL2;
1107 else if (!strcmp(attr->attr.name, "mep_hash3"))
1108 reg_addr = DRCI_REG_HASH_TBL3;
1109 else if (!strcmp(attr->attr.name, "mep_eui48_hi"))
1110 reg_addr = DRCI_REG_HW_ADDR_HI;
1111 else if (!strcmp(attr->attr.name, "mep_eui48_mi"))
1112 reg_addr = DRCI_REG_HW_ADDR_MI;
1113 else if (!strcmp(attr->attr.name, "mep_eui48_lo"))
1114 reg_addr = DRCI_REG_HW_ADDR_LO;
1115 else
1116 return -EIO;
1117
1118 err = kstrtou16(buf, 16, &val);
1119 if (err)
1120 return err;
1121
1122 err = drci_wr_reg(dci_obj->usb_device, reg_addr, val);
1123 if (err < 0)
1124 return err;
1125
1126 return count;
1127 }
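/*
 * The show_value()/store_value() pair maps each sysfs attribute name of
 * the "dci" kobject to its DRCI register and forwards reads and writes
 * through drci_rd_reg()/drci_wr_reg(). Values are exchanged as 16-bit
 * hexadecimal strings (parsed with kstrtou16(..., 16, ...) and printed
 * as "%04x").
 */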
1128
1129 static MOST_DCI_RO_ATTR(ni_state);
1130 static MOST_DCI_RO_ATTR(packet_bandwidth);
1131 static MOST_DCI_RO_ATTR(node_address);
1132 static MOST_DCI_RO_ATTR(node_position);
1133 static MOST_DCI_ATTR(mep_filter);
1134 static MOST_DCI_ATTR(mep_hash0);
1135 static MOST_DCI_ATTR(mep_hash1);
1136 static MOST_DCI_ATTR(mep_hash2);
1137 static MOST_DCI_ATTR(mep_hash3);
1138 static MOST_DCI_ATTR(mep_eui48_hi);
1139 static MOST_DCI_ATTR(mep_eui48_mi);
1140 static MOST_DCI_ATTR(mep_eui48_lo);
1141
1142 /**
1143 * most_dci_def_attrs - array of default attribute files of the dci object
1144 */
1145 static struct attribute *most_dci_def_attrs[] = {
1146 &most_dci_attr_ni_state.attr,
1147 &most_dci_attr_packet_bandwidth.attr,
1148 &most_dci_attr_node_address.attr,
1149 &most_dci_attr_node_position.attr,
1150 &most_dci_attr_mep_filter.attr,
1151 &most_dci_attr_mep_hash0.attr,
1152 &most_dci_attr_mep_hash1.attr,
1153 &most_dci_attr_mep_hash2.attr,
1154 &most_dci_attr_mep_hash3.attr,
1155 &most_dci_attr_mep_eui48_hi.attr,
1156 &most_dci_attr_mep_eui48_mi.attr,
1157 &most_dci_attr_mep_eui48_lo.attr,
1158 NULL,
1159 };
1160
1161 /**
1162 * DCI ktype
1163 */
1164 static struct kobj_type most_dci_ktype = {
1165 .sysfs_ops = &most_dci_sysfs_ops,
1166 .release = most_dci_release,
1167 .default_attrs = most_dci_def_attrs,
1168 };
1169
1170 /**
1171 * create_most_dci_obj - allocates a dci object
1172 * @parent: parent kobject
1173 *
1174 * This creates a dci object and registers it with sysfs.
1175 * Returns a pointer to the object or NULL when something went wrong.
1176 */
1177 static struct
1178 most_dci_obj *create_most_dci_obj(struct kobject *parent)
1179 {
1180 struct most_dci_obj *most_dci;
1181 int retval;
1182
1183 most_dci = kzalloc(sizeof(*most_dci), GFP_KERNEL);
1184 if (!most_dci)
1185 return NULL;
1186
1187 retval = kobject_init_and_add(&most_dci->kobj, &most_dci_ktype, parent,
1188 "dci");
1189 if (retval) {
1190 kobject_put(&most_dci->kobj);
1191 return NULL;
1192 }
1193 return most_dci;
1194 }
1195
1196 /**
1197 * destroy_most_dci_obj - DCI object release function
1198 * @p: pointer to dci object
1199 */
1200 static void destroy_most_dci_obj(struct most_dci_obj *p)
1201 {
1202 kobject_put(&p->kobj);
1203 }
1204
1205 /**
1206 * hdm_probe - probe function of USB device driver
1207 * @interface: Interface of the attached USB device
1208 * @id: Pointer to the USB ID table.
1209 *
1210 * This allocates and initializes the device instance, adds the new
1211 * entry to the internal list, scans the USB descriptors and registers
1212 * the interface with the core.
1213 * Additionally, the DCI objects are created and the hardware is sync'd.
1214 *
1215 * Return 0 on success. In case of an error a negative number is returned.
1216 */
1217 static int
1218 hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
1219 {
1220 unsigned int i;
1221 unsigned int num_endpoints;
1222 struct most_channel_capability *tmp_cap;
1223 struct most_dev *mdev;
1224 struct usb_device *usb_dev;
1225 struct device *dev;
1226 struct usb_host_interface *usb_iface_desc;
1227 struct usb_endpoint_descriptor *ep_desc;
1228 int ret = 0;
1229 int err;
1230
1231 usb_iface_desc = interface->cur_altsetting;
1232 usb_dev = interface_to_usbdev(interface);
1233 dev = &usb_dev->dev;
1234 mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
1235 if (!mdev)
1236 goto exit_ENOMEM;
1237
1238 usb_set_intfdata(interface, mdev);
1239 num_endpoints = usb_iface_desc->desc.bNumEndpoints;
1240 mutex_init(&mdev->io_mutex);
1241 INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
1242 setup_timer(&mdev->link_stat_timer, link_stat_timer_handler,
1243 (unsigned long)mdev);
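/*
 * setup_timer() initializes link_stat_timer and binds
 * link_stat_timer_handler() with mdev as its data argument in a single
 * call; this is the conversion referred to by the commit subject
 * "staging: most: hdm-usb: Use setup_timer".
 */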
1244
1245 mdev->usb_device = usb_dev;
1246 mdev->link_stat_timer.expires = jiffies + (2 * HZ);
1247
1248 mdev->iface.mod = hdm_usb_fops.owner;
1249 mdev->iface.interface = ITYPE_USB;
1250 mdev->iface.configure = hdm_configure_channel;
1251 mdev->iface.request_netinfo = hdm_request_netinfo;
1252 mdev->iface.enqueue = hdm_enqueue;
1253 mdev->iface.poison_channel = hdm_poison_channel;
1254 mdev->iface.description = mdev->description;
1255 mdev->iface.num_channels = num_endpoints;
1256
1257 snprintf(mdev->description, sizeof(mdev->description),
1258 "usb_device %d-%s:%d.%d",
1259 usb_dev->bus->busnum,
1260 usb_dev->devpath,
1261 usb_dev->config->desc.bConfigurationValue,
1262 usb_iface_desc->desc.bInterfaceNumber);
1263
1264 mdev->conf = kcalloc(num_endpoints, sizeof(*mdev->conf), GFP_KERNEL);
1265 if (!mdev->conf)
1266 goto exit_free;
1267
1268 mdev->cap = kcalloc(num_endpoints, sizeof(*mdev->cap), GFP_KERNEL);
1269 if (!mdev->cap)
1270 goto exit_free1;
1271
1272 mdev->iface.channel_vector = mdev->cap;
1273 mdev->iface.priv = NULL;
1274
1275 mdev->ep_address =
1276 kcalloc(num_endpoints, sizeof(*mdev->ep_address), GFP_KERNEL);
1277 if (!mdev->ep_address)
1278 goto exit_free2;
1279
1280 mdev->anchor_list =
1281 kcalloc(num_endpoints, sizeof(*mdev->anchor_list), GFP_KERNEL);
1282 if (!mdev->anchor_list)
1283 goto exit_free3;
1284
1285 tmp_cap = mdev->cap;
1286 for (i = 0; i < num_endpoints; i++) {
1287 ep_desc = &usb_iface_desc->endpoint[i].desc;
1288 mdev->ep_address[i] = ep_desc->bEndpointAddress;
1289 mdev->padding_active[i] = false;
1290 mdev->is_channel_healthy[i] = true;
1291
1292 snprintf(&mdev->suffix[i][0], MAX_SUFFIX_LEN, "ep%02x",
1293 mdev->ep_address[i]);
1294
1295 tmp_cap->name_suffix = &mdev->suffix[i][0];
1296 tmp_cap->buffer_size_packet = MAX_BUF_SIZE;
1297 tmp_cap->buffer_size_streaming = MAX_BUF_SIZE;
1298 tmp_cap->num_buffers_packet = BUF_CHAIN_SIZE;
1299 tmp_cap->num_buffers_streaming = BUF_CHAIN_SIZE;
1300 tmp_cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
1301 MOST_CH_ISOC_AVP | MOST_CH_SYNC;
1302 if (ep_desc->bEndpointAddress & USB_DIR_IN)
1303 tmp_cap->direction = MOST_CH_RX;
1304 else
1305 tmp_cap->direction = MOST_CH_TX;
1306 tmp_cap++;
1307 INIT_LIST_HEAD(&mdev->anchor_list[i]);
1308 spin_lock_init(&mdev->anchor_list_lock[i]);
1309 err = drci_wr_reg(usb_dev,
1310 DRCI_REG_BASE + DRCI_COMMAND +
1311 ep_desc->bEndpointAddress * 16,
1312 1);
1313 if (err < 0)
1314 pr_warn("DCI Sync for EP %02x failed",
1315 ep_desc->bEndpointAddress);
1316 }
1317 dev_notice(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
1318 le16_to_cpu(usb_dev->descriptor.idVendor),
1319 le16_to_cpu(usb_dev->descriptor.idProduct),
1320 usb_dev->bus->busnum,
1321 usb_dev->devnum);
1322
1323 dev_notice(dev, "device path: /sys/bus/usb/devices/%d-%s:%d.%d\n",
1324 usb_dev->bus->busnum,
1325 usb_dev->devpath,
1326 usb_dev->config->desc.bConfigurationValue,
1327 usb_iface_desc->desc.bInterfaceNumber);
1328
1329 mdev->parent = most_register_interface(&mdev->iface);
1330 if (IS_ERR(mdev->parent)) {
1331 ret = PTR_ERR(mdev->parent);
1332 goto exit_free4;
1333 }
1334
1335 mutex_lock(&mdev->io_mutex);
1336 if (le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_INIC) {
1337 /* this increments the reference count of the instance
1338 * object of the core
1339 */
1340 mdev->dci = create_most_dci_obj(mdev->parent);
1341 if (!mdev->dci) {
1342 mutex_unlock(&mdev->io_mutex);
1343 most_deregister_interface(&mdev->iface);
1344 ret = -ENOMEM;
1345 goto exit_free4;
1346 }
1347
1348 kobject_uevent(&mdev->dci->kobj, KOBJ_ADD);
1349 mdev->dci->usb_device = mdev->usb_device;
1350 }
1351 mutex_unlock(&mdev->io_mutex);
1352 return 0;
1353
1354 exit_free4:
1355 kfree(mdev->anchor_list);
1356 exit_free3:
1357 kfree(mdev->ep_address);
1358 exit_free2:
1359 kfree(mdev->cap);
1360 exit_free1:
1361 kfree(mdev->conf);
1362 exit_free:
1363 kfree(mdev);
1364 exit_ENOMEM:
1365 if (ret == 0 || ret == -ENOMEM) {
1366 ret = -ENOMEM;
1367 dev_err(dev, "out of memory\n");
1368 }
1369 return ret;
1370 }
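/*
 * Error handling in hdm_probe() unwinds in reverse order of allocation via
 * the exit_free* labels; exit_ENOMEM converts a still-zero return value
 * into -ENOMEM so allocation failures that jump there directly are
 * reported correctly.
 */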
1371
1372 /**
1373 * hdm_disconnect - disconnect function of USB device driver
1374 * @interface: Interface of the attached USB device
1375 *
1376 * This deregisters the interface with the core, removes the kernel timer
1377 * and frees resources.
1378 *
1379 * Context: hub kernel thread
1380 */
1381 static void hdm_disconnect(struct usb_interface *interface)
1382 {
1383 struct most_dev *mdev;
1384
1385 mdev = usb_get_intfdata(interface);
1386 mutex_lock(&mdev->io_mutex);
1387 usb_set_intfdata(interface, NULL);
1388 mdev->usb_device = NULL;
1389 mutex_unlock(&mdev->io_mutex);
1390
1391 del_timer_sync(&mdev->link_stat_timer);
1392 cancel_work_sync(&mdev->poll_work_obj);
1393
1394 destroy_most_dci_obj(mdev->dci);
1395 most_deregister_interface(&mdev->iface);
1396
1397 kfree(mdev->anchor_list);
1398 kfree(mdev->cap);
1399 kfree(mdev->conf);
1400 kfree(mdev->ep_address);
1401 kfree(mdev);
1402 }
1403
1404 static struct usb_driver hdm_usb = {
1405 .name = "hdm_usb",
1406 .id_table = usbid,
1407 .probe = hdm_probe,
1408 .disconnect = hdm_disconnect,
1409 };
1410
1411 static int __init hdm_usb_init(void)
1412 {
1413 pr_info("hdm_usb_init()\n");
1414 if (usb_register(&hdm_usb)) {
1415 pr_err("could not register hdm_usb driver\n");
1416 return -EIO;
1417 }
1418 schedule_usb_work = create_workqueue("hdmu_work");
1419 if (!schedule_usb_work) {
1420 pr_err("could not create workqueue\n");
1421 usb_deregister(&hdm_usb);
1422 return -ENOMEM;
1423 }
1424 return 0;
1425 }
1426
1427 static void __exit hdm_usb_exit(void)
1428 {
1429 pr_info("hdm_usb_exit()\n");
1430 destroy_workqueue(schedule_usb_work);
1431 usb_deregister(&hdm_usb);
1432 }
1433
1434 module_init(hdm_usb_init);
1435 module_exit(hdm_usb_exit);
1436 MODULE_LICENSE("GPL");
1437 MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
1438 MODULE_DESCRIPTION("HDM_4_USB");