1 /*
2 * hdm_usb.c - Hardware dependent module for USB
3 *
4 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * This file is licensed under GPLv2.
12 */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/module.h>
16 #include <linux/fs.h>
17 #include <linux/usb.h>
18 #include <linux/slab.h>
19 #include <linux/init.h>
20 #include <linux/cdev.h>
21 #include <linux/device.h>
22 #include <linux/list.h>
23 #include <linux/completion.h>
24 #include <linux/mutex.h>
25 #include <linux/spinlock.h>
26 #include <linux/interrupt.h>
27 #include <linux/workqueue.h>
28 #include <linux/sysfs.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/etherdevice.h>
31 #include <linux/uaccess.h>
32 #include "mostcore.h"
33 #include "networking.h"
34
35 #define USB_MTU 512
36 #define NO_ISOCHRONOUS_URB 0
37 #define AV_PACKETS_PER_XACT 2
38 #define BUF_CHAIN_SIZE 0xFFFF
39 #define MAX_NUM_ENDPOINTS 30
40 #define MAX_SUFFIX_LEN 10
41 #define MAX_STRING_LEN 80
42 #define MAX_BUF_SIZE 0xFFFF
43 #define CEILING(x, y) (((x) + (y) - 1) / (y))
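/* Integer round-up division, e.g. CEILING(1000, USB_MTU) == 2; used in
 * hdm_configure_channel() below to round buffers up to whole USB transactions.
 */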
44
45 #define USB_VENDOR_ID_SMSC 0x0424 /* VID: SMSC */
46 #define USB_DEV_ID_BRDG 0xC001 /* PID: USB Bridge */
47 #define USB_DEV_ID_INIC 0xCF18 /* PID: USB INIC */
48 #define HW_RESYNC 0x0000
49 /* DRCI Addresses */
50 #define DRCI_REG_NI_STATE 0x0100
51 #define DRCI_REG_PACKET_BW 0x0101
52 #define DRCI_REG_NODE_ADDR 0x0102
53 #define DRCI_REG_NODE_POS 0x0103
54 #define DRCI_REG_MEP_FILTER 0x0140
55 #define DRCI_REG_HASH_TBL0 0x0141
56 #define DRCI_REG_HASH_TBL1 0x0142
57 #define DRCI_REG_HASH_TBL2 0x0143
58 #define DRCI_REG_HASH_TBL3 0x0144
59 #define DRCI_REG_HW_ADDR_HI 0x0145
60 #define DRCI_REG_HW_ADDR_MI 0x0146
61 #define DRCI_REG_HW_ADDR_LO 0x0147
62 #define DRCI_REG_BASE 0x1100
63 #define DRCI_COMMAND 0x02
64 #define DRCI_READ_REQ 0xA0
65 #define DRCI_WRITE_REQ 0xA1
66
67 /**
68 * struct buf_anchor - used to create a list of pending URBs
69 * @urb: pointer to USB request block
70 * @clear_work_obj: work object used to clear a halted endpoint
71 * @list: linked list
72 * @urb_compl: completion object used while unlinking the URB
73 */
74 struct buf_anchor {
75 struct urb *urb;
76 struct work_struct clear_work_obj;
77 struct list_head list;
78 struct completion urb_compl;
79 };
80 #define to_buf_anchor(w) container_of(w, struct buf_anchor, clear_work_obj)
81
82 /**
83 * struct most_dci_obj - Direct Communication Interface
84 * @kobj: position in sysfs
85 * @usb_device: pointer to the usb device
86 */
87 struct most_dci_obj {
88 struct kobject kobj;
89 struct usb_device *usb_device;
90 };
91 #define to_dci_obj(p) container_of(p, struct most_dci_obj, kobj)
92
93 /**
94 * struct most_dev - holds all usb interface specific stuff
95 * @parent: parent object in sysfs
96 * @usb_device: pointer to usb device
97 * @iface: hardware interface
98 * @cap: channel capabilities
99 * @conf: channel configuration
100 * @dci: direct communication interface of hardware
101 * @hw_addr: MAC address of hardware
102 * @ep_address: endpoint address table
103 * @link_stat: link status of hardware
104 * @description: device description
105 * @suffix: suffix for channel name
106 * @anchor_list_lock: locks list access
107 * @padding_active: indicates channel uses padding
108 * @is_channel_healthy: health status table of each channel
109 * @anchor_list: list of anchored items
110 * @io_mutex: synchronize I/O with disconnect
111 * @link_stat_timer: timer for link status reports
112 * @poll_work_obj: work for polling link status
113 */
114 struct most_dev {
115 struct kobject *parent;
116 struct usb_device *usb_device;
117 struct most_interface iface;
118 struct most_channel_capability *cap;
119 struct most_channel_config *conf;
120 struct most_dci_obj *dci;
121 u8 hw_addr[6];
122 u8 *ep_address;
123 u16 link_stat;
124 char description[MAX_STRING_LEN];
125 char suffix[MAX_NUM_ENDPOINTS][MAX_SUFFIX_LEN];
126 spinlock_t anchor_list_lock[MAX_NUM_ENDPOINTS];
127 bool padding_active[MAX_NUM_ENDPOINTS];
128 bool is_channel_healthy[MAX_NUM_ENDPOINTS];
129 struct list_head *anchor_list;
130 struct mutex io_mutex;
131 struct timer_list link_stat_timer;
132 struct work_struct poll_work_obj;
133 };
134 #define to_mdev(d) container_of(d, struct most_dev, iface)
135 #define to_mdev_from_work(w) container_of(w, struct most_dev, poll_work_obj)
136
137 static struct workqueue_struct *schedule_usb_work;
138 static void wq_clear_halt(struct work_struct *wq_obj);
139 static void wq_netinfo(struct work_struct *wq_obj);
140
141 /**
142 * drci_rd_reg - read a DCI register
143 * @dev: usb device
144 * @reg: register address
145 * @buf: buffer to store data
146 *
147 * This reads data from the INIC's direct register communication interface.
148 */
149 static inline int drci_rd_reg(struct usb_device *dev, u16 reg, u16 *buf)
150 {
151 int retval;
152 u16 *dma_buf = kzalloc(sizeof(u16), GFP_KERNEL);
153 u8 req_type = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
154
155 if (!dma_buf)
156 return -ENOMEM;
157
158 retval = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
159 DRCI_READ_REQ, req_type,
160 0x0000,
161 reg, dma_buf, sizeof(u16), 5 * HZ);
162 *buf = le16_to_cpu(*dma_buf);
163 kfree(dma_buf);
164
165 return retval;
166 }
167
168 /**
169 * drci_wr_reg - write a DCI register
170 * @dev: usb device
171 * @reg: register address
172 * @data: data to write
173 *
174 * This writes data to the INIC's direct register communication interface.
175 */
176 static inline int drci_wr_reg(struct usb_device *dev, u16 reg, u16 data)
177 {
178 return usb_control_msg(dev,
179 usb_sndctrlpipe(dev, 0),
180 DRCI_WRITE_REQ,
181 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
182 data,
183 reg,
184 NULL,
185 0,
186 5 * HZ);
187 }
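/*
 * Minimal usage sketch (illustrative only, not taken from this file):
 * reading the current node address and writing the MEP filter register,
 * given a valid struct usb_device pointer, could look like this:
 *
 *	u16 node_addr;
 *
 *	if (drci_rd_reg(usb_device, DRCI_REG_NODE_ADDR, &node_addr) < 0)
 *		dev_err(&usb_device->dev, "DCI read failed\n");
 *	if (drci_wr_reg(usb_device, DRCI_REG_MEP_FILTER, filter_val) < 0)
 *		dev_err(&usb_device->dev, "DCI write failed\n");
 *
 * where filter_val stands for a caller-chosen 16-bit value.
 */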
188
189 /**
190 * free_anchored_buffers - free device's anchored items
191 * @mdev: the device
192 * @channel: channel ID
193 */
194 static void free_anchored_buffers(struct most_dev *mdev, unsigned int channel)
195 {
196 struct mbo *mbo;
197 struct buf_anchor *anchor, *tmp;
198 unsigned long flags;
199
200 spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
201 list_for_each_entry_safe(anchor, tmp, &mdev->anchor_list[channel],
202 list) {
203 struct urb *urb = anchor->urb;
204
205 spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
206 if (likely(urb)) {
207 mbo = urb->context;
208 if (!irqs_disabled()) {
209 usb_kill_urb(urb);
210 } else {
211 usb_unlink_urb(urb);
212 wait_for_completion(&anchor->urb_compl);
213 }
214 if ((mbo) && (mbo->complete)) {
215 mbo->status = MBO_E_CLOSE;
216 mbo->processed_length = 0;
217 mbo->complete(mbo);
218 }
219 usb_free_urb(urb);
220 }
221 spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
222 list_del(&anchor->list);
223 kfree(anchor);
224 }
225 spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
226 }
227
228 /**
229 * get_stream_frame_size - calculate frame size of current configuration
230 * @cfg: channel configuration
231 */
232 static unsigned int get_stream_frame_size(struct most_channel_config *cfg)
233 {
234 unsigned int frame_size = 0;
235 unsigned int sub_size = cfg->subbuffer_size;
236
237 if (!sub_size) {
238 pr_warn("Misconfig: Subbuffer size zero.\n");
239 return frame_size;
240 }
241 switch (cfg->data_type) {
242 case MOST_CH_ISOC_AVP:
243 frame_size = AV_PACKETS_PER_XACT * sub_size;
244 break;
245 case MOST_CH_SYNC:
246 if (cfg->packets_per_xact == 0) {
247 pr_warn("Misconfig: Packets per XACT zero\n");
248 frame_size = 0;
249 } else if (cfg->packets_per_xact == 0xFF)
250 frame_size = (USB_MTU / sub_size) * sub_size;
251 else
252 frame_size = cfg->packets_per_xact * sub_size;
253 break;
254 default:
255 pr_warn("Query frame size of non-streaming channel\n");
256 break;
257 }
258 return frame_size;
259 }
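/*
 * Worked examples (values picked only for illustration): a synchronous
 * channel with subbuffer_size = 4 and packets_per_xact = 2 yields a
 * frame size of 2 * 4 = 8 bytes; with packets_per_xact = 0xFF the frame
 * is sized to the largest multiple of the subbuffer size that fits into
 * one USB transaction, (512 / 4) * 4 = 512 bytes.  Isochronous AV
 * channels always use AV_PACKETS_PER_XACT * subbuffer_size.
 */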
260
261 /**
262 * hdm_poison_channel - mark buffers of this channel as invalid
263 * @iface: pointer to the interface
264 * @channel: channel ID
265 *
266 * This unlinks all URBs submitted to the HCD,
267 * calls the associated completion function of the core and removes
268 * them from the list.
269 *
270 * Returns 0 on success or error code otherwise.
271 */
272 static int hdm_poison_channel(struct most_interface *iface, int channel)
273 {
274 struct most_dev *mdev;
275
276 if (unlikely(!iface)) {
277 pr_warn("Poison: Bad interface.\n");
278 return -EIO;
279 }
280 mdev = to_mdev(iface);
281 if (unlikely((channel < 0) || (channel >= iface->num_channels))) {
282 dev_warn(&mdev->usb_device->dev, "Channel ID out of range.\n");
283 return -ECHRNG;
284 }
285
286 mdev->is_channel_healthy[channel] = false;
287
288 mutex_lock(&mdev->io_mutex);
289 free_anchored_buffers(mdev, channel);
290 if (mdev->padding_active[channel])
291 mdev->padding_active[channel] = false;
292
293 if (mdev->conf[channel].data_type == MOST_CH_ASYNC) {
294 del_timer_sync(&mdev->link_stat_timer);
295 cancel_work_sync(&mdev->poll_work_obj);
296 }
297 mutex_unlock(&mdev->io_mutex);
298 return 0;
299 }
300
301 /**
302 * hdm_add_padding - add padding bytes
303 * @mdev: most device
304 * @channel: channel ID
305 * @mbo: buffer object
306 *
307 * This inserts the INIC hardware specific padding bytes into a streaming
308 * channel's buffer
309 */
310 static int hdm_add_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
311 {
312 struct most_channel_config *conf = &mdev->conf[channel];
313 unsigned int j, num_frames, frame_size;
314 u16 rd_addr, wr_addr;
315
316 frame_size = get_stream_frame_size(conf);
317 if (!frame_size)
318 return -EIO;
319 num_frames = mbo->buffer_length / frame_size;
320
321 if (num_frames < 1) {
322 dev_err(&mdev->usb_device->dev,
323 "Missed minimal transfer unit.\n");
324 return -EIO;
325 }
326
327 for (j = 1; j < num_frames; j++) {
328 wr_addr = (num_frames - j) * USB_MTU;
329 rd_addr = (num_frames - j) * frame_size;
330 memmove(mbo->virt_address + wr_addr,
331 mbo->virt_address + rd_addr,
332 frame_size);
333 }
334 mbo->buffer_length = num_frames * USB_MTU;
335 return 0;
336 }
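/*
 * Layout sketch (example numbers): with frame_size = 8 and a 32-byte
 * buffer, num_frames = 4.  The loop above moves frame 3 from offset 24
 * to 1536, frame 2 from 16 to 1024 and frame 1 from 8 to 512, working
 * backwards so nothing is overwritten; frame 0 stays at offset 0.  Each
 * frame then starts on a USB_MTU boundary and buffer_length becomes
 * 4 * 512 = 2048 bytes.
 */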
337
338 /**
339 * hdm_remove_padding - remove padding bytes
340 * @mdev: most device
341 * @channel: channel ID
342 * @mbo: buffer object
343 *
344 * This takes the INIC hardware specific padding bytes off a streaming
345 * channel's buffer.
346 */
347 static int hdm_remove_padding(struct most_dev *mdev, int channel,
348 struct mbo *mbo)
349 {
350 unsigned int j, num_frames, frame_size;
351 struct most_channel_config *const conf = &mdev->conf[channel];
352
353 frame_size = get_stream_frame_size(conf);
354 if (!frame_size)
355 return -EIO;
356 num_frames = mbo->processed_length / USB_MTU;
357
358 for (j = 1; j < num_frames; j++)
359 memmove(mbo->virt_address + frame_size * j,
360 mbo->virt_address + USB_MTU * j,
361 frame_size);
362
363 mbo->processed_length = frame_size * num_frames;
364 return 0;
365 }
366
367 /**
368 * hdm_write_completion - completion function for submitted Tx URBs
369 * @urb: the URB that has been completed
370 *
371 * This checks the status of the completed URB. In case the URB has been
372 * unlinked before, it is immediately freed. On any other error the MBO
373 * transfer flag is set. On success it frees allocated resources and calls
374 * the completion function.
375 *
376 * Context: interrupt!
377 */
378 static void hdm_write_completion(struct urb *urb)
379 {
380 struct mbo *mbo;
381 struct buf_anchor *anchor;
382 struct most_dev *mdev;
383 struct device *dev;
384 unsigned int channel;
385 unsigned long flags;
386
387 mbo = urb->context;
388 anchor = mbo->priv;
389 mdev = to_mdev(mbo->ifp);
390 channel = mbo->hdm_channel_id;
391 dev = &mdev->usb_device->dev;
392
393 if ((urb->status == -ENOENT) || (urb->status == -ECONNRESET) ||
394 (!mdev->is_channel_healthy[channel])) {
395 complete(&anchor->urb_compl);
396 return;
397 }
398
399 if (unlikely(urb->status && !(urb->status == -ENOENT ||
400 urb->status == -ECONNRESET ||
401 urb->status == -ESHUTDOWN))) {
402 mbo->processed_length = 0;
403 switch (urb->status) {
404 case -EPIPE:
405 dev_warn(dev, "Broken OUT pipe detected\n");
406 most_stop_enqueue(&mdev->iface, channel);
407 mbo->status = MBO_E_INVAL;
408 usb_unlink_urb(urb);
409 INIT_WORK(&anchor->clear_work_obj, wq_clear_halt);
410 queue_work(schedule_usb_work, &anchor->clear_work_obj);
411 return;
412 case -ENODEV:
413 case -EPROTO:
414 mbo->status = MBO_E_CLOSE;
415 break;
416 default:
417 mbo->status = MBO_E_INVAL;
418 break;
419 }
420 } else {
421 mbo->status = MBO_SUCCESS;
422 mbo->processed_length = urb->actual_length;
423 }
424
425 spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
426 list_del(&anchor->list);
427 spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
428 kfree(anchor);
429
430 if (likely(mbo->complete))
431 mbo->complete(mbo);
432 usb_free_urb(urb);
433 }
434
435 /**
436 * hdm_read_completion - completion function for submitted Rx URBs
437 * @urb: the URB that has been completed
438 *
439 * This checks the status of the completed URB. In case the URB has been
440 * unlinked before, it is immediately freed. On any other error the MBO transfer
441 * flag is set. On success it frees allocated resources, removes
442 * padding bytes - if necessary - and calls the completion function.
443 *
444 * Context: interrupt!
445 *
446 * **************************************************************************
447 * Error codes returned in urb->status
448 * or in iso_frame_desc[n].status (for ISO)
449 * *************************************************************************
450 *
451 * USB device drivers may only test urb status values in completion handlers.
452 * This is because otherwise there would be a race between HCDs updating
453 * these values on one CPU, and device drivers testing them on another CPU.
454 *
455 * A transfer's actual_length may be positive even when an error has been
456 * reported. That's because transfers often involve several packets, so that
457 * one or more packets could finish before an error stops further endpoint I/O.
458 *
459 * For isochronous URBs, the urb status value is non-zero only if the URB is
460 * unlinked, the device is removed, the host controller is disabled or the total
461 * transferred length is less than the requested length and the URB_SHORT_NOT_OK
462 * flag is set. Completion handlers for isochronous URBs should only see
463 * urb->status set to zero, -ENOENT, -ECONNRESET, -ESHUTDOWN, or -EREMOTEIO.
464 * Individual frame descriptor status fields may report more status codes.
465 *
466 *
467 * 0 Transfer completed successfully
468 *
469 * -ENOENT URB was synchronously unlinked by usb_unlink_urb
470 *
471 * -EINPROGRESS URB still pending, no results yet
472 * (That is, if drivers see this it's a bug.)
473 *
474 * -EPROTO (*, **) a) bitstuff error
475 * b) no response packet received within the
476 * prescribed bus turn-around time
477 * c) unknown USB error
478 *
479 * -EILSEQ (*, **) a) CRC mismatch
480 * b) no response packet received within the
481 * prescribed bus turn-around time
482 * c) unknown USB error
483 *
484 * Note that often the controller hardware does not
485 * distinguish among cases a), b), and c), so a
486 * driver cannot tell whether there was a protocol
487 * error, a failure to respond (often caused by
488 * device disconnect), or some other fault.
489 *
490 * -ETIME (**) No response packet received within the prescribed
491 * bus turn-around time. This error may instead be
492 * reported as -EPROTO or -EILSEQ.
493 *
494 * -ETIMEDOUT Synchronous USB message functions use this code
495 * to indicate timeout expired before the transfer
496 * completed, and no other error was reported by HC.
497 *
498 * -EPIPE (**) Endpoint stalled. For non-control endpoints,
499 * reset this status with usb_clear_halt().
500 *
501 * -ECOMM During an IN transfer, the host controller
502 * received data from an endpoint faster than it
503 * could be written to system memory
504 *
505 * -ENOSR During an OUT transfer, the host controller
506 * could not retrieve data from system memory fast
507 * enough to keep up with the USB data rate
508 *
509 * -EOVERFLOW (*) The amount of data returned by the endpoint was
510 * greater than either the max packet size of the
511 * endpoint or the remaining buffer size. "Babble".
512 *
513 * -EREMOTEIO The data read from the endpoint did not fill the
514 * specified buffer, and URB_SHORT_NOT_OK was set in
515 * urb->transfer_flags.
516 *
517 * -ENODEV Device was removed. Often preceded by a burst of
518 * other errors, since the hub driver doesn't detect
519 * device removal events immediately.
520 *
521 * -EXDEV ISO transfer only partially completed
522 * (only set in iso_frame_desc[n].status, not urb->status)
523 *
524 * -EINVAL ISO madness, if this happens: Log off and go home
525 *
526 * -ECONNRESET URB was asynchronously unlinked by usb_unlink_urb
527 *
528 * -ESHUTDOWN The device or host controller has been disabled due
529 * to some problem that could not be worked around,
530 * such as a physical disconnect.
531 *
532 *
533 * (*) Error codes like -EPROTO, -EILSEQ and -EOVERFLOW normally indicate
534 * hardware problems such as bad devices (including firmware) or cables.
535 *
536 * (**) This is also one of several codes that different kinds of host
537 * controller use to indicate a transfer has failed because of device
538 * disconnect. In the interval before the hub driver starts disconnect
539 * processing, devices may receive such fault reports for every request.
540 *
541 * See <https://www.kernel.org/doc/Documentation/usb/error-codes.txt>
542 */
543 static void hdm_read_completion(struct urb *urb)
544 {
545 struct mbo *mbo;
546 struct buf_anchor *anchor;
547 struct most_dev *mdev;
548 struct device *dev;
549 unsigned long flags;
550 unsigned int channel;
551
552 mbo = urb->context;
553 anchor = mbo->priv;
554 mdev = to_mdev(mbo->ifp);
555 channel = mbo->hdm_channel_id;
556 dev = &mdev->usb_device->dev;
557
558 if ((urb->status == -ENOENT) || (urb->status == -ECONNRESET) ||
559 (!mdev->is_channel_healthy[channel])) {
560 complete(&anchor->urb_compl);
561 return;
562 }
563
564 if (unlikely(urb->status && !(urb->status == -ENOENT ||
565 urb->status == -ECONNRESET ||
566 urb->status == -ESHUTDOWN))) {
567 mbo->processed_length = 0;
568 switch (urb->status) {
569 case -EPIPE:
570 dev_warn(dev, "Broken IN pipe detected\n");
571 mbo->status = MBO_E_INVAL;
572 usb_unlink_urb(urb);
573 INIT_WORK(&anchor->clear_work_obj, wq_clear_halt);
574 queue_work(schedule_usb_work, &anchor->clear_work_obj);
575 return;
576 case -ENODEV:
577 case -EPROTO:
578 mbo->status = MBO_E_CLOSE;
579 break;
580 case -EOVERFLOW:
581 dev_warn(dev, "Babble on IN pipe detected\n"); /* fall through */
582 default:
583 mbo->status = MBO_E_INVAL;
584 break;
585 }
586 } else {
587 mbo->processed_length = urb->actual_length;
588 if (!mdev->padding_active[channel]) {
589 mbo->status = MBO_SUCCESS;
590 } else {
591 if (hdm_remove_padding(mdev, channel, mbo)) {
592 mbo->processed_length = 0;
593 mbo->status = MBO_E_INVAL;
594 } else {
595 mbo->status = MBO_SUCCESS;
596 }
597 }
598 }
599 spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
600 list_del(&anchor->list);
601 spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
602 kfree(anchor);
603
604 if (likely(mbo->complete))
605 mbo->complete(mbo);
606 usb_free_urb(urb);
607 }
608
609 /**
610 * hdm_enqueue - receive a buffer to be used for data transfer
611 * @iface: interface to enqueue to
612 * @channel: ID of the channel
613 * @mbo: pointer to the buffer object
614 *
615 * This allocates a new URB and fills it according to the channel
616 * that is being used for transmission of data. Before the URB is
617 * submitted it is stored in the private anchor list.
618 *
619 * Returns 0 on success. On any error the URB is freed and an error code
620 * is returned.
621 *
622 * Context: Could in _some_ cases be interrupt!
623 */
624 static int hdm_enqueue(struct most_interface *iface, int channel,
625 struct mbo *mbo)
626 {
627 struct most_dev *mdev;
628 struct buf_anchor *anchor;
629 struct most_channel_config *conf;
630 struct device *dev;
631 int retval = 0;
632 struct urb *urb;
633 unsigned long flags;
634 unsigned long length;
635 void *virt_address;
636
637 if (unlikely(!iface || !mbo))
638 return -EIO;
639 if (unlikely((channel < 0) || (channel >= iface->num_channels)))
640 return -ECHRNG;
641
642 mdev = to_mdev(iface);
643 conf = &mdev->conf[channel];
644 
645 if (!mdev->usb_device)
646 return -ENODEV;
647 dev = &mdev->usb_device->dev;
648
649 urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_ATOMIC);
650 if (!urb) {
651 dev_err(dev, "Failed to allocate URB\n");
652 return -ENOMEM;
653 }
654
655 anchor = kzalloc(sizeof(*anchor), GFP_ATOMIC);
656 if (!anchor) {
657 retval = -ENOMEM;
658 goto _error;
659 }
660
661 anchor->urb = urb;
662 init_completion(&anchor->urb_compl);
663 mbo->priv = anchor;
664
665 spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
666 list_add_tail(&anchor->list, &mdev->anchor_list[channel]);
667 spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
668
669 if ((mdev->padding_active[channel]) &&
670 (conf->direction & MOST_CH_TX))
671 if (hdm_add_padding(mdev, channel, mbo)) {
672 retval = -EIO;
673 goto _error_1;
674 }
675
676 urb->transfer_dma = mbo->bus_address;
677 virt_address = mbo->virt_address;
678 length = mbo->buffer_length;
679
680 if (conf->direction & MOST_CH_TX) {
681 usb_fill_bulk_urb(urb, mdev->usb_device,
682 usb_sndbulkpipe(mdev->usb_device,
683 mdev->ep_address[channel]),
684 virt_address,
685 length,
686 hdm_write_completion,
687 mbo);
688 if (conf->data_type != MOST_CH_ISOC_AVP)
689 urb->transfer_flags |= URB_ZERO_PACKET;
690 } else {
691 usb_fill_bulk_urb(urb, mdev->usb_device,
692 usb_rcvbulkpipe(mdev->usb_device,
693 mdev->ep_address[channel]),
694 virt_address,
695 length + conf->extra_len,
696 hdm_read_completion,
697 mbo);
698 }
699 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
700
701 retval = usb_submit_urb(urb, GFP_ATOMIC);
702 if (retval) {
703 dev_err(dev, "URB submit failed with error %d.\n", retval);
704 goto _error_1;
705 }
706 return 0;
707
708 _error_1:
709 spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
710 list_del(&anchor->list);
711 spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
712 kfree(anchor);
713 _error:
714 usb_free_urb(urb);
715 return retval;
716 }
717
718 /**
719 * hdm_configure_channel - receive channel configuration from core
720 * @iface: interface
721 * @channel: channel ID
722 * @conf: structure that holds the configuration information
723 */
724 static int hdm_configure_channel(struct most_interface *iface, int channel,
725 struct most_channel_config *conf)
726 {
727 unsigned int num_frames;
728 unsigned int frame_size;
729 unsigned int temp_size;
730 unsigned int tail_space;
731 struct most_dev *mdev;
732 struct device *dev;
733
734 if (unlikely(!iface || !conf)) {
735 pr_err("Bad interface or config pointer.\n");
736 return -EINVAL;
737 }
738 mdev = to_mdev(iface);
739 dev = &mdev->usb_device->dev;
740 
741 if (unlikely((channel < 0) || (channel >= iface->num_channels))) {
742 dev_err(dev, "Channel ID out of range.\n");
743 return -EINVAL;
744 }
745 mdev->is_channel_healthy[channel] = true;
746 if ((!conf->num_buffers) || (!conf->buffer_size)) {
747 dev_err(dev, "Misconfig: buffer size or #buffers zero.\n");
748 return -EINVAL;
749 }
750
751 if (!(conf->data_type == MOST_CH_SYNC) &&
752 !((conf->data_type == MOST_CH_ISOC_AVP) &&
753 (conf->packets_per_xact != 0xFF))) {
754 mdev->padding_active[channel] = false;
755 goto exit;
756 }
757
758 mdev->padding_active[channel] = true;
759 temp_size = conf->buffer_size;
760
761 frame_size = get_stream_frame_size(conf);
762 if ((frame_size == 0) || (frame_size > USB_MTU)) {
763 dev_warn(dev, "Misconfig: frame size wrong\n");
764 return -EINVAL;
765 }
766
767 if (conf->buffer_size % frame_size) {
768 u16 tmp_val;
769
770 tmp_val = conf->buffer_size / frame_size;
771 conf->buffer_size = tmp_val * frame_size;
772 dev_notice(dev,
773 "Channel %d - rounding buffer size to %d bytes, channel config says %d bytes\n",
774 channel,
775 conf->buffer_size,
776 temp_size);
777 }
778
779 num_frames = conf->buffer_size / frame_size;
780 tail_space = num_frames * (USB_MTU - frame_size);
781 temp_size += tail_space;
782
783 /* calculate extra length to comply w/ HW padding */
784 conf->extra_len = (CEILING(temp_size, USB_MTU) * USB_MTU)
785 - conf->buffer_size;
786 exit:
787 mdev->conf[channel] = *conf;
788 return 0;
789 }
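/*
 * Worked example for the padding math above (illustrative values): a
 * synchronous channel with subbuffer_size = 4 and packets_per_xact = 2
 * has frame_size = 8.  A requested buffer_size of 64 bytes gives
 * num_frames = 8 and tail_space = 8 * (512 - 8) = 4032, so
 * temp_size = 4096 and extra_len = CEILING(4096, 512) * 512 - 64 = 4032.
 * The core thus allocates 64 + 4032 = 4096 bytes, i.e. eight full USB
 * transactions, which is exactly what hdm_add_padding() needs.
 */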
790
791 /**
792 * hdm_update_netinfo - retrieve latest networking information
793 * @mdev: device interface
794 *
795 * This triggers the USB vendor requests to read the hardware address and
796 * the current link status of the attached device.
797 */
798 static int hdm_update_netinfo(struct most_dev *mdev)
799 {
800 struct usb_device *usb_device = mdev->usb_device;
801 struct device *dev = &usb_device->dev;
802 u16 hi, mi, lo, link;
803
804 if (!is_valid_ether_addr(mdev->hw_addr)) {
805 if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi) < 0) {
806 dev_err(dev, "Vendor request \"hw_addr_hi\" failed\n");
807 return -1;
808 }
809
810 if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi) < 0) {
811 dev_err(dev, "Vendor request \"hw_addr_mid\" failed\n");
812 return -1;
813 }
814
815 if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo) < 0) {
816 dev_err(dev, "Vendor request \"hw_addr_low\" failed\n");
817 return -1;
818 }
819
820 mutex_lock(&mdev->io_mutex);
821 mdev->hw_addr[0] = hi >> 8;
822 mdev->hw_addr[1] = hi;
823 mdev->hw_addr[2] = mi >> 8;
824 mdev->hw_addr[3] = mi;
825 mdev->hw_addr[4] = lo >> 8;
826 mdev->hw_addr[5] = lo;
827 mutex_unlock(&mdev->io_mutex);
828 }
829
830 if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link) < 0) {
831 dev_err(dev, "Vendor request \"link status\" failed\n");
832 return -1;
833 }
834
835 mutex_lock(&mdev->io_mutex);
836 mdev->link_stat = link;
837 mutex_unlock(&mdev->io_mutex);
838 return 0;
839 }
840
841 /**
842 * hdm_request_netinfo - request network information
843 * @iface: pointer to interface
844 * @channel: channel ID
845 *
846 * This is used as a trigger to set up the link status timer that
847 * polls the NI state of the INIC every two seconds.
848 *
849 */
850 static void hdm_request_netinfo(struct most_interface *iface, int channel)
851 {
852 struct most_dev *mdev;
853
854 BUG_ON(!iface);
855 mdev = to_mdev(iface);
856 mdev->link_stat_timer.expires = jiffies + HZ;
857 mod_timer(&mdev->link_stat_timer, mdev->link_stat_timer.expires);
858 }
859
860 /**
861 * link_stat_timer_handler - add work to link_stat work queue
862 * @data: pointer to the most_dev instance, cast to unsigned long
863 *
864 * The handler runs in interrupt context. That's why we need to defer the
865 * tasks to a work queue.
866 */
867 static void link_stat_timer_handler(unsigned long data)
868 {
869 struct most_dev *mdev = (struct most_dev *)data;
870
871 queue_work(schedule_usb_work, &mdev->poll_work_obj);
872 mdev->link_stat_timer.expires = jiffies + (2 * HZ);
873 add_timer(&mdev->link_stat_timer);
874 }
875
876 /**
877 * wq_netinfo - work queue function
878 * @wq_obj: object that holds data for our deferred work to do
879 *
880 * This retrieves the network interface status of the USB INIC
881 * and compares it with the current status. If the status has
882 * changed, it updates the status of the core.
883 */
884 static void wq_netinfo(struct work_struct *wq_obj)
885 {
886 struct most_dev *mdev;
887 int i, prev_link_stat;
888 u8 prev_hw_addr[6];
889
890 mdev = to_mdev_from_work(wq_obj);
891 prev_link_stat = mdev->link_stat;
892
893 for (i = 0; i < 6; i++)
894 prev_hw_addr[i] = mdev->hw_addr[i];
895
896 if (hdm_update_netinfo(mdev) < 0)
897 return;
898 if ((prev_link_stat != mdev->link_stat) ||
899 (prev_hw_addr[0] != mdev->hw_addr[0]) ||
900 (prev_hw_addr[1] != mdev->hw_addr[1]) ||
901 (prev_hw_addr[2] != mdev->hw_addr[2]) ||
902 (prev_hw_addr[3] != mdev->hw_addr[3]) ||
903 (prev_hw_addr[4] != mdev->hw_addr[4]) ||
904 (prev_hw_addr[5] != mdev->hw_addr[5]))
905 most_deliver_netinfo(&mdev->iface, mdev->link_stat,
906 &mdev->hw_addr[0]);
907 }
908
909 /**
910 * wq_clear_halt - work queue function
911 * @wq_obj: work_struct object to execute
912 *
913 * This sends a clear_halt to the given USB pipe.
914 */
915 static void wq_clear_halt(struct work_struct *wq_obj)
916 {
917 struct buf_anchor *anchor;
918 struct most_dev *mdev;
919 struct mbo *mbo;
920 struct urb *urb;
921 unsigned int channel;
922 unsigned long flags;
923
924 anchor = to_buf_anchor(wq_obj);
925 urb = anchor->urb;
926 mbo = urb->context;
927 mdev = to_mdev(mbo->ifp);
928 channel = mbo->hdm_channel_id;
929
930 if (usb_clear_halt(urb->dev, urb->pipe))
931 dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");
932
933 usb_free_urb(urb);
934 spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
935 list_del(&anchor->list);
936 spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
937
938 if (likely(mbo->complete))
939 mbo->complete(mbo);
940 if (mdev->conf[channel].direction & MOST_CH_TX)
941 most_resume_enqueue(&mdev->iface, channel);
942
943 kfree(anchor);
944 }
945
946 /**
947 * hdm_usb_fops - file operation table for USB driver
948 */
949 static const struct file_operations hdm_usb_fops = {
950 .owner = THIS_MODULE,
951 };
952
953 /**
954 * usbid - table of USB device IDs supported by this driver
955 */
956 static const struct usb_device_id usbid[] = {
957 { USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_BRDG), },
958 { USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_INIC), },
959 { } /* Terminating entry */
960 };
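/*
 * Exporting the ID table is the usual way to let udev/modprobe autoload
 * the driver when a matching device appears; shown here as a common
 * companion to the table above.
 */
MODULE_DEVICE_TABLE(usb, usbid);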
961
962 #define MOST_DCI_RO_ATTR(_name) \
963 struct most_dci_attribute most_dci_attr_##_name = \
964 __ATTR(_name, S_IRUGO, show_value, NULL)
965
966 #define MOST_DCI_ATTR(_name) \
967 struct most_dci_attribute most_dci_attr_##_name = \
968 __ATTR(_name, S_IRUGO | S_IWUSR, show_value, store_value)
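/*
 * For illustration, a later line such as "static MOST_DCI_ATTR(mep_filter);"
 * expands to
 *
 *	static struct most_dci_attribute most_dci_attr_mep_filter =
 *		__ATTR(mep_filter, S_IRUGO | S_IWUSR, show_value, store_value);
 *
 * i.e. a read/write sysfs attribute backed by show_value()/store_value().
 */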
969
970 /**
971 * struct most_dci_attribute - to access the attributes of a dci object
972 * @attr: attributes of a dci object
973 * @show: pointer to the show function
974 * @store: pointer to the store function
975 */
976 struct most_dci_attribute {
977 struct attribute attr;
978 ssize_t (*show)(struct most_dci_obj *d,
979 struct most_dci_attribute *attr,
980 char *buf);
981 ssize_t (*store)(struct most_dci_obj *d,
982 struct most_dci_attribute *attr,
983 const char *buf,
984 size_t count);
985 };
986 #define to_dci_attr(a) container_of(a, struct most_dci_attribute, attr)
987
988 /**
989 * dci_attr_show - show function for dci object
990 * @kobj: pointer to kobject
991 * @attr: pointer to attribute struct
992 * @buf: buffer
993 */
994 static ssize_t dci_attr_show(struct kobject *kobj, struct attribute *attr,
995 char *buf)
996 {
997 struct most_dci_attribute *dci_attr = to_dci_attr(attr);
998 struct most_dci_obj *dci_obj = to_dci_obj(kobj);
999
1000 if (!dci_attr->show)
1001 return -EIO;
1002
1003 return dci_attr->show(dci_obj, dci_attr, buf);
1004 }
1005
1006 /**
1007 * dci_attr_store - store function for dci object
1008 * @kobj: pointer to kobject
1009 * @attr: pointer to attribute struct
1010 * @buf: buffer
1011 * @len: length of buffer
1012 */
1013 static ssize_t dci_attr_store(struct kobject *kobj,
1014 struct attribute *attr,
1015 const char *buf,
1016 size_t len)
1017 {
1018 struct most_dci_attribute *dci_attr = to_dci_attr(attr);
1019 struct most_dci_obj *dci_obj = to_dci_obj(kobj);
1020
1021 if (!dci_attr->store)
1022 return -EIO;
1023
1024 return dci_attr->store(dci_obj, dci_attr, buf, len);
1025 }
1026
1027 static const struct sysfs_ops most_dci_sysfs_ops = {
1028 .show = dci_attr_show,
1029 .store = dci_attr_store,
1030 };
1031
1032 /**
1033 * most_dci_release - release function for dci object
1034 * @kobj: pointer to kobject
1035 *
1036 * This frees the memory allocated for the dci object
1037 */
1038 static void most_dci_release(struct kobject *kobj)
1039 {
1040 struct most_dci_obj *dci_obj = to_dci_obj(kobj);
1041
1042 kfree(dci_obj);
1043 }
1044
1045 static ssize_t show_value(struct most_dci_obj *dci_obj,
1046 struct most_dci_attribute *attr, char *buf)
1047 {
1048 u16 tmp_val;
1049 u16 reg_addr;
1050 int err;
1051
1052 if (!strcmp(attr->attr.name, "ni_state"))
1053 reg_addr = DRCI_REG_NI_STATE;
1054 else if (!strcmp(attr->attr.name, "packet_bandwidth"))
1055 reg_addr = DRCI_REG_PACKET_BW;
1056 else if (!strcmp(attr->attr.name, "node_address"))
1057 reg_addr = DRCI_REG_NODE_ADDR;
1058 else if (!strcmp(attr->attr.name, "node_position"))
1059 reg_addr = DRCI_REG_NODE_POS;
1060 else if (!strcmp(attr->attr.name, "mep_filter"))
1061 reg_addr = DRCI_REG_MEP_FILTER;
1062 else if (!strcmp(attr->attr.name, "mep_hash0"))
1063 reg_addr = DRCI_REG_HASH_TBL0;
1064 else if (!strcmp(attr->attr.name, "mep_hash1"))
1065 reg_addr = DRCI_REG_HASH_TBL1;
1066 else if (!strcmp(attr->attr.name, "mep_hash2"))
1067 reg_addr = DRCI_REG_HASH_TBL2;
1068 else if (!strcmp(attr->attr.name, "mep_hash3"))
1069 reg_addr = DRCI_REG_HASH_TBL3;
1070 else if (!strcmp(attr->attr.name, "mep_eui48_hi"))
1071 reg_addr = DRCI_REG_HW_ADDR_HI;
1072 else if (!strcmp(attr->attr.name, "mep_eui48_mi"))
1073 reg_addr = DRCI_REG_HW_ADDR_MI;
1074 else if (!strcmp(attr->attr.name, "mep_eui48_lo"))
1075 reg_addr = DRCI_REG_HW_ADDR_LO;
1076 else
1077 return -EIO;
1078
1079 err = drci_rd_reg(dci_obj->usb_device, reg_addr, &tmp_val);
1080 if (err < 0)
1081 return err;
1082
1083 return snprintf(buf, PAGE_SIZE, "%04x\n", tmp_val);
1084 }
1085
1086 static ssize_t store_value(struct most_dci_obj *dci_obj,
1087 struct most_dci_attribute *attr,
1088 const char *buf, size_t count)
1089 {
1090 u16 val;
1091 u16 reg_addr;
1092 int err;
1093
1094 if (!strcmp(attr->attr.name, "mep_filter"))
1095 reg_addr = DRCI_REG_MEP_FILTER;
1096 else if (!strcmp(attr->attr.name, "mep_hash0"))
1097 reg_addr = DRCI_REG_HASH_TBL0;
1098 else if (!strcmp(attr->attr.name, "mep_hash1"))
1099 reg_addr = DRCI_REG_HASH_TBL1;
1100 else if (!strcmp(attr->attr.name, "mep_hash2"))
1101 reg_addr = DRCI_REG_HASH_TBL2;
1102 else if (!strcmp(attr->attr.name, "mep_hash3"))
1103 reg_addr = DRCI_REG_HASH_TBL3;
1104 else if (!strcmp(attr->attr.name, "mep_eui48_hi"))
1105 reg_addr = DRCI_REG_HW_ADDR_HI;
1106 else if (!strcmp(attr->attr.name, "mep_eui48_mi"))
1107 reg_addr = DRCI_REG_HW_ADDR_MI;
1108 else if (!strcmp(attr->attr.name, "mep_eui48_lo"))
1109 reg_addr = DRCI_REG_HW_ADDR_LO;
1110 else
1111 return -EIO;
1112
1113 err = kstrtou16(buf, 16, &val);
1114 if (err)
1115 return err;
1116
1117 err = drci_wr_reg(dci_obj->usb_device, reg_addr, val);
1118 if (err < 0)
1119 return err;
1120
1121 return count;
1122 }
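/*
 * From user space these registers appear as plain sysfs files below the
 * "dci" kobject created in hdm_probe().  A shell session might look like
 * this (the leading path depends on where the core places the interface
 * and is elided here):
 *
 *	cat .../dci/node_address
 *	echo 01fe > .../dci/mep_hash0
 *
 * Values are printed and parsed as 16-bit hexadecimal numbers, matching
 * the "%04x" format in show_value() and kstrtou16(buf, 16, ...) in
 * store_value().
 */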
1123
1124 static MOST_DCI_RO_ATTR(ni_state);
1125 static MOST_DCI_RO_ATTR(packet_bandwidth);
1126 static MOST_DCI_RO_ATTR(node_address);
1127 static MOST_DCI_RO_ATTR(node_position);
1128 static MOST_DCI_ATTR(mep_filter);
1129 static MOST_DCI_ATTR(mep_hash0);
1130 static MOST_DCI_ATTR(mep_hash1);
1131 static MOST_DCI_ATTR(mep_hash2);
1132 static MOST_DCI_ATTR(mep_hash3);
1133 static MOST_DCI_ATTR(mep_eui48_hi);
1134 static MOST_DCI_ATTR(mep_eui48_mi);
1135 static MOST_DCI_ATTR(mep_eui48_lo);
1136
1137 /**
1138 * most_dci_def_attrs - array of default attribute files of the dci object
1139 */
1140 static struct attribute *most_dci_def_attrs[] = {
1141 &most_dci_attr_ni_state.attr,
1142 &most_dci_attr_packet_bandwidth.attr,
1143 &most_dci_attr_node_address.attr,
1144 &most_dci_attr_node_position.attr,
1145 &most_dci_attr_mep_filter.attr,
1146 &most_dci_attr_mep_hash0.attr,
1147 &most_dci_attr_mep_hash1.attr,
1148 &most_dci_attr_mep_hash2.attr,
1149 &most_dci_attr_mep_hash3.attr,
1150 &most_dci_attr_mep_eui48_hi.attr,
1151 &most_dci_attr_mep_eui48_mi.attr,
1152 &most_dci_attr_mep_eui48_lo.attr,
1153 NULL,
1154 };
1155
1156 /**
1157 * DCI ktype
1158 */
1159 static struct kobj_type most_dci_ktype = {
1160 .sysfs_ops = &most_dci_sysfs_ops,
1161 .release = most_dci_release,
1162 .default_attrs = most_dci_def_attrs,
1163 };
1164
1165 /**
1166 * create_most_dci_obj - allocates a dci object
1167 * @parent: parent kobject
1168 *
1169 * This creates a dci object and registers it with sysfs.
1170 * Returns a pointer to the object or NULL when something went wrong.
1171 */
1172 static struct
1173 most_dci_obj *create_most_dci_obj(struct kobject *parent)
1174 {
1175 struct most_dci_obj *most_dci;
1176 int retval;
1177
1178 most_dci = kzalloc(sizeof(*most_dci), GFP_KERNEL);
1179 if (!most_dci)
1180 return NULL;
1181
1182 retval = kobject_init_and_add(&most_dci->kobj, &most_dci_ktype, parent,
1183 "dci");
1184 if (retval) {
1185 kobject_put(&most_dci->kobj);
1186 return NULL;
1187 }
1188 return most_dci;
1189 }
1190
1191 /**
1192 * destroy_most_dci_obj - DCI object release function
1193 * @p: pointer to dci object
1194 */
1195 static void destroy_most_dci_obj(struct most_dci_obj *p)
1196 {
1197 kobject_put(&p->kobj);
1198 }
1199
1200 /**
1201 * hdm_probe - probe function of USB device driver
1202 * @interface: Interface of the attached USB device
1203 * @id: Pointer to the USB ID table.
1204 *
1205 * This allocates and initializes the device instance, adds the new
1206 * entry to the internal list, scans the USB descriptors and registers
1207 * the interface with the core.
1208 * Additionally, the DCI objects are created and the hardware is sync'd.
1209 *
1210 * Returns 0 on success. In case of an error, a negative number is returned.
1211 */
1212 static int
1213 hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
1214 {
1215 unsigned int i;
1216 unsigned int num_endpoints;
1217 struct most_channel_capability *tmp_cap;
1218 struct most_dev *mdev;
1219 struct usb_device *usb_dev;
1220 struct device *dev;
1221 struct usb_host_interface *usb_iface_desc;
1222 struct usb_endpoint_descriptor *ep_desc;
1223 int ret = 0;
1224 int err;
1225
1226 usb_iface_desc = interface->cur_altsetting;
1227 usb_dev = interface_to_usbdev(interface);
1228 dev = &usb_dev->dev;
1229 mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
1230 if (!mdev)
1231 goto exit_ENOMEM;
1232
1233 usb_set_intfdata(interface, mdev);
1234 num_endpoints = usb_iface_desc->desc.bNumEndpoints;
1235 mutex_init(&mdev->io_mutex);
1236 INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
1237 init_timer(&mdev->link_stat_timer);
1238
1239 mdev->usb_device = usb_dev;
1240 mdev->link_stat_timer.function = link_stat_timer_handler;
1241 mdev->link_stat_timer.data = (unsigned long)mdev;
1242 mdev->link_stat_timer.expires = jiffies + (2 * HZ);
1243
1244 mdev->iface.mod = hdm_usb_fops.owner;
1245 mdev->iface.interface = ITYPE_USB;
1246 mdev->iface.configure = hdm_configure_channel;
1247 mdev->iface.request_netinfo = hdm_request_netinfo;
1248 mdev->iface.enqueue = hdm_enqueue;
1249 mdev->iface.poison_channel = hdm_poison_channel;
1250 mdev->iface.description = mdev->description;
1251 mdev->iface.num_channels = num_endpoints;
1252
1253 snprintf(mdev->description, sizeof(mdev->description),
1254 "usb_device %d-%s:%d.%d",
1255 usb_dev->bus->busnum,
1256 usb_dev->devpath,
1257 usb_dev->config->desc.bConfigurationValue,
1258 usb_iface_desc->desc.bInterfaceNumber);
1259
1260 mdev->conf = kcalloc(num_endpoints, sizeof(*mdev->conf), GFP_KERNEL);
1261 if (!mdev->conf)
1262 goto exit_free;
1263
1264 mdev->cap = kcalloc(num_endpoints, sizeof(*mdev->cap), GFP_KERNEL);
1265 if (!mdev->cap)
1266 goto exit_free1;
1267
1268 mdev->iface.channel_vector = mdev->cap;
1269 mdev->iface.priv = NULL;
1270
1271 mdev->ep_address =
1272 kcalloc(num_endpoints, sizeof(*mdev->ep_address), GFP_KERNEL);
1273 if (!mdev->ep_address)
1274 goto exit_free2;
1275
1276 mdev->anchor_list =
1277 kcalloc(num_endpoints, sizeof(*mdev->anchor_list), GFP_KERNEL);
1278 if (!mdev->anchor_list)
1279 goto exit_free3;
1280
1281 tmp_cap = mdev->cap;
1282 for (i = 0; i < num_endpoints; i++) {
1283 ep_desc = &usb_iface_desc->endpoint[i].desc;
1284 mdev->ep_address[i] = ep_desc->bEndpointAddress;
1285 mdev->padding_active[i] = false;
1286 mdev->is_channel_healthy[i] = true;
1287
1288 snprintf(&mdev->suffix[i][0], MAX_SUFFIX_LEN, "ep%02x",
1289 mdev->ep_address[i]);
1290
1291 tmp_cap->name_suffix = &mdev->suffix[i][0];
1292 tmp_cap->buffer_size_packet = MAX_BUF_SIZE;
1293 tmp_cap->buffer_size_streaming = MAX_BUF_SIZE;
1294 tmp_cap->num_buffers_packet = BUF_CHAIN_SIZE;
1295 tmp_cap->num_buffers_streaming = BUF_CHAIN_SIZE;
1296 tmp_cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
1297 MOST_CH_ISOC_AVP | MOST_CH_SYNC;
1298 if (ep_desc->bEndpointAddress & USB_DIR_IN)
1299 tmp_cap->direction = MOST_CH_RX;
1300 else
1301 tmp_cap->direction = MOST_CH_TX;
1302 tmp_cap++;
1303 INIT_LIST_HEAD(&mdev->anchor_list[i]);
1304 spin_lock_init(&mdev->anchor_list_lock[i]);
1305 err = drci_wr_reg(usb_dev,
1306 DRCI_REG_BASE + DRCI_COMMAND +
1307 ep_desc->bEndpointAddress * 16,
1308 1);
1309 if (err < 0)
1310 pr_warn("DCI Sync for EP %02x failed\n",
1311 ep_desc->bEndpointAddress);
1312 }
1313 dev_notice(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
1314 le16_to_cpu(usb_dev->descriptor.idVendor),
1315 le16_to_cpu(usb_dev->descriptor.idProduct),
1316 usb_dev->bus->busnum,
1317 usb_dev->devnum);
1318
1319 dev_notice(dev, "device path: /sys/bus/usb/devices/%d-%s:%d.%d\n",
1320 usb_dev->bus->busnum,
1321 usb_dev->devpath,
1322 usb_dev->config->desc.bConfigurationValue,
1323 usb_iface_desc->desc.bInterfaceNumber);
1324
1325 mdev->parent = most_register_interface(&mdev->iface);
1326 if (IS_ERR(mdev->parent)) {
1327 ret = PTR_ERR(mdev->parent);
1328 goto exit_free4;
1329 }
1330
1331 mutex_lock(&mdev->io_mutex);
1332 if (le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_INIC) {
1333 /* this increments the reference count of the instance
1334 * object of the core
1335 */
1336 mdev->dci = create_most_dci_obj(mdev->parent);
1337 if (!mdev->dci) {
1338 mutex_unlock(&mdev->io_mutex);
1339 most_deregister_interface(&mdev->iface);
1340 ret = -ENOMEM;
1341 goto exit_free4;
1342 }
1343
1344 kobject_uevent(&mdev->dci->kobj, KOBJ_ADD);
1345 mdev->dci->usb_device = mdev->usb_device;
1346 }
1347 mutex_unlock(&mdev->io_mutex);
1348 return 0;
1349
1350 exit_free4:
1351 kfree(mdev->anchor_list);
1352 exit_free3:
1353 kfree(mdev->ep_address);
1354 exit_free2:
1355 kfree(mdev->cap);
1356 exit_free1:
1357 kfree(mdev->conf);
1358 exit_free:
1359 kfree(mdev);
1360 exit_ENOMEM:
1361 if (ret == 0 || ret == -ENOMEM) {
1362 ret = -ENOMEM;
1363 dev_err(dev, "out of memory\n");
1364 }
1365 return ret;
1366 }
1367
1368 /**
1369 * hdm_disconnect - disconnect function of USB device driver
1370 * @interface: Interface of the attached USB device
1371 *
1372 * This deregisters the interface with the core, removes the kernel timer
1373 * and frees resources.
1374 *
1375 * Context: hub kernel thread
1376 */
1377 static void hdm_disconnect(struct usb_interface *interface)
1378 {
1379 struct most_dev *mdev;
1380
1381 mdev = usb_get_intfdata(interface);
1382 mutex_lock(&mdev->io_mutex);
1383 usb_set_intfdata(interface, NULL);
1384 mdev->usb_device = NULL;
1385 mutex_unlock(&mdev->io_mutex);
1386
1387 del_timer_sync(&mdev->link_stat_timer);
1388 cancel_work_sync(&mdev->poll_work_obj);
1389
1390 destroy_most_dci_obj(mdev->dci);
1391 most_deregister_interface(&mdev->iface);
1392
1393 kfree(mdev->anchor_list);
1394 kfree(mdev->cap);
1395 kfree(mdev->conf);
1396 kfree(mdev->ep_address);
1397 kfree(mdev);
1398 }
1399
1400 static struct usb_driver hdm_usb = {
1401 .name = "hdm_usb",
1402 .id_table = usbid,
1403 .probe = hdm_probe,
1404 .disconnect = hdm_disconnect,
1405 };
1406
1407 static int __init hdm_usb_init(void)
1408 {
1409 pr_info("hdm_usb_init()\n");
1410 if (usb_register(&hdm_usb)) {
1411 pr_err("could not register hdm_usb driver\n");
1412 return -EIO;
1413 }
1414 schedule_usb_work = create_workqueue("hdmu_work");
1415 if (schedule_usb_work == NULL) {
1416 pr_err("could not create workqueue\n");
1417 usb_deregister(&hdm_usb);
1418 return -ENOMEM;
1419 }
1420 return 0;
1421 }
1422
1423 static void __exit hdm_usb_exit(void)
1424 {
1425 pr_info("hdm_usb_exit()\n");
1426 destroy_workqueue(schedule_usb_work);
1427 usb_deregister(&hdm_usb);
1428 }
1429
1430 module_init(hdm_usb_init);
1431 module_exit(hdm_usb_exit);
1432 MODULE_LICENSE("GPL");
1433 MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
1434 MODULE_DESCRIPTION("HDM_4_USB");
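/*
 * Loading the driver from a shell might look like this (assuming the MOST
 * core module has been built and installed as "mostcore" and this module
 * as "hdm_usb"):
 *
 *	# modprobe mostcore
 *	# modprobe hdm_usb
 *
 * Once loaded, the driver binds to any device matching the usbid table.
 */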