/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
 *          Daniel Martensson / daniel.martensson@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/io.h>

#include <net/caif/caif_device.h>
#include <net/caif/caif_shm.h>

/* Number of shared memory buffers per direction. */
#define NR_TX_BUF		6
#define NR_RX_BUF		6

#define TX_BUF_SZ		0x2000
#define RX_BUF_SZ		0x2000

#define CAIF_NEEDED_HEADROOM	32

#define CAIF_FLOW_ON		1
#define CAIF_FLOW_OFF		0

#define LOW_WATERMARK		3
#define HIGH_WATERMARK		4

/* Maximum number of CAIF buffers per shared memory buffer. */
#define SHM_MAX_FRMS_PER_BUF	10

/*
 * Size in bytes of the descriptor area
 * (with end of descriptor signalling).
 */
#define SHM_CAIF_DESC_SIZE	((SHM_MAX_FRMS_PER_BUF + 1) * \
					sizeof(struct shm_pck_desc))

/*
 * Offset to the first CAIF frame within a shared memory buffer.
 * Aligned on 32 bytes.
 */
#define SHM_CAIF_FRM_OFS	(SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))

/* Number of bytes for the CAIF shared memory header. */
#define SHM_HDR_LEN		1

/* Number of padding bytes for the complete CAIF frame. */
#define SHM_FRM_PAD_LEN		4

#define CAIF_MAX_MTU		4096
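
/*
 * Layout of a single shared memory buffer as used below (an illustrative
 * summary of the definitions above, not an additional specification):
 *
 *  offset 0:                descriptor area - SHM_MAX_FRMS_PER_BUF + 1
 *                           struct shm_pck_desc entries; an entry with
 *                           frm_ofs == 0 terminates the list.
 *  offset SHM_CAIF_FRM_OFS: frame area - each frame starts with a one
 *                           byte header (SHM_HDR_LEN) holding the number
 *                           of padding bytes (hdr_ofs), followed by that
 *                           padding, the CAIF packet itself, and tail
 *                           padding up to a SHM_FRM_PAD_LEN multiple.
 */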

#define SHM_SET_FULL(x)		(((x+1) & 0x0F) << 0)
#define SHM_GET_FULL(x)		(((x >> 0) & 0x0F) - 1)

#define SHM_SET_EMPTY(x)	(((x+1) & 0x0F) << 4)
#define SHM_GET_EMPTY(x)	(((x >> 4) & 0x0F) - 1)

#define SHM_FULL_MASK		(0x0F << 0)
#define SHM_EMPTY_MASK		(0x0F << 4)
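
/*
 * The mailbox message packs two buffer indices into one u32, each biased
 * by one so that a zero nibble means "no index": bits 0-3 carry the index
 * of a buffer that has been filled, bits 4-7 the index of a buffer that
 * has been emptied.  Illustrative example:
 *
 *	mbx_msg = SHM_SET_FULL(2) | SHM_SET_EMPTY(5);	 -> 0x63
 *	SHM_GET_FULL(mbx_msg)  == 2
 *	SHM_GET_EMPTY(mbx_msg) == 5
 */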

struct shm_pck_desc {
	/*
	 * Offset from start of shared memory area to start of
	 * shared memory CAIF frame.
	 */
	u32 frm_ofs;
	u32 frm_len;
};

struct buf_list {
	unsigned char *desc_vptr;
	u32 phy_addr;
	u32 index;
	u32 len;
	u32 frames;
	u32 frm_ofs;
	struct list_head list;
};

struct shm_caif_frm {
	/* Number of bytes of padding before the CAIF frame. */
	u8 hdr_ofs;
};

struct shmdrv_layer {
	/* caif_dev_common must always be first in the structure. */
	struct caif_dev_common cfdev;

	u32 shm_tx_addr;
	u32 shm_rx_addr;
	u32 shm_base_addr;
	u32 tx_empty_available;
	spinlock_t lock;

	struct list_head tx_empty_list;
	struct list_head tx_pend_list;
	struct list_head tx_full_list;
	struct list_head rx_empty_list;
	struct list_head rx_pend_list;
	struct list_head rx_full_list;

	struct workqueue_struct *pshm_tx_workqueue;
	struct workqueue_struct *pshm_rx_workqueue;

	struct work_struct shm_tx_work;
	struct work_struct shm_rx_work;

	struct sk_buff_head sk_qhead;
	struct shmdev_layer *pshm_dev;
};
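
/*
 * Buffer flow, roughly as implemented below:
 *
 * TX: tx_empty_list -> tx_full_list once frames have been written and the
 *     "full" index has been signalled over the mailbox; back to
 *     tx_empty_list when the remote side reports the buffer as emptied.
 *
 * RX: rx_empty_list -> rx_full_list when the remote side reports a filled
 *     buffer; -> rx_pend_list after its frames have been delivered to the
 *     network stack; -> rx_empty_list once the "empty" index has been
 *     signalled back over the mailbox.
 */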

static int shm_netdev_open(struct net_device *shm_netdev)
{
	netif_wake_queue(shm_netdev);
	return 0;
}

static int shm_netdev_close(struct net_device *shm_netdev)
{
	netif_stop_queue(shm_netdev);
	return 0;
}
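
/*
 * Mailbox receive callback, registered with the platform mailbox layer at
 * probe time.  A rough summary of what follows: the "full" nibble moves a
 * buffer from rx_empty_list to rx_full_list and kicks the RX work queue;
 * the "empty" nibble returns a buffer from tx_full_list to tx_empty_list
 * and, once enough buffers are free again, re-enables flow control and
 * kicks the TX work queue.
 */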
int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
{
	struct buf_list *pbuf;
	struct shmdrv_layer *pshm_drv;
	struct list_head *pos;
	u32 avail_emptybuff = 0;
	unsigned long flags = 0;

	pshm_drv = priv;

	/* Check for received buffers. */
	if (mbx_msg & SHM_FULL_MASK) {
		int idx;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check whether we have any outstanding buffers. */
		if (list_empty(&pshm_drv->rx_empty_list)) {

			/* Release spin lock. */
			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* We print even in IRQ context... */
			pr_warn("No empty Rx buffers to fill: "
					"mbx_msg:%x\n", mbx_msg);

			goto err_sync;
		}

		pbuf = list_entry(pshm_drv->rx_empty_list.next,
					struct buf_list, list);
		idx = pbuf->index;

		/* Check buffer synchronization. */
		if (idx != SHM_GET_FULL(mbx_msg)) {

			/* We print even in IRQ context... */
			pr_warn("phyif_shm_mbx_msg_cb: RX full out of sync:"
				" idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
				idx, mbx_msg, SHM_GET_FULL(mbx_msg));

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			goto err_sync;
		}

		list_del_init(&pbuf->list);
		list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);

		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		/* Schedule RX work queue. */
		if (!work_pending(&pshm_drv->shm_rx_work))
			queue_work(pshm_drv->pshm_rx_workqueue,
					&pshm_drv->shm_rx_work);
	}

	/* Check for emptied buffers. */
	if (mbx_msg & SHM_EMPTY_MASK) {
		int idx;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check whether we have any outstanding buffers. */
		if (list_empty(&pshm_drv->tx_full_list)) {

			/* We print even in IRQ context... */
			pr_warn("No TX to empty: msg:%x\n", mbx_msg);

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			goto err_sync;
		}

		pbuf = list_entry(pshm_drv->tx_full_list.next,
					struct buf_list, list);
		idx = pbuf->index;

		/* Check buffer synchronization. */
		if (idx != SHM_GET_EMPTY(mbx_msg)) {

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* We print even in IRQ context... */
			pr_warn("phyif_shm_mbx_msg_cb: TX empty "
					"out of sync:idx:%d, msg:%x\n",
					idx, mbx_msg);

			goto err_sync;
		}
		list_del_init(&pbuf->list);

		/* Reset buffer parameters. */
		pbuf->frames = 0;
		pbuf->frm_ofs = SHM_CAIF_FRM_OFS;

		list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);

		/* Count the available buffers in the empty list. */
		list_for_each(pos, &pshm_drv->tx_empty_list)
			avail_emptybuff++;

		/* Check whether we have to wake up the transmitter. */
		if ((avail_emptybuff > HIGH_WATERMARK) &&
				(!pshm_drv->tx_empty_available)) {
			pshm_drv->tx_empty_available = 1;
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
			pshm_drv->cfdev.flowctrl
					(pshm_drv->pshm_dev->pshm_netdev,
							CAIF_FLOW_ON);

			/* Schedule the work queue if required. */
			if (!work_pending(&pshm_drv->shm_tx_work))
				queue_work(pshm_drv->pshm_tx_workqueue,
						&pshm_drv->shm_tx_work);
		} else {
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
		}
	}

	return 0;

err_sync:
	return -EIO;
}
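
/*
 * RX work function.  Loosely: for every buffer on rx_full_list, walk its
 * packet descriptor area, copy each CAIF frame into a freshly allocated
 * skb and hand it to the network stack, then park the buffer on
 * rx_pend_list until the TX work function can report it as empty over
 * the mailbox.
 */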
static void shm_rx_work_func(struct work_struct *rx_work)
{
	struct shmdrv_layer *pshm_drv;
	struct buf_list *pbuf;
	unsigned long flags = 0;
	struct sk_buff *skb;
	unsigned char *p;
	int ret;

	pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);

	while (1) {

		struct shm_pck_desc *pck_desc;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check for received buffers. */
		if (list_empty(&pshm_drv->rx_full_list)) {
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
			break;
		}

		pbuf = list_entry(pshm_drv->rx_full_list.next,
					struct buf_list, list);
		list_del_init(&pbuf->list);
		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		/* Retrieve pointer to start of the packet descriptor area. */
		pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;

		/*
		 * Check whether the descriptor contains a CAIF shared memory
		 * frame.
		 */
		while (pck_desc->frm_ofs) {
			unsigned int frm_buf_ofs;
			unsigned int frm_pck_ofs;
			unsigned int frm_pck_len;

			/*
			 * Check whether offset is within buffer limits
			 * (lower bound).
			 */
			if (pck_desc->frm_ofs <
				(pbuf->phy_addr - pshm_drv->shm_base_addr))
				break;
			/*
			 * Check whether offset is within buffer limits
			 * (higher bound).
			 */
			if (pck_desc->frm_ofs >
				((pbuf->phy_addr - pshm_drv->shm_base_addr) +
					pbuf->len))
				break;

			/* Calculate offset from start of buffer. */
			frm_buf_ofs =
				pck_desc->frm_ofs - (pbuf->phy_addr -
						pshm_drv->shm_base_addr);

			/*
			 * Calculate offset and length of CAIF packet while
			 * taking care of the shared memory header.
			 */
			frm_pck_ofs =
				frm_buf_ofs + SHM_HDR_LEN +
					(*(pbuf->desc_vptr + frm_buf_ofs));
			frm_pck_len =
				(pck_desc->frm_len - SHM_HDR_LEN -
					(*(pbuf->desc_vptr + frm_buf_ofs)));

			/* Check whether CAIF packet is within buffer limits */
			if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
				break;

			/* Get a suitable CAIF packet and copy in data. */
			skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
							frm_pck_len + 1);
			if (skb == NULL) {
				pr_info("OOM: Try next frame in descriptor\n");
				break;
			}

			p = skb_put(skb, frm_pck_len);
			memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);

			skb->protocol = htons(ETH_P_CAIF);
			skb_reset_mac_header(skb);
			skb->dev = pshm_drv->pshm_dev->pshm_netdev;

			/* Push received packet up the stack. */
			ret = netif_rx_ni(skb);

			if (!ret) {
				pshm_drv->pshm_dev->pshm_netdev->stats.
						rx_packets++;
				pshm_drv->pshm_dev->pshm_netdev->stats.
						rx_bytes += pck_desc->frm_len;
			} else
				++pshm_drv->pshm_dev->pshm_netdev->stats.
						rx_dropped;

			/* Move to next packet descriptor. */
			pck_desc++;
		}

		spin_lock_irqsave(&pshm_drv->lock, flags);
		list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
		spin_unlock_irqrestore(&pshm_drv->lock, flags);
	}

	/* Schedule the work queue if required. */
	if (!work_pending(&pshm_drv->shm_tx_work))
		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
}
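
/*
 * TX work function.  Roughly: report buffers on rx_pend_list as emptied,
 * then pack as many queued skbs as possible into the first free TX buffer
 * (descriptor entry, one-byte header, optional padding, payload), move
 * that buffer to tx_full_list, and signal both the "empty" and the "full"
 * indices to the remote side in a single mailbox message.  Flow control is
 * switched off when fewer than LOW_WATERMARK empty TX buffers remain.
 */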
static void shm_tx_work_func(struct work_struct *tx_work)
{
	u32 mbox_msg;
	unsigned int frmlen, avail_emptybuff, append = 0;
	unsigned long flags = 0;
	struct buf_list *pbuf = NULL;
	struct shmdrv_layer *pshm_drv;
	struct shm_caif_frm *frm;
	struct sk_buff *skb;
	struct shm_pck_desc *pck_desc;
	struct list_head *pos;

	pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);

	do {
		/* Initialize mailbox message. */
		mbox_msg = 0x00;
		avail_emptybuff = 0;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check for pending receive buffers. */
		if (!list_empty(&pshm_drv->rx_pend_list)) {

			pbuf = list_entry(pshm_drv->rx_pend_list.next,
						struct buf_list, list);

			list_del_init(&pbuf->list);
			list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
			/*
			 * Value index is never changed,
			 * so read access should be safe.
			 */
			mbox_msg |= SHM_SET_EMPTY(pbuf->index);
		}

		skb = skb_peek(&pshm_drv->sk_qhead);
		if (skb == NULL)
			goto send_msg;

		/* Count the available buffers in the empty list. */
		list_for_each(pos, &pshm_drv->tx_empty_list)
			avail_emptybuff++;

		if ((avail_emptybuff < LOW_WATERMARK) &&
				pshm_drv->tx_empty_available) {
			/* Update blocking condition. */
			pshm_drv->tx_empty_available = 0;
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
			pshm_drv->cfdev.flowctrl
					(pshm_drv->pshm_dev->pshm_netdev,
							CAIF_FLOW_OFF);
			spin_lock_irqsave(&pshm_drv->lock, flags);
		}

		/*
		 * We simply return back to the caller if we do not have space
		 * either in the Tx pending list or the Tx empty list. In this
		 * case, we hold the received skb in the skb list, waiting to
		 * be transmitted once Tx buffers become available.
		 */
		if (list_empty(&pshm_drv->tx_empty_list))
			goto send_msg;

		/* Get the first free Tx buffer. */
		pbuf = list_entry(pshm_drv->tx_empty_list.next,
					struct buf_list, list);
		do {
			if (append) {
				skb = skb_peek(&pshm_drv->sk_qhead);
				if (skb == NULL)
					break;
			}

			frm = (struct shm_caif_frm *)
					(pbuf->desc_vptr + pbuf->frm_ofs);

			frm->hdr_ofs = 0;
			frmlen = 0;
			frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;

			/* Add tail padding if needed. */
			if (frmlen % SHM_FRM_PAD_LEN)
				frmlen += SHM_FRM_PAD_LEN -
						(frmlen % SHM_FRM_PAD_LEN);

			/*
			 * Verify that packet, header and additional padding
			 * can fit within the buffer frame area.
			 */
			if (frmlen >= (pbuf->len - pbuf->frm_ofs))
				break;

			if (!append) {
				list_del_init(&pbuf->list);
				append = 1;
			}

			skb = skb_dequeue(&pshm_drv->sk_qhead);
			if (skb == NULL)
				break;

			/* Copy in CAIF frame. */
			skb_copy_bits(skb, 0, pbuf->desc_vptr +
					pbuf->frm_ofs + SHM_HDR_LEN +
						frm->hdr_ofs, skb->len);

			pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
			pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
									frmlen;
			dev_kfree_skb_irq(skb);

			/* Fill in the shared memory packet descriptor area. */
			pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
			/* Forward to current frame. */
			pck_desc += pbuf->frames;
			pck_desc->frm_ofs = (pbuf->phy_addr -
						pshm_drv->shm_base_addr) +
							pbuf->frm_ofs;
			pck_desc->frm_len = frmlen;
			/* Terminate packet descriptor area. */
			pck_desc++;
			pck_desc->frm_ofs = 0;
			/* Update buffer parameters. */
			pbuf->frames++;
			pbuf->frm_ofs += frmlen + (frmlen % 32);

		} while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);

		/* Assign buffer as full. */
		list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
		append = 0;
		mbox_msg |= SHM_SET_FULL(pbuf->index);
send_msg:
		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		if (mbox_msg)
			pshm_drv->pshm_dev->pshmdev_mbxsend
					(pshm_drv->pshm_dev->shm_id, mbox_msg);
	} while (mbox_msg);
}

static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
{
	struct shmdrv_layer *pshm_drv;

	pshm_drv = netdev_priv(shm_netdev);

	skb_queue_tail(&pshm_drv->sk_qhead, skb);

	/* Schedule Tx work queue for deferred processing of skbs. */
	if (!work_pending(&pshm_drv->shm_tx_work))
		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);

	return 0;
}

static const struct net_device_ops netdev_ops = {
	.ndo_open = shm_netdev_open,
	.ndo_stop = shm_netdev_close,
	.ndo_start_xmit = shm_netdev_tx,
};

static void shm_netdev_setup(struct net_device *pshm_netdev)
{
	struct shmdrv_layer *pshm_drv;

	pshm_netdev->netdev_ops = &netdev_ops;
	pshm_netdev->mtu = CAIF_MAX_MTU;
	pshm_netdev->type = ARPHRD_CAIF;
	pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
	pshm_netdev->tx_queue_len = 0;
	pshm_netdev->destructor = free_netdev;

	pshm_drv = netdev_priv(pshm_netdev);

	/* Initialize structures in a clean state. */
	memset(pshm_drv, 0, sizeof(struct shmdrv_layer));

	pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
}
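
/*
 * Set up a new shared memory CAIF device.  In outline: allocate and
 * initialise the CAIF network device, register the mailbox callback,
 * split the shared memory area into NR_TX_BUF TX and NR_RX_BUF RX
 * buffers, and register the net device.
 */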
int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
{
	int result, j;
	struct shmdrv_layer *pshm_drv = NULL;

	pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
						"cfshm%d", shm_netdev_setup);
	if (!pshm_dev->pshm_netdev)
		return -ENOMEM;

	pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
	pshm_drv->pshm_dev = pshm_dev;

	/*
	 * Initialization starts with the verification of the
	 * availability of the MBX driver by calling its setup function.
	 * The MBX driver must be available by this time for proper
	 * functioning of the SHM driver.
	 */
	if ((pshm_dev->pshmdev_mbxsetup
			(caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
		pr_warn("Could not configure SHM mailbox, bailing out\n");
		free_netdev(pshm_dev->pshm_netdev);
		return -ENODEV;
	}

	skb_queue_head_init(&pshm_drv->sk_qhead);

	pr_info("SHM device[%d] probed, new SHM driver instance at "
			"pshm_drv=0x%p\n",
			pshm_drv->pshm_dev->shm_id, pshm_drv);

	if (pshm_dev->shm_total_sz <
			(NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {

		pr_warn("ERROR, available physical SHM cannot accommodate "
				"the current SHM driver configuration, "
				"bailing out\n");
		free_netdev(pshm_dev->pshm_netdev);
		return -ENOMEM;
	}

	pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
	pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;

	if (pshm_dev->shm_loopback)
		pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
	else
		pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
						(NR_TX_BUF * TX_BUF_SZ);

	spin_lock_init(&pshm_drv->lock);
	INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
	INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
	INIT_LIST_HEAD(&pshm_drv->tx_full_list);

	INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
	INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
	INIT_LIST_HEAD(&pshm_drv->rx_full_list);

	INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
	INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);

	pshm_drv->pshm_tx_workqueue =
			create_singlethread_workqueue("shm_tx_work");
	pshm_drv->pshm_rx_workqueue =
			create_singlethread_workqueue("shm_rx_work");

	for (j = 0; j < NR_TX_BUF; j++) {
		struct buf_list *tx_buf =
				kmalloc(sizeof(struct buf_list), GFP_KERNEL);

		if (tx_buf == NULL) {
			pr_warn("ERROR, could not allocate memory for "
					"tx_buf, bailing out\n");
			free_netdev(pshm_dev->pshm_netdev);
			return -ENOMEM;
		}
		tx_buf->index = j;
		tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
		tx_buf->len = TX_BUF_SZ;
		tx_buf->frames = 0;
		tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;

		if (pshm_dev->shm_loopback)
			tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
		else
			/*
			 * FIXME: the result of ioremap is not a pointer - arnd
			 */
			tx_buf->desc_vptr =
					ioremap(tx_buf->phy_addr, TX_BUF_SZ);

		list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
	}

	for (j = 0; j < NR_RX_BUF; j++) {
		struct buf_list *rx_buf =
				kmalloc(sizeof(struct buf_list), GFP_KERNEL);

		if (rx_buf == NULL) {
			pr_warn("ERROR, could not allocate memory for "
					"rx_buf, bailing out\n");
			free_netdev(pshm_dev->pshm_netdev);
			return -ENOMEM;
		}
		rx_buf->index = j;
		rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
		rx_buf->len = RX_BUF_SZ;

		if (pshm_dev->shm_loopback)
			rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
		else
			rx_buf->desc_vptr =
					ioremap(rx_buf->phy_addr, RX_BUF_SZ);

		list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
	}

	pshm_drv->tx_empty_available = 1;
	result = register_netdev(pshm_dev->pshm_netdev);
	if (result)
		pr_warn("ERROR[%d], SHM could not register with the network "
				"framework, bailing out\n", result);

	return result;
}

void caif_shmcore_remove(struct net_device *pshm_netdev)
{
	struct buf_list *pbuf;
	struct shmdrv_layer *pshm_drv = NULL;

	pshm_drv = netdev_priv(pshm_netdev);

	while (!(list_empty(&pshm_drv->tx_pend_list))) {
		pbuf = list_entry(pshm_drv->tx_pend_list.next,
					struct buf_list, list);

		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->tx_full_list))) {
		pbuf = list_entry(pshm_drv->tx_full_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->tx_empty_list))) {
		pbuf = list_entry(pshm_drv->tx_empty_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_full_list))) {
		pbuf = list_entry(pshm_drv->rx_full_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_pend_list))) {
		pbuf = list_entry(pshm_drv->rx_pend_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_empty_list))) {
		pbuf = list_entry(pshm_drv->rx_empty_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	/* Destroy work queues. */
	destroy_workqueue(pshm_drv->pshm_tx_workqueue);
	destroy_workqueue(pshm_drv->pshm_rx_workqueue);

	unregister_netdev(pshm_netdev);
}