staging: unisys: visornic: cleanup error handling
drivers/staging/unisys/visornic/visornic_main.c
1/* Copyright (c) 2012 - 2015 UNISYS CORPORATION
2 * All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for more
12 * details.
13 */
14
 15/* This driver lives in a spar partition and registers for ethernet IO
 16 * channels from the visorbus driver. It creates netdev devices,
 17 * forwards transmits to the IO channel, and accepts rcvs from the IO
 18 * Partition via the IO channel.
 19 */
20
21#include <linux/debugfs.h>
 22#include <linux/etherdevice.h>
 23#include <linux/netdevice.h>
 24#include <linux/kthread.h>
25#include <linux/skbuff.h>
26#include <linux/rtnetlink.h>
27
28#include "visorbus.h"
29#include "iochannel.h"
30
 31#define VISORNIC_INFINITE_RSP_WAIT 0
32#define VISORNICSOPENMAX 32
33#define MAXDEVICES 16384
34
35/* MAX_BUF = 64 lines x 32 MAXVNIC x 80 characters
36 * = 163840 bytes
37 */
38#define MAX_BUF 163840
 39#define NAPI_WEIGHT 64
 40
41static int visornic_probe(struct visor_device *dev);
42static void visornic_remove(struct visor_device *dev);
43static int visornic_pause(struct visor_device *dev,
44 visorbus_state_complete_func complete_func);
45static int visornic_resume(struct visor_device *dev,
46 visorbus_state_complete_func complete_func);
47
48/* DEBUGFS declarations */
49static ssize_t info_debugfs_read(struct file *file, char __user *buf,
50 size_t len, loff_t *offset);
51static ssize_t enable_ints_write(struct file *file, const char __user *buf,
52 size_t len, loff_t *ppos);
53static struct dentry *visornic_debugfs_dir;
54static const struct file_operations debugfs_info_fops = {
55 .read = info_debugfs_read,
56};
57
58static const struct file_operations debugfs_enable_ints_fops = {
59 .write = enable_ints_write,
60};
61
 62/* GUIDs for director channel type supported by this driver. */
63static struct visor_channeltype_descriptor visornic_channel_types[] = {
64 /* Note that the only channel type we expect to be reported by the
65 * bus driver is the SPAR_VNIC channel.
66 */
67 { SPAR_VNIC_CHANNEL_PROTOCOL_UUID, "ultravnic" },
68 { NULL_UUID_LE, NULL }
69};
70MODULE_DEVICE_TABLE(visorbus, visornic_channel_types);
71/*
72 * FIXME XXX: This next line of code must be fixed and removed before
73 * acceptance into the 'normal' part of the kernel. It is only here as a place
74 * holder to get module autoloading functionality working for visorbus. Code
 75 * must be added to scripts/mod/file2alias.c, etc., to get this working
76 * properly.
77 */
78MODULE_ALIAS("visorbus:" SPAR_VNIC_CHANNEL_PROTOCOL_UUID_STR);
79
80/* This is used to tell the visor bus driver which types of visor devices
81 * we support, and what functions to call when a visor device that we support
82 * is attached or removed.
83 */
84static struct visor_driver visornic_driver = {
85 .name = "visornic",
86 .version = "1.0.0.0",
87 .vertag = NULL,
88 .owner = THIS_MODULE,
89 .channel_types = visornic_channel_types,
90 .probe = visornic_probe,
91 .remove = visornic_remove,
92 .pause = visornic_pause,
93 .resume = visornic_resume,
94 .channel_interrupt = NULL,
95};
96
97struct chanstat {
98 unsigned long got_rcv;
99 unsigned long got_enbdisack;
100 unsigned long got_xmit_done;
101 unsigned long xmit_fail;
102 unsigned long sent_enbdis;
103 unsigned long sent_promisc;
104 unsigned long sent_post;
 105 unsigned long sent_post_failed;
106 unsigned long sent_xmit;
107 unsigned long reject_count;
108 unsigned long extra_rcvbufs_sent;
109};
110
111struct visornic_devdata {
112 /* 0 disabled 1 enabled to receive */
113 unsigned short enabled;
114 /* NET_RCV_ENABLE/DISABLE acked by IOPART */
115 unsigned short enab_dis_acked;
116
 117 struct visor_device *dev;
118 struct net_device *netdev;
119 struct net_device_stats net_stats;
120 atomic_t interrupt_rcvd;
121 wait_queue_head_t rsp_queue;
122 struct sk_buff **rcvbuf;
123 /* incarnation_id lets IOPART know about re-birth */
124 u64 incarnation_id;
125 /* flags as they were prior to set_multicast_list */
126 unsigned short old_flags;
127 atomic_t usage; /* count of users */
128
129 /* number of rcv buffers the vnic will post */
130 int num_rcv_bufs;
131 int num_rcv_bufs_could_not_alloc;
132 atomic_t num_rcvbuf_in_iovm;
133 unsigned long alloc_failed_in_if_needed_cnt;
134 unsigned long alloc_failed_in_repost_rtn_cnt;
135
136 /* absolute max number of outstanding xmits - should never hit this */
137 unsigned long max_outstanding_net_xmits;
138 /* high water mark for calling netif_stop_queue() */
139 unsigned long upper_threshold_net_xmits;
140 /* high water mark for calling netif_wake_queue() */
141 unsigned long lower_threshold_net_xmits;
142 /* xmitbufhead - head of the xmit buffer list sent to the IOPART end */
143 struct sk_buff_head xmitbufhead;
144
 145 visorbus_state_complete_func server_down_complete_func;
 146 struct work_struct timeout_reset;
147 /* cmdrsp_rcv is used for posting/unposting rcv buffers */
148 struct uiscmdrsp *cmdrsp_rcv;
149 /* xmit_cmdrsp - issues NET_XMIT - only one active xmit at a time */
150 struct uiscmdrsp *xmit_cmdrsp;
151
152 bool server_down; /* IOPART is down */
153 bool server_change_state; /* Processing SERVER_CHANGESTATE msg */
 154 bool going_away; /* device is being torn down */
 155 struct dentry *eth_debugfs_dir;
156 u64 interrupts_rcvd;
157 u64 interrupts_notme;
158 u64 interrupts_disabled;
159 u64 busy_cnt;
160 spinlock_t priv_lock; /* spinlock to access devdata structures */
161
162 /* flow control counter */
163 u64 flow_control_upper_hits;
164 u64 flow_control_lower_hits;
165
166 /* debug counters */
167 unsigned long n_rcv0; /* # rcvs of 0 buffers */
168 unsigned long n_rcv1; /* # rcvs of 1 buffers */
169 unsigned long n_rcv2; /* # rcvs of 2 buffers */
170 unsigned long n_rcvx; /* # rcvs of >2 buffers */
171 unsigned long found_repost_rcvbuf_cnt; /* # repost_rcvbuf_cnt */
172 unsigned long repost_found_skb_cnt; /* # of found the skb */
173 unsigned long n_repost_deficit; /* # of lost rcv buffers */
174 unsigned long bad_rcv_buf; /* # of unknown rcv skb not freed */
 175 unsigned long n_rcv_packets_not_accepted; /* # bogus rcv packets */
176
177 int queuefullmsg_logged;
178 struct chanstat chstat;
179 struct timer_list irq_poll_timer;
180 struct napi_struct napi;
181 struct uiscmdrsp cmdrsp[SIZEOF_CMDRSP];
182};
183
184static int visornic_poll(struct napi_struct *napi, int budget);
185static void poll_for_irq(unsigned long v);
186
187/**
188 * visor_copy_fragsinfo_from_skb(
 189 * @skb: skbuff that we are pulling the frags from
190 * @firstfraglen: length of first fragment in skb
191 * @frags_max: max len of frags array
192 * @frags: frags array filled in on output
193 *
194 * Copy the fragment list in the SKB to a phys_info
195 * array that the IOPART understands.
196 * Return value indicates number of entries filled in frags
197 * Negative values indicate an error.
198 */
 199static int
200visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen,
201 unsigned int frags_max,
202 struct phys_info frags[])
203{
 204 unsigned int count = 0, frag, size, offset = 0, numfrags;
 205 unsigned int total_count;
206
207 numfrags = skb_shinfo(skb)->nr_frags;
208
 209 /* Compute the number of fragments this skb has, and if it's more than
210 * frag array can hold, linearize the skb
211 */
212 total_count = numfrags + (firstfraglen / PI_PAGE_SIZE);
213 if (firstfraglen % PI_PAGE_SIZE)
214 total_count++;
215
216 if (total_count > frags_max) {
217 if (skb_linearize(skb))
218 return -EINVAL;
219 numfrags = skb_shinfo(skb)->nr_frags;
220 firstfraglen = 0;
221 }
222
223 while (firstfraglen) {
224 if (count == frags_max)
225 return -EINVAL;
226
227 frags[count].pi_pfn =
228 page_to_pfn(virt_to_page(skb->data + offset));
229 frags[count].pi_off =
230 (unsigned long)(skb->data + offset) & PI_PAGE_MASK;
231 size = min_t(unsigned int, firstfraglen,
232 PI_PAGE_SIZE - frags[count].pi_off);
233
234 /* can take smallest of firstfraglen (what's left) OR
235 * bytes left in the page
236 */
237 frags[count].pi_len = size;
238 firstfraglen -= size;
239 offset += size;
240 count++;
241 }
242 if (numfrags) {
243 if ((count + numfrags) > frags_max)
244 return -EINVAL;
245
 246 for (frag = 0; frag < numfrags; frag++) {
 247 count = add_physinfo_entries(page_to_pfn(
 248 skb_frag_page(&skb_shinfo(skb)->frags[frag])),
 249 skb_shinfo(skb)->frags[frag].
 250 page_offset,
 251 skb_shinfo(skb)->frags[frag].
 252 size, count, frags_max, frags);
 253 /* add_physinfo_entries only returns
 254 * zero if the frags array is out of room
 255 * That should never happen because we
 256 * fail above, if count+numfrags > frags_max.
 257 */
258 if (!count)
259 return -EINVAL;
260 }
261 }
262 if (skb_shinfo(skb)->frag_list) {
263 struct sk_buff *skbinlist;
264 int c;
265
266 for (skbinlist = skb_shinfo(skb)->frag_list; skbinlist;
267 skbinlist = skbinlist->next) {
268 c = visor_copy_fragsinfo_from_skb(skbinlist,
269 skbinlist->len -
270 skbinlist->data_len,
271 frags_max - count,
272 &frags[count]);
273 if (c < 0)
274 return c;
275 count += c;
276 }
277 }
278 return count;
279}
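/* Illustrative sketch only (not part of the driver): a caller is expected to
 * size the frags array to MAX_PHYS_INFO and treat a negative return as "this
 * skb cannot be described in frags_max entries", e.g.:
 *
 *	struct phys_info frags[MAX_PHYS_INFO];
 *	int n = visor_copy_fragsinfo_from_skb(skb, skb->len - skb->data_len,
 *					      MAX_PHYS_INFO, frags);
 *	if (n < 0)
 *		return n;
 *
 * This mirrors how visornic_xmit() below uses the helper.
 */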
280
281static ssize_t enable_ints_write(struct file *file,
282 const char __user *buffer,
283 size_t count, loff_t *ppos)
284{
 285 /* Don't want to break ABI here by having a debugfs
 286 * file that no longer exists or is writable, so
 287 * let's just make this a vestigial function
 288 */
289 return count;
290}
291
292/**
 293 * visornic_serverdown_complete - IOPART went down, pause device
 294 * @devdata: visornic device that is being taken down
295 *
296 * The IO partition has gone down and we need to do some cleanup
297 * for when it comes back. Treat the IO partition as the link
298 * being down.
299 * Returns void.
300 */
301static void
 302visornic_serverdown_complete(struct visornic_devdata *devdata)
 303{
 304 struct net_device *netdev;
 305
306 netdev = devdata->netdev;
307
308 /* Stop polling for interrupts */
309 del_timer_sync(&devdata->irq_poll_timer);
 310
311 rtnl_lock();
312 dev_close(netdev);
313 rtnl_unlock();
 314
 315 atomic_set(&devdata->num_rcvbuf_in_iovm, 0);
316 devdata->chstat.sent_xmit = 0;
317 devdata->chstat.got_xmit_done = 0;
 318
319 if (devdata->server_down_complete_func)
320 (*devdata->server_down_complete_func)(devdata->dev, 0);
321
322 devdata->server_down = true;
323 devdata->server_change_state = false;
 324 devdata->server_down_complete_func = NULL;
325}
326
327/**
 328 * visornic_serverdown - Command has notified us that IOPART is down
329 * @devdata: device that is being managed by IOPART
330 *
331 * Schedule the work needed to handle the server down request. Make
332 * sure we haven't already handled the server change state event.
333 * Returns 0 if we scheduled the work, -EINVAL on error.
334 */
335static int
336visornic_serverdown(struct visornic_devdata *devdata,
337 visorbus_state_complete_func complete_func)
 338{
 339 unsigned long flags;
 340 int err;
341
342 spin_lock_irqsave(&devdata->priv_lock, flags);
 343 if (devdata->server_change_state) {
344 dev_dbg(&devdata->dev->device, "%s changing state\n",
345 __func__);
346 err = -EINVAL;
347 goto err_unlock;
348 }
349 if (devdata->server_down) {
350 dev_dbg(&devdata->dev->device, "%s already down\n",
351 __func__);
352 err = -EINVAL;
353 goto err_unlock;
354 }
355 if (devdata->going_away) {
356 dev_dbg(&devdata->dev->device,
357 "%s aborting because device removal pending\n",
358 __func__);
359 err = -ENODEV;
360 goto err_unlock;
 361 }
362 devdata->server_change_state = true;
363 devdata->server_down_complete_func = complete_func;
 364 spin_unlock_irqrestore(&devdata->priv_lock, flags);
365
366 visornic_serverdown_complete(devdata);
 367 return 0;
368
369err_unlock:
370 spin_unlock_irqrestore(&devdata->priv_lock, flags);
371 return err;
372}
373
374/**
375 * alloc_rcv_buf - alloc rcv buffer to be given to the IO Partition.
 376 * @netdev: network adapter the rcv bufs are attached to.
377 *
378 * Create an sk_buff (rcv_buf) that will be passed to the IO Partition
379 * so that it can write rcv data into our memory space.
380 * Return pointer to sk_buff
381 */
382static struct sk_buff *
383alloc_rcv_buf(struct net_device *netdev)
384{
385 struct sk_buff *skb;
386
387 /* NOTE: the first fragment in each rcv buffer is pointed to by
388 * rcvskb->data. For now all rcv buffers will be RCVPOST_BUF_SIZE
 389 * in length, so the first frag is large enough to hold 1514 bytes.
390 */
391 skb = alloc_skb(RCVPOST_BUF_SIZE, GFP_ATOMIC);
392 if (!skb)
393 return NULL;
394 skb->dev = netdev;
395 /* current value of mtu doesn't come into play here; large
396 * packets will just end up using multiple rcv buffers all of
 397 * the same size.
 398 */
399 skb->len = RCVPOST_BUF_SIZE;
 400 /* alloc_skb already zeroes it out; the assignment below is for clarity. */
401 skb->data_len = 0;
402 return skb;
403}
404
405/**
406 * post_skb - post a skb to the IO Partition.
 407 * @cmdrsp: cmdrsp packet to be sent to the IO Partition
 408 * @devdata: visornic_devdata to post the skb to
409 * @skb: skb to give to the IO partition
410 *
411 * Send the skb to the IO Partition.
412 * Returns void
413 */
414static inline void
415post_skb(struct uiscmdrsp *cmdrsp,
416 struct visornic_devdata *devdata, struct sk_buff *skb)
417{
418 cmdrsp->net.buf = skb;
419 cmdrsp->net.rcvpost.frag.pi_pfn = page_to_pfn(virt_to_page(skb->data));
420 cmdrsp->net.rcvpost.frag.pi_off =
421 (unsigned long)skb->data & PI_PAGE_MASK;
422 cmdrsp->net.rcvpost.frag.pi_len = skb->len;
 423 cmdrsp->net.rcvpost.unique_num = devdata->incarnation_id;
424
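	/* The rcv buffer is only handed to the IO partition if its data fits
	 * within a single page; if pi_off + skb->len would cross a page
	 * boundary the post is silently skipped.
	 */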
425 if ((cmdrsp->net.rcvpost.frag.pi_off + skb->len) <= PI_PAGE_SIZE) {
426 cmdrsp->net.type = NET_RCV_POST;
427 cmdrsp->cmdtype = CMD_NET_TYPE;
 428 if (visorchannel_signalinsert(devdata->dev->visorchannel,
429 IOCHAN_TO_IOPART,
430 cmdrsp)) {
431 atomic_inc(&devdata->num_rcvbuf_in_iovm);
432 devdata->chstat.sent_post++;
433 } else {
434 devdata->chstat.sent_post_failed++;
435 }
436 }
437}
438
439/**
440 * send_enbdis - send NET_RCV_ENBDIS to IO Partition
 441 * @netdev: netdevice we are enabling/disabling, used as context
442 * return value
443 * @state: enable = 1/disable = 0
444 * @devdata: visornic device we are enabling/disabling
445 *
446 * Send the enable/disable message to the IO Partition.
447 * Returns void
448 */
449static void
450send_enbdis(struct net_device *netdev, int state,
451 struct visornic_devdata *devdata)
452{
453 devdata->cmdrsp_rcv->net.enbdis.enable = state;
454 devdata->cmdrsp_rcv->net.enbdis.context = netdev;
455 devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
456 devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
 457 if (visorchannel_signalinsert(devdata->dev->visorchannel,
458 IOCHAN_TO_IOPART,
459 devdata->cmdrsp_rcv))
 460 devdata->chstat.sent_enbdis++;
461}
462
463/**
464 * visornic_disable_with_timeout - Disable network adapter
 465 * @netdev: netdevice to disable
466 * @timeout: timeout to wait for disable
467 *
468 * Disable the network adapter and inform the IO Partition that we
469 * are disabled, reclaim memory from rcv bufs.
470 * Returns 0 on success, negative for failure of IO Partition
471 * responding.
472 *
473 */
474static int
475visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
476{
477 struct visornic_devdata *devdata = netdev_priv(netdev);
478 int i;
479 unsigned long flags;
480 int wait = 0;
481
482 /* send a msg telling the other end we are stopping incoming pkts */
483 spin_lock_irqsave(&devdata->priv_lock, flags);
484 devdata->enabled = 0;
485 devdata->enab_dis_acked = 0; /* must wait for ack */
486 spin_unlock_irqrestore(&devdata->priv_lock, flags);
487
488 /* send disable and wait for ack -- don't hold lock when sending
489 * disable because if the queue is full, insert might sleep.
490 */
491 send_enbdis(netdev, 0, devdata);
492
493 /* wait for ack to arrive before we try to free rcv buffers
 494 * NOTE: the other end automatically unposts the rcv buffers
 495 * when it gets a disable.
496 */
497 spin_lock_irqsave(&devdata->priv_lock, flags);
 498 while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
499 (wait < timeout)) {
500 if (devdata->enab_dis_acked)
501 break;
502 if (devdata->server_down || devdata->server_change_state) {
503 spin_unlock_irqrestore(&devdata->priv_lock, flags);
504 dev_dbg(&netdev->dev, "%s server went away\n",
505 __func__);
506 return -EIO;
507 }
508 set_current_state(TASK_INTERRUPTIBLE);
509 spin_unlock_irqrestore(&devdata->priv_lock, flags);
510 wait += schedule_timeout(msecs_to_jiffies(10));
511 spin_lock_irqsave(&devdata->priv_lock, flags);
512 }
513
514 /* Wait for usage to go to 1 (no other users) before freeing
515 * rcv buffers
516 */
517 if (atomic_read(&devdata->usage) > 1) {
518 while (1) {
519 set_current_state(TASK_INTERRUPTIBLE);
520 spin_unlock_irqrestore(&devdata->priv_lock, flags);
521 schedule_timeout(msecs_to_jiffies(10));
522 spin_lock_irqsave(&devdata->priv_lock, flags);
523 if (atomic_read(&devdata->usage))
524 break;
525 }
526 }
527 /* we've set enabled to 0, so we can give up the lock. */
528 spin_unlock_irqrestore(&devdata->priv_lock, flags);
529
530 /* stop the transmit queue so nothing more can be transmitted */
531 netif_stop_queue(netdev);
532
533 napi_disable(&devdata->napi);
534
535 skb_queue_purge(&devdata->xmitbufhead);
536
 537 /* Free rcv buffers - other end has automatically unposted them on
538 * disable
539 */
540 for (i = 0; i < devdata->num_rcv_bufs; i++) {
541 if (devdata->rcvbuf[i]) {
542 kfree_skb(devdata->rcvbuf[i]);
543 devdata->rcvbuf[i] = NULL;
544 }
545 }
546
547 return 0;
548}
549
550/**
551 * init_rcv_bufs -- initialize receive bufs and send them to the IO Part
552 * @netdev: struct netdevice
553 * @devdata: visornic_devdata
554 *
555 * Allocate rcv buffers and post them to the IO Partition.
556 * Return 0 for success, and negative for failure.
557 */
558static int
559init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata)
560{
561 int i, count;
562
563 /* allocate fixed number of receive buffers to post to uisnic
564 * post receive buffers after we've allocated a required amount
565 */
566 for (i = 0; i < devdata->num_rcv_bufs; i++) {
567 devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
568 if (!devdata->rcvbuf[i])
569 break; /* if we failed to allocate one let us stop */
570 }
571 if (i == 0) /* couldn't even allocate one -- bail out */
572 return -ENOMEM;
573 count = i;
574
 575 /* Ensure we can alloc 2/3rd of the requested number of buffers.
576 * 2/3 is an arbitrary choice; used also in ndis init.c
577 */
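	/* Worked example with illustrative numbers: if num_rcv_bufs were 64,
	 * the integer math below requires count >= (2 * 64) / 3 = 42 buffers,
	 * otherwise everything allocated so far is freed and we return
	 * -ENOMEM.
	 */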
578 if (count < ((2 * devdata->num_rcv_bufs) / 3)) {
579 /* free receive buffers we did alloc and then bail out */
580 for (i = 0; i < count; i++) {
581 kfree_skb(devdata->rcvbuf[i]);
582 devdata->rcvbuf[i] = NULL;
583 }
584 return -ENOMEM;
585 }
586
587 /* post receive buffers to receive incoming input - without holding
588 * lock - we've not enabled nor started the queue so there shouldn't
589 * be any rcv or xmit activity
590 */
591 for (i = 0; i < count; i++)
592 post_skb(devdata->cmdrsp_rcv, devdata, devdata->rcvbuf[i]);
593
594 return 0;
595}
596
597/**
598 * visornic_enable_with_timeout - send enable to IO Part
599 * @netdev: struct net_device
600 * @timeout: Time to wait for the ACK from the enable
601 *
602 * Sends enable to IOVM, inits, and posts receive buffers to IOVM
603 * timeout is defined in msecs (timeout of 0 specifies infinite wait)
 605 * Return 0 for success, negative for failure.
605 */
606static int
607visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
608{
609 int i;
610 struct visornic_devdata *devdata = netdev_priv(netdev);
611 unsigned long flags;
612 int wait = 0;
613
614 /* NOTE: the other end automatically unposts the rcv buffers when it
615 * gets a disable.
616 */
617 i = init_rcv_bufs(netdev, devdata);
618 if (i < 0) {
619 dev_err(&netdev->dev,
620 "%s failed to init rcv bufs (%d)\n", __func__, i);
 621 return i;
 622 }
623
624 spin_lock_irqsave(&devdata->priv_lock, flags);
625 devdata->enabled = 1;
 626 devdata->enab_dis_acked = 0;
627
628 /* now we're ready, let's send an ENB to uisnic but until we get
629 * an ACK back from uisnic, we'll drop the packets
630 */
631 devdata->n_rcv_packets_not_accepted = 0;
632 spin_unlock_irqrestore(&devdata->priv_lock, flags);
633
634 /* send enable and wait for ack -- don't hold lock when sending enable
635 * because if the queue is full, insert might sleep.
636 */
 637 napi_enable(&devdata->napi);
638 send_enbdis(netdev, 1, devdata);
639
640 spin_lock_irqsave(&devdata->priv_lock, flags);
 641 while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
642 (wait < timeout)) {
643 if (devdata->enab_dis_acked)
644 break;
645 if (devdata->server_down || devdata->server_change_state) {
646 spin_unlock_irqrestore(&devdata->priv_lock, flags);
647 dev_dbg(&netdev->dev, "%s server went away\n",
648 __func__);
649 return -EIO;
650 }
651 set_current_state(TASK_INTERRUPTIBLE);
652 spin_unlock_irqrestore(&devdata->priv_lock, flags);
653 wait += schedule_timeout(msecs_to_jiffies(10));
654 spin_lock_irqsave(&devdata->priv_lock, flags);
655 }
656
657 spin_unlock_irqrestore(&devdata->priv_lock, flags);
658
659 if (!devdata->enab_dis_acked) {
660 dev_err(&netdev->dev, "%s missing ACK\n", __func__);
 661 return -EIO;
 662 }
 663
 664 netif_start_queue(netdev);
 665
666 return 0;
667}
668
669/**
670 * visornic_timeout_reset - handle xmit timeout resets
 671 * @work: work item that scheduled the work
 672 *
 673 * Transmit timeouts are typically handled by resetting the
 674 * device; for our virtual NIC we will send a disable and enable
 675 * to the IOVM. If it doesn't respond we will trigger a serverdown.
676 */
677static void
678visornic_timeout_reset(struct work_struct *work)
679{
680 struct visornic_devdata *devdata;
681 struct net_device *netdev;
682 int response = 0;
683
684 devdata = container_of(work, struct visornic_devdata, timeout_reset);
685 netdev = devdata->netdev;
686
687 rtnl_lock();
688 if (!netif_running(netdev)) {
689 rtnl_unlock();
690 return;
691 }
692
693 response = visornic_disable_with_timeout(netdev,
694 VISORNIC_INFINITE_RSP_WAIT);
695 if (response)
696 goto call_serverdown;
697
698 response = visornic_enable_with_timeout(netdev,
699 VISORNIC_INFINITE_RSP_WAIT);
700 if (response)
701 goto call_serverdown;
 702
703 rtnl_unlock();
704
705 return;
706
707call_serverdown:
 708 visornic_serverdown(devdata, NULL);
 709 rtnl_unlock();
710}
711
712/**
713 * visornic_open - Enable the visornic device and mark the queue started
714 * @netdev: netdevice to start
715 *
716 * Enable the device and start the transmit queue.
717 * Return 0 for success
718 */
719static int
720visornic_open(struct net_device *netdev)
721{
 722 visornic_enable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);
 723
724 return 0;
725}
726
727/**
728 * visornic_close - Disables the visornic device and stops the queues
729 * @netdev: netdevice to start
730 *
731 * Disable the device and stop the transmit queue.
732 * Return 0 for success
733 */
734static int
735visornic_close(struct net_device *netdev)
736{
 737 visornic_disable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);
738
739 return 0;
740}
741
742/**
743 * devdata_xmits_outstanding - compute outstanding xmits
744 * @devdata: visornic_devdata for device
745 *
746 * Return value is the number of outstanding xmits.
747 */
748static unsigned long devdata_xmits_outstanding(struct visornic_devdata *devdata)
749{
750 if (devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done)
751 return devdata->chstat.sent_xmit -
752 devdata->chstat.got_xmit_done;
753 return (ULONG_MAX - devdata->chstat.got_xmit_done
754 + devdata->chstat.sent_xmit + 1);
755}
756
757/**
758 * vnic_hit_high_watermark
759 * @devdata: indicates visornic device we are checking
760 * @high_watermark: max num of unacked xmits we will tolerate,
761 * before we will start throttling
762 *
763 * Returns true iff the number of unacked xmits sent to
764 * the IO partition is >= high_watermark.
765 */
766static inline bool vnic_hit_high_watermark(struct visornic_devdata *devdata,
767 ulong high_watermark)
768{
769 return (devdata_xmits_outstanding(devdata) >= high_watermark);
770}
771
772/**
773 * vnic_hit_low_watermark
774 * @devdata: indicates visornic device we are checking
775 * @low_watermark: we will wait until the num of unacked xmits
776 * drops to this value or lower before we start
777 * transmitting again
778 *
779 * Returns true iff the number of unacked xmits sent to
780 * the IO partition is <= low_watermark.
781 */
782static inline bool vnic_hit_low_watermark(struct visornic_devdata *devdata,
783 ulong low_watermark)
784{
785 return (devdata_xmits_outstanding(devdata) <= low_watermark);
786}
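/* Worked example for the wraparound case in devdata_xmits_outstanding():
 * with sent_xmit = 5 (the counter has wrapped) and
 * got_xmit_done = ULONG_MAX - 2, the outstanding count is
 * ULONG_MAX - (ULONG_MAX - 2) + 5 + 1 = 8, i.e. eight xmits still awaiting
 * NET_XMIT_DONE.
 */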
787
788/**
789 * visornic_xmit - send a packet to the IO Partition
790 * @skb: Packet to be sent
791 * @netdev: net device the packet is being sent from
792 *
 793 * Convert the skb to a cmdrsp so the IO Partition can understand it.
794 * Send the XMIT command to the IO Partition for processing. This
795 * function is protected from concurrent calls by a spinlock xmit_lock
796 * in the net_device struct, but as soon as the function returns it
797 * can be called again.
 798 * Returns NETDEV_TX_OK.
799 */
800static int
801visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
802{
803 struct visornic_devdata *devdata;
804 int len, firstfraglen, padlen;
805 struct uiscmdrsp *cmdrsp = NULL;
806 unsigned long flags;
807
808 devdata = netdev_priv(netdev);
809 spin_lock_irqsave(&devdata->priv_lock, flags);
810
811 if (netif_queue_stopped(netdev) || devdata->server_down ||
812 devdata->server_change_state) {
813 spin_unlock_irqrestore(&devdata->priv_lock, flags);
814 devdata->busy_cnt++;
815 dev_dbg(&netdev->dev,
816 "%s busy - queue stopped\n", __func__);
817 kfree_skb(skb);
818 return NETDEV_TX_OK;
819 }
820
821 /* sk_buff struct is used to host network data throughout all the
822 * linux network subsystems
823 */
824 len = skb->len;
825
826 /* skb->len is the FULL length of data (including fragmentary portion)
827 * skb->data_len is the length of the fragment portion in frags
828 * skb->len - skb->data_len is size of the 1st fragment in skb->data
829 * calculate the length of the first fragment that skb->data is
830 * pointing to
831 */
832 firstfraglen = skb->len - skb->data_len;
833 if (firstfraglen < ETH_HEADER_SIZE) {
834 spin_unlock_irqrestore(&devdata->priv_lock, flags);
835 devdata->busy_cnt++;
836 dev_err(&netdev->dev,
837 "%s busy - first frag too small (%d)\n",
838 __func__, firstfraglen);
839 kfree_skb(skb);
840 return NETDEV_TX_OK;
841 }
842
843 if ((len < ETH_MIN_PACKET_SIZE) &&
844 ((skb_end_pointer(skb) - skb->data) >= ETH_MIN_PACKET_SIZE)) {
845 /* pad the packet out to minimum size */
846 padlen = ETH_MIN_PACKET_SIZE - len;
847 memset(&skb->data[len], 0, padlen);
848 skb->tail += padlen;
849 skb->len += padlen;
850 len += padlen;
851 firstfraglen += padlen;
852 }
853
854 cmdrsp = devdata->xmit_cmdrsp;
855 /* clear cmdrsp */
856 memset(cmdrsp, 0, SIZEOF_CMDRSP);
857 cmdrsp->net.type = NET_XMIT;
858 cmdrsp->cmdtype = CMD_NET_TYPE;
859
860 /* save the pointer to skb -- we'll need it for completion */
861 cmdrsp->net.buf = skb;
862
863 if (vnic_hit_high_watermark(devdata,
864 devdata->max_outstanding_net_xmits)) {
 865 /* extra NET_XMITs queued over to IOVM - need to wait */
866 devdata->chstat.reject_count++;
867 if (!devdata->queuefullmsg_logged &&
868 ((devdata->chstat.reject_count & 0x3ff) == 1))
869 devdata->queuefullmsg_logged = 1;
870 netif_stop_queue(netdev);
871 spin_unlock_irqrestore(&devdata->priv_lock, flags);
872 devdata->busy_cnt++;
873 dev_dbg(&netdev->dev,
874 "%s busy - waiting for iovm to catch up\n",
875 __func__);
876 kfree_skb(skb);
877 return NETDEV_TX_OK;
878 }
879 if (devdata->queuefullmsg_logged)
880 devdata->queuefullmsg_logged = 0;
881
882 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
883 cmdrsp->net.xmt.lincsum.valid = 1;
884 cmdrsp->net.xmt.lincsum.protocol = skb->protocol;
885 if (skb_transport_header(skb) > skb->data) {
886 cmdrsp->net.xmt.lincsum.hrawoff =
887 skb_transport_header(skb) - skb->data;
 888 cmdrsp->net.xmt.lincsum.hrawoffv = 1;
889 }
890 if (skb_network_header(skb) > skb->data) {
891 cmdrsp->net.xmt.lincsum.nhrawoff =
892 skb_network_header(skb) - skb->data;
893 cmdrsp->net.xmt.lincsum.nhrawoffv = 1;
894 }
895 cmdrsp->net.xmt.lincsum.csum = skb->csum;
896 } else {
897 cmdrsp->net.xmt.lincsum.valid = 0;
898 }
899
900 /* save off the length of the entire data packet */
901 cmdrsp->net.xmt.len = len;
902
903 /* copy ethernet header from first frag into ocmdrsp
904 * - everything else will be pass in frags & DMA'ed
905 */
906 memcpy(cmdrsp->net.xmt.ethhdr, skb->data, ETH_HEADER_SIZE);
907 /* copy frags info - from skb->data we need to only provide access
908 * beyond eth header
909 */
910 cmdrsp->net.xmt.num_frags =
911 visor_copy_fragsinfo_from_skb(skb, firstfraglen,
912 MAX_PHYS_INFO,
913 cmdrsp->net.xmt.frags);
 914 if (cmdrsp->net.xmt.num_frags < 0) {
915 spin_unlock_irqrestore(&devdata->priv_lock, flags);
916 devdata->busy_cnt++;
917 dev_err(&netdev->dev,
918 "%s busy - copy frags failed\n", __func__);
919 kfree_skb(skb);
920 return NETDEV_TX_OK;
921 }
922
923 if (!visorchannel_signalinsert(devdata->dev->visorchannel,
924 IOCHAN_TO_IOPART, cmdrsp)) {
925 netif_stop_queue(netdev);
926 spin_unlock_irqrestore(&devdata->priv_lock, flags);
927 devdata->busy_cnt++;
928 dev_dbg(&netdev->dev,
929 "%s busy - signalinsert failed\n", __func__);
930 kfree_skb(skb);
931 return NETDEV_TX_OK;
932 }
933
934 /* Track the skbs that have been sent to the IOVM for XMIT */
935 skb_queue_head(&devdata->xmitbufhead, skb);
936
937 /* update xmt stats */
938 devdata->net_stats.tx_packets++;
939 devdata->net_stats.tx_bytes += skb->len;
940 devdata->chstat.sent_xmit++;
941
 942 /* check if we have hit the high watermark for netif_stop_queue() */
943 if (vnic_hit_high_watermark(devdata,
944 devdata->upper_threshold_net_xmits)) {
945 /* extra NET_XMITs queued over to IOVM - need to wait */
946 /* stop queue - call netif_wake_queue() after lower threshold */
947 netif_stop_queue(netdev);
948 dev_dbg(&netdev->dev,
949 "%s busy - invoking iovm flow control\n",
950 __func__);
951 devdata->flow_control_upper_hits++;
952 }
953 spin_unlock_irqrestore(&devdata->priv_lock, flags);
954
955 /* skb will be freed when we get back NET_XMIT_DONE */
956 return NETDEV_TX_OK;
957}
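/* Life cycle of a transmitted skb in this driver: it is queued on
 * xmitbufhead above, unlinked in service_resp_queue() when the IO partition
 * returns NET_XMIT_DONE, and only then freed with kfree_skb().
 */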
958
959/**
960 * visornic_get_stats - returns net_stats of the visornic device
961 * @netdev: netdevice
962 *
963 * Returns the net_device_stats for the device
964 */
965static struct net_device_stats *
966visornic_get_stats(struct net_device *netdev)
967{
968 struct visornic_devdata *devdata = netdev_priv(netdev);
969
970 return &devdata->net_stats;
971}
972
973/**
974 * visornic_change_mtu - changes mtu of device.
975 * @netdev: netdevice
976 * @new_mtu: value of new mtu
977 *
978 * MTU cannot be changed by system, must be changed via
979 * CONTROLVM message. All vnics and pnics in a switch have
980 * to have the same MTU for everything to work.
981 * Currently not supported.
 982 * Returns -EINVAL
983 */
984static int
985visornic_change_mtu(struct net_device *netdev, int new_mtu)
986{
987 return -EINVAL;
988}
989
990/**
 991 * visornic_set_multi - changes the rx mode flags of the device.
992 * @netdev: netdevice
993 *
994 * Only flag we support currently is IFF_PROMISC
995 * Returns void
996 */
997static void
998visornic_set_multi(struct net_device *netdev)
999{
1000 struct uiscmdrsp *cmdrsp;
1001 struct visornic_devdata *devdata = netdev_priv(netdev);
1002
1003 if (devdata->old_flags == netdev->flags)
1004 return;
1005
1006 if ((netdev->flags & IFF_PROMISC) ==
1007 (devdata->old_flags & IFF_PROMISC))
1008 goto out_save_flags;
1009
1010 cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
1011 if (!cmdrsp)
1012 return;
1013 cmdrsp->cmdtype = CMD_NET_TYPE;
1014 cmdrsp->net.type = NET_RCV_PROMISC;
1015 cmdrsp->net.enbdis.context = netdev;
1016 cmdrsp->net.enbdis.enable =
1017 netdev->flags & IFF_PROMISC;
1018 visorchannel_signalinsert(devdata->dev->visorchannel,
1019 IOCHAN_TO_IOPART,
1020 cmdrsp);
1021 kfree(cmdrsp);
1022
1023out_save_flags:
1024 devdata->old_flags = netdev->flags;
1025}
1026
1027/**
1028 * visornic_xmit_timeout - request to timeout the xmit
 1029 * @netdev: netdevice that timed out
1030 *
1031 * Queue the work and return. Make sure we have not already
1032 * been informed the IO Partition is gone, if it is gone
1033 * we will already timeout the xmits.
1034 */
1035static void
1036visornic_xmit_timeout(struct net_device *netdev)
1037{
1038 struct visornic_devdata *devdata = netdev_priv(netdev);
1039 unsigned long flags;
1040
1041 spin_lock_irqsave(&devdata->priv_lock, flags);
1042 if (devdata->going_away) {
1043 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1044 dev_dbg(&devdata->dev->device,
1045 "%s aborting because device removal pending\n",
1046 __func__);
1047 return;
1048 }
1049
1050 /* Ensure that a ServerDown message hasn't been received */
1051 if (!devdata->enabled ||
1052 (devdata->server_down && !devdata->server_change_state)) {
1053 dev_dbg(&netdev->dev, "%s no processing\n",
1054 __func__);
1055 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1056 return;
1057 }
 1058 schedule_work(&devdata->timeout_reset);
 1059 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1060}
1061
1062/**
1063 * repost_return - repost rcv bufs that have come back
1064 * @cmdrsp: io channel command struct to post
1065 * @devdata: visornic devdata for the device
1066 * @skb: skb
1067 * @netdev: netdevice
1068 *
1069 * Repost rcv buffers that have been returned to us when
1070 * we are finished with them.
 1071 * Returns 0 for success, negative errno on error.
1072 */
1073static inline int
1074repost_return(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
1075 struct sk_buff *skb, struct net_device *netdev)
1076{
1077 struct net_pkt_rcv copy;
1078 int i = 0, cc, numreposted;
1079 int found_skb = 0;
1080 int status = 0;
1081
1082 copy = cmdrsp->net.rcv;
1083 switch (copy.numrcvbufs) {
1084 case 0:
1085 devdata->n_rcv0++;
1086 break;
1087 case 1:
1088 devdata->n_rcv1++;
1089 break;
1090 case 2:
1091 devdata->n_rcv2++;
1092 break;
1093 default:
1094 devdata->n_rcvx++;
1095 break;
1096 }
1097 for (cc = 0, numreposted = 0; cc < copy.numrcvbufs; cc++) {
1098 for (i = 0; i < devdata->num_rcv_bufs; i++) {
1099 if (devdata->rcvbuf[i] != copy.rcvbuf[cc])
1100 continue;
1101
1102 if ((skb) && devdata->rcvbuf[i] == skb) {
1103 devdata->found_repost_rcvbuf_cnt++;
1104 found_skb = 1;
1105 devdata->repost_found_skb_cnt++;
1106 }
1107 devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
1108 if (!devdata->rcvbuf[i]) {
1109 devdata->num_rcv_bufs_could_not_alloc++;
1110 devdata->alloc_failed_in_repost_rtn_cnt++;
1111 status = -ENOMEM;
1112 break;
1113 }
1114 post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
1115 numreposted++;
1116 break;
1117 }
1118 }
1119 if (numreposted != copy.numrcvbufs) {
1120 devdata->n_repost_deficit++;
1121 status = -EINVAL;
1122 }
1123 if (skb) {
1124 if (found_skb) {
1125 kfree_skb(skb);
1126 } else {
1127 status = -EINVAL;
1128 devdata->bad_rcv_buf++;
1129 }
1130 }
1131 return status;
1132}
1133
1134/**
1135 * visornic_rx - Handle receive packets coming back from IO Part
1136 * @cmdrsp: Receive packet returned from IO Part
1137 *
1138 * Got a receive packet back from the IO Part, handle it and send
1139 * it up the stack.
 1140 * Returns the number of packets passed up the stack.
1141 */
 1142static int
1143visornic_rx(struct uiscmdrsp *cmdrsp)
1144{
1145 struct visornic_devdata *devdata;
1146 struct sk_buff *skb, *prev, *curr;
1147 struct net_device *netdev;
 1148 int cc, currsize, off;
1149 struct ethhdr *eth;
1150 unsigned long flags;
 1151 int rx_count = 0;
1152
1153 /* post new rcv buf to the other end using the cmdrsp we have at hand
1154 * post it without holding lock - but we'll use the signal lock to
1155 * synchronize the queue insert the cmdrsp that contains the net.rcv
1156 * is the one we are using to repost, so copy the info we need from it.
1157 */
1158 skb = cmdrsp->net.buf;
1159 netdev = skb->dev;
1160
1161 devdata = netdev_priv(netdev);
1162
1163 spin_lock_irqsave(&devdata->priv_lock, flags);
1164 atomic_dec(&devdata->num_rcvbuf_in_iovm);
1165
1166 /* set length to how much was ACTUALLY received -
1167 * NOTE: rcv_done_len includes actual length of data rcvd
1168 * including ethhdr
1169 */
1170 skb->len = cmdrsp->net.rcv.rcv_done_len;
1171
1172 /* update rcv stats - call it with priv_lock held */
1173 devdata->net_stats.rx_packets++;
1174 devdata->net_stats.rx_bytes += skb->len;
1175
1176 /* test enabled while holding lock */
1177 if (!(devdata->enabled && devdata->enab_dis_acked)) {
1178 /* don't process it unless we're in enable mode and until
1179 * we've gotten an ACK saying the other end got our RCV enable
1180 */
1181 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1182 repost_return(cmdrsp, devdata, skb, netdev);
 1183 return rx_count;
1184 }
1185
1186 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1187
1188 /* when skb was allocated, skb->dev, skb->data, skb->len and
1189 * skb->data_len were setup. AND, data has already put into the
1190 * skb (both first frag and in frags pages)
1191 * NOTE: firstfragslen is the amount of data in skb->data and that
1192 * which is not in nr_frags or frag_list. This is now simply
1193 * RCVPOST_BUF_SIZE. bump tail to show how much data is in
1194 * firstfrag & set data_len to show rest see if we have to chain
1195 * frag_list.
1196 */
1197 if (skb->len > RCVPOST_BUF_SIZE) { /* do PRECAUTIONARY check */
1198 if (cmdrsp->net.rcv.numrcvbufs < 2) {
1199 if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
1200 dev_err(&devdata->netdev->dev,
1201 "repost_return failed");
 1202 return rx_count;
1203 }
1204 /* length rcvd is greater than firstfrag in this skb rcv buf */
1205 skb->tail += RCVPOST_BUF_SIZE; /* amount in skb->data */
1206 skb->data_len = skb->len - RCVPOST_BUF_SIZE; /* amount that
1207 * will be in
1208 * frag_list
1209 */
1210 } else {
1211 /* data fits in this skb - no chaining - do
1212 * PRECAUTIONARY check
1213 */
1214 if (cmdrsp->net.rcv.numrcvbufs != 1) { /* should be 1 */
1215 if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
1216 dev_err(&devdata->netdev->dev,
1217 "repost_return failed");
 1218 return rx_count;
1219 }
1220 skb->tail += skb->len;
1221 skb->data_len = 0; /* nothing rcvd in frag_list */
1222 }
1223 off = skb_tail_pointer(skb) - skb->data;
1224
1225 /* amount we bumped tail by in the head skb
1226 * it is used to calculate the size of each chained skb below
1227 * it is also used to index into bufline to continue the copy
1228 * (for chansocktwopc)
1229 * if necessary chain the rcv skbs together.
1230 * NOTE: index 0 has the same as cmdrsp->net.rcv.skb; we need to
1231 * chain the rest to that one.
1232 * - do PRECAUTIONARY check
1233 */
1234 if (cmdrsp->net.rcv.rcvbuf[0] != skb) {
1235 if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
1236 dev_err(&devdata->netdev->dev, "repost_return failed");
 1237 return rx_count;
1238 }
1239
1240 if (cmdrsp->net.rcv.numrcvbufs > 1) {
1241 /* chain the various rcv buffers into the skb's frag_list. */
1242 /* Note: off was initialized above */
1243 for (cc = 1, prev = NULL;
1244 cc < cmdrsp->net.rcv.numrcvbufs; cc++) {
1245 curr = (struct sk_buff *)cmdrsp->net.rcv.rcvbuf[cc];
1246 curr->next = NULL;
1247 if (!prev) /* start of list- set head */
1248 skb_shinfo(skb)->frag_list = curr;
1249 else
1250 prev->next = curr;
1251 prev = curr;
1252
1253 /* should we set skb->len and skb->data_len for each
1254 * buffer being chained??? can't hurt!
1255 */
1256 currsize = min(skb->len - off,
1257 (unsigned int)RCVPOST_BUF_SIZE);
1258 curr->len = currsize;
1259 curr->tail += currsize;
1260 curr->data_len = 0;
1261 off += currsize;
1262 }
1263 /* assert skb->len == off */
1264 if (skb->len != off) {
1265 netdev_err(devdata->netdev,
1266 "something wrong; skb->len:%d != off:%d\n",
1267 skb->len, off);
 1268 }
1269 }
1270
 1271 /* set up packet's protocol type using ethernet header - this
1272 * sets up skb->pkt_type & it also PULLS out the eth header
1273 */
1274 skb->protocol = eth_type_trans(skb, netdev);
1275
1276 eth = eth_hdr(skb);
1277
1278 skb->csum = 0;
1279 skb->ip_summed = CHECKSUM_NONE;
1280
1281 do {
1282 if (netdev->flags & IFF_PROMISC)
1283 break; /* accept all packets */
1284 if (skb->pkt_type == PACKET_BROADCAST) {
1285 if (netdev->flags & IFF_BROADCAST)
1286 break; /* accept all broadcast packets */
1287 } else if (skb->pkt_type == PACKET_MULTICAST) {
1288 if ((netdev->flags & IFF_MULTICAST) &&
1289 (netdev_mc_count(netdev))) {
1290 struct netdev_hw_addr *ha;
1291 int found_mc = 0;
1292
1293 /* only accept multicast packets that we can
1294 * find in our multicast address list
1295 */
1296 netdev_for_each_mc_addr(ha, netdev) {
1297 if (ether_addr_equal(eth->h_dest,
1298 ha->addr)) {
1299 found_mc = 1;
1300 break;
1301 }
1302 }
 1303 /* accept pkt, dest matches a multicast addr */
 1304 if (found_mc)
 1305 break;
 1306 }
 1307 /* accept packet, h_dest must match vnic mac address */
 1308 } else if (skb->pkt_type == PACKET_HOST) {
 1309 break;
1310 } else if (skb->pkt_type == PACKET_OTHERHOST) {
1311 /* something is not right */
1312 dev_err(&devdata->netdev->dev,
1313 "**** FAILED to deliver rcv packet to OS; name:%s Dest:%pM VNIC:%pM\n",
1314 netdev->name, eth->h_dest, netdev->dev_addr);
1315 }
1316 /* drop packet - don't forward it up to OS */
1317 devdata->n_rcv_packets_not_accepted++;
1318 repost_return(cmdrsp, devdata, skb, netdev);
 1319 return rx_count;
1320 } while (0);
1321
1322 rx_count++;
1323 netif_receive_skb(skb);
 1324 /* netif_receive_skb() returns various values, but in practice most
 1325 * drivers ignore the return value
 1326 */
1327
1328 skb = NULL;
1329 /*
1330 * whether the packet got dropped or handled, the skb is freed by
1331 * kernel code, so we shouldn't free it. but we should repost a
1332 * new rcv buffer.
1333 */
1334 repost_return(cmdrsp, devdata, skb, netdev);
 1335 return rx_count;
1336}
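/* Illustrative example of the frag_list chaining above: for a received frame
 * of RCVPOST_BUF_SIZE + 200 bytes spread across two posted rcv buffers,
 * skb->len is set to the full length, the head skb keeps RCVPOST_BUF_SIZE
 * bytes in skb->data, and the second buffer is hung off frag_list with
 * curr->len = 200, so eth_type_trans() and the stack see a single frame.
 */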
1337
1338/**
1339 * devdata_initialize - Initialize devdata structure
1340 * @devdata: visornic_devdata structure to initialize
 1341 * @dev: visor_device it belongs to
1342 *
1343 * Setup initial values for the visornic based on channel and default
1344 * values.
1345 * Returns a pointer to the devdata if successful, else NULL
1346 */
1347static struct visornic_devdata *
1348devdata_initialize(struct visornic_devdata *devdata, struct visor_device *dev)
1349{
1350 if (!devdata)
1351 return NULL;
 1352 devdata->dev = dev;
 1353 devdata->incarnation_id = get_jiffies_64();
1354 return devdata;
1355}
1356
1357/**
1358 * devdata_release - Frees up references in devdata
1359 * @devdata: struct to clean up
 1360 *
 1361 * Frees up references in devdata.
1362 * Returns void
1363 */
 1364static void devdata_release(struct visornic_devdata *devdata)
 1365{
1366 kfree(devdata->rcvbuf);
1367 kfree(devdata->cmdrsp_rcv);
1368 kfree(devdata->xmit_cmdrsp);
1369}
1370
1371static const struct net_device_ops visornic_dev_ops = {
1372 .ndo_open = visornic_open,
1373 .ndo_stop = visornic_close,
1374 .ndo_start_xmit = visornic_xmit,
1375 .ndo_get_stats = visornic_get_stats,
1376 .ndo_change_mtu = visornic_change_mtu,
1377 .ndo_tx_timeout = visornic_xmit_timeout,
1378 .ndo_set_rx_mode = visornic_set_multi,
1379};
1380
1381/* DebugFS code */
1382static ssize_t info_debugfs_read(struct file *file, char __user *buf,
1383 size_t len, loff_t *offset)
1384{
1385 ssize_t bytes_read = 0;
1386 int str_pos = 0;
1387 struct visornic_devdata *devdata;
1388 struct net_device *dev;
1389 char *vbuf;
1390
1391 if (len > MAX_BUF)
1392 len = MAX_BUF;
1393 vbuf = kzalloc(len, GFP_KERNEL);
1394 if (!vbuf)
1395 return -ENOMEM;
1396
 1397 /* for each vnic channel dump out channel specific data */
1398 rcu_read_lock();
1399 for_each_netdev_rcu(current->nsproxy->net_ns, dev) {
 1400 /* Only consider netdevs that are visornic, and are open */
1401 if ((dev->netdev_ops != &visornic_dev_ops) ||
1402 (!netif_queue_stopped(dev)))
1403 continue;
1404
1405 devdata = netdev_priv(dev);
1406 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1407 "netdev = %s (0x%p), MAC Addr %pM\n",
1408 dev->name,
1409 dev,
1410 dev->dev_addr);
1411 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1412 "VisorNic Dev Info = 0x%p\n", devdata);
1413 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1414 " num_rcv_bufs = %d\n",
1415 devdata->num_rcv_bufs);
1416 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
 1417 " max_outstanding_net_xmits = %lu\n",
1418 devdata->max_outstanding_net_xmits);
1419 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
 1420 " upper_threshold_net_xmits = %lu\n",
1421 devdata->upper_threshold_net_xmits);
1422 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
 1423 " lower_threshold_net_xmits = %lu\n",
1424 devdata->lower_threshold_net_xmits);
1425 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1426 " queuefullmsg_logged = %d\n",
1427 devdata->queuefullmsg_logged);
1428 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1429 " chstat.got_rcv = %lu\n",
1430 devdata->chstat.got_rcv);
1431 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1432 " chstat.got_enbdisack = %lu\n",
1433 devdata->chstat.got_enbdisack);
1434 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1435 " chstat.got_xmit_done = %lu\n",
1436 devdata->chstat.got_xmit_done);
1437 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1438 " chstat.xmit_fail = %lu\n",
1439 devdata->chstat.xmit_fail);
1440 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1441 " chstat.sent_enbdis = %lu\n",
1442 devdata->chstat.sent_enbdis);
1443 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1444 " chstat.sent_promisc = %lu\n",
1445 devdata->chstat.sent_promisc);
1446 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1447 " chstat.sent_post = %lu\n",
1448 devdata->chstat.sent_post);
1449 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1450 " chstat.sent_post_failed = %lu\n",
1451 devdata->chstat.sent_post_failed);
1452 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1453 " chstat.sent_xmit = %lu\n",
1454 devdata->chstat.sent_xmit);
1455 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1456 " chstat.reject_count = %lu\n",
1457 devdata->chstat.reject_count);
1458 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1459 " chstat.extra_rcvbufs_sent = %lu\n",
1460 devdata->chstat.extra_rcvbufs_sent);
1461 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1462 " n_rcv0 = %lu\n", devdata->n_rcv0);
1463 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1464 " n_rcv1 = %lu\n", devdata->n_rcv1);
1465 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1466 " n_rcv2 = %lu\n", devdata->n_rcv2);
1467 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1468 " n_rcvx = %lu\n", devdata->n_rcvx);
1469 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1470 " num_rcvbuf_in_iovm = %d\n",
1471 atomic_read(&devdata->num_rcvbuf_in_iovm));
1472 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1473 " alloc_failed_in_if_needed_cnt = %lu\n",
1474 devdata->alloc_failed_in_if_needed_cnt);
1475 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1476 " alloc_failed_in_repost_rtn_cnt = %lu\n",
1477 devdata->alloc_failed_in_repost_rtn_cnt);
1478 /* str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1479 * " inner_loop_limit_reached_cnt = %lu\n",
1480 * devdata->inner_loop_limit_reached_cnt);
1481 */
1482 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1483 " found_repost_rcvbuf_cnt = %lu\n",
1484 devdata->found_repost_rcvbuf_cnt);
1485 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1486 " repost_found_skb_cnt = %lu\n",
1487 devdata->repost_found_skb_cnt);
1488 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1489 " n_repost_deficit = %lu\n",
1490 devdata->n_repost_deficit);
1491 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1492 " bad_rcv_buf = %lu\n",
1493 devdata->bad_rcv_buf);
1494 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1495 " n_rcv_packets_not_accepted = %lu\n",
1496 devdata->n_rcv_packets_not_accepted);
1497 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1498 " interrupts_rcvd = %llu\n",
1499 devdata->interrupts_rcvd);
1500 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1501 " interrupts_notme = %llu\n",
1502 devdata->interrupts_notme);
1503 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1504 " interrupts_disabled = %llu\n",
1505 devdata->interrupts_disabled);
1506 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1507 " busy_cnt = %llu\n",
1508 devdata->busy_cnt);
1509 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1510 " flow_control_upper_hits = %llu\n",
1511 devdata->flow_control_upper_hits);
1512 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1513 " flow_control_lower_hits = %llu\n",
1514 devdata->flow_control_lower_hits);
1515 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1516 " netif_queue = %s\n",
1517 netif_queue_stopped(devdata->netdev) ?
1518 "stopped" : "running");
1519 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1520 " xmits_outstanding = %lu\n",
1521 devdata_xmits_outstanding(devdata));
1522 }
1523 rcu_read_unlock();
1524 bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
1525 kfree(vbuf);
1526 return bytes_read;
1527}
1528
1529/**
1530 * send_rcv_posts_if_needed
1531 * @devdata: visornic device
1532 *
1533 * Send receive buffers to the IO Partition.
1534 * Returns void
1535 */
1536static void
1537send_rcv_posts_if_needed(struct visornic_devdata *devdata)
1538{
1539 int i;
1540 struct net_device *netdev;
1541 struct uiscmdrsp *cmdrsp = devdata->cmdrsp_rcv;
1542 int cur_num_rcv_bufs_to_alloc, rcv_bufs_allocated;
1543
1544 /* don't do this until vnic is marked ready */
1545 if (!(devdata->enabled && devdata->enab_dis_acked))
1546 return;
1547
1548 netdev = devdata->netdev;
1549 rcv_bufs_allocated = 0;
1550 /* this code is trying to prevent getting stuck here forever,
 1551 * but still retry it if you can't allocate them all this time.
1552 */
1553 cur_num_rcv_bufs_to_alloc = devdata->num_rcv_bufs_could_not_alloc;
1554 while (cur_num_rcv_bufs_to_alloc > 0) {
1555 cur_num_rcv_bufs_to_alloc--;
1556 for (i = 0; i < devdata->num_rcv_bufs; i++) {
1557 if (devdata->rcvbuf[i])
1558 continue;
1559 devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
1560 if (!devdata->rcvbuf[i]) {
1561 devdata->alloc_failed_in_if_needed_cnt++;
1562 break;
1563 }
1564 rcv_bufs_allocated++;
1565 post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
1566 devdata->chstat.extra_rcvbufs_sent++;
1567 }
1568 }
1569 devdata->num_rcv_bufs_could_not_alloc -= rcv_bufs_allocated;
1570}
1571
1572/**
1573 * drain_resp_queue - drains and ignores all messages from the resp queue
1574 * @cmdrsp: io channel command response message
1575 * @devdata: visornic device to drain
1576 */
1577static void
1578drain_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata)
1579{
1580 while (visorchannel_signalremove(devdata->dev->visorchannel,
1581 IOCHAN_FROM_IOPART,
1582 cmdrsp))
1583 ;
1584}
1585
1586/**
1587 * service_resp_queue - drains the response queue
1588 * @cmdrsp: io channel command response message
1589 * @devdata: visornic device to drain
1590 *
 1591 * Drain the response queue of any responses from the IO partition.
 1592 * Process the responses as we get them.
 1593 * Returns when the response queue is empty or the rx budget is exhausted.
1594 */
1595static void
 1596service_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
 1597 int *rx_work_done, int budget)
1598{
1599 unsigned long flags;
1600 struct net_device *netdev;
1601
 1602 while (*rx_work_done < budget) {
1603 /* TODO: CLIENT ACQUIRE -- Don't really need this at the
1604 * moment
1605 */
1606 if (!visorchannel_signalremove(devdata->dev->visorchannel,
1607 IOCHAN_FROM_IOPART,
1608 cmdrsp))
1609 break; /* queue empty */
1610
1611 switch (cmdrsp->net.type) {
1612 case NET_RCV:
1613 devdata->chstat.got_rcv++;
1614 /* process incoming packet */
 1615 *rx_work_done += visornic_rx(cmdrsp);
1616 break;
1617 case NET_XMIT_DONE:
1618 spin_lock_irqsave(&devdata->priv_lock, flags);
1619 devdata->chstat.got_xmit_done++;
1620 if (cmdrsp->net.xmtdone.xmt_done_result)
1621 devdata->chstat.xmit_fail++;
1622 /* only call queue wake if we stopped it */
1623 netdev = ((struct sk_buff *)cmdrsp->net.buf)->dev;
1624 /* ASSERT netdev == vnicinfo->netdev; */
1625 if ((netdev == devdata->netdev) &&
1626 netif_queue_stopped(netdev)) {
1627 /* check if we have crossed the lower watermark
1628 * for netif_wake_queue()
 1629 */
1630 if (vnic_hit_low_watermark
1631 (devdata,
1632 devdata->lower_threshold_net_xmits)) {
1633 /* enough NET_XMITs completed
1634 * so can restart netif queue
1635 */
1636 netif_wake_queue(netdev);
1637 devdata->flow_control_lower_hits++;
1638 }
 1639 }
1640 skb_unlink(cmdrsp->net.buf, &devdata->xmitbufhead);
1641 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1642 kfree_skb(cmdrsp->net.buf);
 1643 break;
1644 case NET_RCV_ENBDIS_ACK:
1645 devdata->chstat.got_enbdisack++;
1646 netdev = (struct net_device *)
1647 cmdrsp->net.enbdis.context;
 1648 spin_lock_irqsave(&devdata->priv_lock, flags);
1649 devdata->enab_dis_acked = 1;
1650 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1651
1652 if (devdata->server_down &&
1653 devdata->server_change_state) {
1654 /* Inform Linux that the link is up */
1655 devdata->server_down = false;
1656 devdata->server_change_state = false;
1657 netif_wake_queue(netdev);
1658 netif_carrier_on(netdev);
1659 }
1660 break;
1661 case NET_CONNECT_STATUS:
1662 netdev = devdata->netdev;
1663 if (cmdrsp->net.enbdis.enable == 1) {
1664 spin_lock_irqsave(&devdata->priv_lock, flags);
1665 devdata->enabled = cmdrsp->net.enbdis.enable;
1666 spin_unlock_irqrestore(&devdata->priv_lock,
1667 flags);
1668 netif_wake_queue(netdev);
1669 netif_carrier_on(netdev);
1670 } else {
1671 netif_stop_queue(netdev);
1672 netif_carrier_off(netdev);
1673 spin_lock_irqsave(&devdata->priv_lock, flags);
1674 devdata->enabled = cmdrsp->net.enbdis.enable;
1675 spin_unlock_irqrestore(&devdata->priv_lock,
1676 flags);
1677 }
1678 break;
1679 default:
1680 break;
87a9404e 1681 }
7c03621a 1682 /* cmdrsp is now available for reuse */
68905a14
DK
1683 }
1684}
1685
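/* visornic_poll() follows the usual NAPI contract: service_resp_queue()
 * processes up to @budget receive completions, and napi_complete() is only
 * called when fewer than @budget packets were handled, i.e. the response
 * queue ran dry.  The irq_poll_timer (see poll_for_irq() below) re-schedules
 * the napi instance whenever new responses appear in the channel.
 */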
946b2546
NH
1686static int visornic_poll(struct napi_struct *napi, int budget)
1687{
1688 struct visornic_devdata *devdata = container_of(napi,
1689 struct visornic_devdata,
1690 napi);
1691 int rx_count = 0;
1692
1693 send_rcv_posts_if_needed(devdata);
61dd330a 1694 service_resp_queue(devdata->cmdrsp, devdata, &rx_count, budget);
946b2546 1695
77c9a4ae 1696 /* If there aren't any more packets to receive, stop the poll */
946b2546
NH
1697 if (rx_count < budget)
1698 napi_complete(napi);
1699
1700 return rx_count;
1701}
1702
68905a14 1703/**
946b2546 1704 * poll_for_irq - Checks the status of the response queue.
68905a14
DK
1705 * @v: void pointer to the visornic devdata
1706 *
1707 * Runs as the irq_poll_timer callback. Periodically check the
1708 * response queue and schedule the napi poll routine to drain it if
1709 * needed.
1710 */
946b2546
NH
1711static void
1712poll_for_irq(unsigned long v)
68905a14 1713{
946b2546 1714 struct visornic_devdata *devdata = (struct visornic_devdata *)v;
68905a14 1715
946b2546
NH
1716 if (!visorchannel_signalempty(
1717 devdata->dev->visorchannel,
1718 IOCHAN_FROM_IOPART))
1719 napi_schedule(&devdata->napi);
68905a14 1720
946b2546 1721 atomic_set(&devdata->interrupt_rcvd, 0);
68905a14 1722
946b2546 1723 mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));
68905a14
DK
1724}
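/* The driver marks the channel as polling (ULTRA_IO_CHANNEL_IS_POLLING is
 * set in visornic_probe()), so the irq_poll_timer above effectively takes
 * the place of a receive interrupt: it re-arms itself and schedules NAPI
 * whenever the response queue is non-empty.
 */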
1725
1726/**
1727 * visornic_probe - probe function for visornic devices
1728 * @dev: The visor device discovered
1729 *
1730 * Called when visorbus discovers a visornic device on its
1731 * bus. It creates a new visornic ethernet adapter.
1732 * Returns 0 or negative for error.
1733 */
1734static int visornic_probe(struct visor_device *dev)
1735{
1736 struct visornic_devdata *devdata = NULL;
1737 struct net_device *netdev = NULL;
1738 int err;
1739 int channel_offset = 0;
1740 u64 features;
1741
1742 netdev = alloc_etherdev(sizeof(struct visornic_devdata));
00748b0c
TS
1743 if (!netdev) {
1744 dev_err(&dev->device,
1745 "%s alloc_etherdev failed\n", __func__);
68905a14 1746 return -ENOMEM;
00748b0c 1747 }
68905a14
DK
1748
1749 netdev->netdev_ops = &visornic_dev_ops;
90cb147f 1750 netdev->watchdog_timeo = 5 * HZ;
051e9fbb 1751 SET_NETDEV_DEV(netdev, &dev->device);
68905a14
DK
1752
1753 /* Get MAC address from channel and read it into the device. */
1754 netdev->addr_len = ETH_ALEN;
1755 channel_offset = offsetof(struct spar_io_channel_protocol,
1756 vnic.macaddr);
1757 err = visorbus_read_channel(dev, channel_offset, netdev->dev_addr,
1758 ETH_ALEN);
00748b0c
TS
1759 if (err < 0) {
1760 dev_err(&dev->device,
1761 "%s failed to get mac addr from chan (%d)\n",
1762 __func__, err);
68905a14 1763 goto cleanup_netdev;
00748b0c 1764 }
68905a14
DK
1765
1766 devdata = devdata_initialize(netdev_priv(netdev), dev);
1767 if (!devdata) {
00748b0c
TS
1768 dev_err(&dev->device,
1769 "%s devdata_initialize failed\n", __func__);
68905a14
DK
1770 err = -ENOMEM;
1771 goto cleanup_netdev;
1772 }
91678f37
TS
1773 /* don't trust messages lying around in the channel */
1774 drain_resp_queue(devdata->cmdrsp, devdata);
68905a14
DK
1775
1776 devdata->netdev = netdev;
5deeea33 1777 dev_set_drvdata(&dev->device, devdata);
68905a14
DK
1778 init_waitqueue_head(&devdata->rsp_queue);
1779 spin_lock_init(&devdata->priv_lock);
1780 devdata->enabled = 0; /* not yet */
1781 atomic_set(&devdata->usage, 1);
1782
1783 /* Setup rcv bufs */
1784 channel_offset = offsetof(struct spar_io_channel_protocol,
1785 vnic.num_rcv_bufs);
1786 err = visorbus_read_channel(dev, channel_offset,
1787 &devdata->num_rcv_bufs, 4);
00748b0c
TS
1788 if (err) {
1789 dev_err(&dev->device,
1790 "%s failed to get #rcv bufs from chan (%d)\n",
1791 __func__, err);
68905a14 1792 goto cleanup_netdev;
00748b0c 1793 }
68905a14 1794
5e757bc5
SB
1795 devdata->rcvbuf = kcalloc(devdata->num_rcv_bufs,
1796 sizeof(struct sk_buff *), GFP_KERNEL);
68905a14
DK
1797 if (!devdata->rcvbuf) {
1798 err = -ENOMEM;
d12324e3 1799 goto cleanup_netdev;
68905a14
DK
1800 }
1801
1802 /* set the net_xmit outstanding threshold */
1803 /* always leave two slots open but you should have 3 at a minimum */
36927c18 1804 /* note that max_outstanding_net_xmits must be > 0 */
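	/* worked example (illustrative only, assuming num_rcv_bufs == 64): */
	/*   max_outstanding_net_xmits = max(3, 64/3 - 2) = 19 */
	/*   upper_threshold_net_xmits = max(2, 19 - 1)   = 18 */
	/*   lower_threshold_net_xmits = max(1, 19 / 2)   = 9  */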
68905a14 1805 devdata->max_outstanding_net_xmits =
36927c18 1806 max_t(unsigned long, 3, ((devdata->num_rcv_bufs / 3) - 2));
68905a14 1807 devdata->upper_threshold_net_xmits =
36927c18
TS
1808 max_t(unsigned long,
1809 2, (devdata->max_outstanding_net_xmits - 1));
68905a14 1810 devdata->lower_threshold_net_xmits =
36927c18
TS
1811 max_t(unsigned long,
1812 1, (devdata->max_outstanding_net_xmits / 2));
68905a14
DK
1813
1814 skb_queue_head_init(&devdata->xmitbufhead);
1815
1816 /* create a cmdrsp we can use to post and unpost rcv buffers */
1817 devdata->cmdrsp_rcv = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
1818 if (!devdata->cmdrsp_rcv) {
1819 err = -ENOMEM;
d12324e3 1820 goto cleanup_rcvbuf;
68905a14
DK
1821 }
1822 devdata->xmit_cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
1823 if (!devdata->xmit_cmdrsp) {
1824 err = -ENOMEM;
d12324e3 1825 goto cleanup_cmdrsp_rcv;
68905a14 1826 }
68905a14
DK
1827 INIT_WORK(&devdata->timeout_reset, visornic_timeout_reset);
1828 devdata->server_down = false;
1829 devdata->server_change_state = false;
1830
1831 /* set the default mtu */
1832 channel_offset = offsetof(struct spar_io_channel_protocol,
1833 vnic.mtu);
1834 err = visorbus_read_channel(dev, channel_offset, &netdev->mtu, 4);
00748b0c
TS
1835 if (err) {
1836 dev_err(&dev->device,
1837 "%s failed to get mtu from chan (%d)\n",
1838 __func__, err);
68905a14 1839 goto cleanup_xmit_cmdrsp;
00748b0c 1840 }
68905a14
DK
1841
1842 /* TODO: Setup Interrupt information */
1843 /* Let's start our threads to get responses */
946b2546
NH
1844 netif_napi_add(netdev, &devdata->napi, visornic_poll, 64);
1845
1846 setup_timer(&devdata->irq_poll_timer, poll_for_irq,
1847 (unsigned long)devdata);
77c9a4ae 1848 /* Note: This timer has to start running before the while
946b2546
NH
1849 * loop below because the napi routine is responsible for
1850 * setting enab_dis_acked
1851 */
1852 mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));
1853
68905a14
DK
1854 channel_offset = offsetof(struct spar_io_channel_protocol,
1855 channel_header.features);
1856 err = visorbus_read_channel(dev, channel_offset, &features, 8);
00748b0c
TS
1857 if (err) {
1858 dev_err(&dev->device,
1859 "%s failed to get features from chan (%d)\n",
1860 __func__, err);
946b2546 1861 goto cleanup_napi_add;
00748b0c 1862 }
68905a14
DK
1863
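	/* advertise how we intend to drive the channel: we poll for
	 * responses ourselves (ULTRA_IO_CHANNEL_IS_POLLING) and we understand
	 * the enhanced rcvbuf checking protocol (a reading of the flag names;
	 * the authoritative definitions live in the channel headers)
	 */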
1864 features |= ULTRA_IO_CHANNEL_IS_POLLING;
91678f37 1865 features |= ULTRA_IO_DRIVER_SUPPORTS_ENHANCED_RCVBUF_CHECKING;
68905a14 1866 err = visorbus_write_channel(dev, channel_offset, &features, 8);
00748b0c
TS
1867 if (err) {
1868 dev_err(&dev->device,
1869 "%s failed to set features in chan (%d)\n",
1870 __func__, err);
946b2546 1871 goto cleanup_napi_add;
00748b0c 1872 }
68905a14 1873
61dd330a
DK
1874 /* Let's start our threads to get responses */
1875 netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT);
1876
77c9a4ae 1877 /* Note: Interrupts have to be enabled before the while
61dd330a
DK
1878 * loop below because the napi routine is responsible for
1879 * setting enab_dis_acked
1880 */
1881 visorbus_enable_channel_interrupts(dev);
1882
68905a14 1883 err = register_netdev(netdev);
00748b0c
TS
1884 if (err) {
1885 dev_err(&dev->device,
1886 "%s register_netdev failed (%d)\n", __func__, err);
946b2546 1887 goto cleanup_napi_add;
00748b0c 1888 }
68905a14
DK
1889
1890 /* create debug/sysfs directories */
1891 devdata->eth_debugfs_dir = debugfs_create_dir(netdev->name,
1892 visornic_debugfs_dir);
1893 if (!devdata->eth_debugfs_dir) {
00748b0c
TS
1894 dev_err(&dev->device,
1895 "%s debugfs_create_dir %s failed\n",
1896 __func__, netdev->name);
68905a14 1897 err = -ENOMEM;
5b12100a 1898 goto cleanup_register_netdev;
68905a14
DK
1899 }
1900
00748b0c
TS
1901 dev_info(&dev->device, "%s success netdev=%s\n",
1902 __func__, netdev->name);
68905a14
DK
1903 return 0;
1904
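/* error unwind: a failed step jumps to the label that undoes the most recent
 * successful step; the labels fall through to one another so resources are
 * released in reverse order of allocation
 */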
5b12100a
DK
1905cleanup_register_netdev:
1906 unregister_netdev(netdev);
1907
946b2546
NH
1908cleanup_napi_add:
1909 del_timer_sync(&devdata->irq_poll_timer);
1910 netif_napi_del(&devdata->napi);
1911
68905a14
DK
1912cleanup_xmit_cmdrsp:
1913 kfree(devdata->xmit_cmdrsp);
1914
1915cleanup_cmdrsp_rcv:
1916 kfree(devdata->cmdrsp_rcv);
1917
1918cleanup_rcvbuf:
1919 kfree(devdata->rcvbuf);
1920
1921cleanup_netdev:
1922 free_netdev(netdev);
1923 return err;
1924}
1925
1926/**
1927 * host_side_disappeared - IO part is gone.
1928 * @devdata: device object
1929 *
1930 * IO partition servicing this device is gone, do cleanup
1931 * Returns void.
1932 */
1933static void host_side_disappeared(struct visornic_devdata *devdata)
1934{
1935 unsigned long flags;
1936
1937 spin_lock_irqsave(&devdata->priv_lock, flags);
68905a14
DK
1938 devdata->dev = NULL; /* indicate device destroyed */
1939 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1940}
1941
1942/**
1943 * visornic_remove - Called when visornic dev goes away
1944 * @dev: visornic device that is being removed
1945 *
1946 * Called when DEVICE_DESTROY gets called to remove device.
1947 * Returns void
1948 */
1949static void visornic_remove(struct visor_device *dev)
1950{
1951 struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
46df8226
TS
1952 struct net_device *netdev;
1953 unsigned long flags;
68905a14 1954
00748b0c
TS
1955 if (!devdata) {
1956 dev_err(&dev->device, "%s no devdata\n", __func__);
68905a14 1957 return;
00748b0c 1958 }
46df8226
TS
1959 spin_lock_irqsave(&devdata->priv_lock, flags);
1960 if (devdata->going_away) {
1961 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1962 dev_err(&dev->device, "%s already being removed\n", __func__);
1963 return;
1964 }
1965 devdata->going_away = true;
1966 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1967 netdev = devdata->netdev;
1968 if (!netdev) {
1969 dev_err(&dev->device, "%s not net device\n", __func__);
1970 return;
1971 }
1972
1973 /* going_away prevents new items being added to the workqueues */
ce388d7e 1974 cancel_work_sync(&devdata->timeout_reset);
46df8226
TS
1975
1976 debugfs_remove_recursive(devdata->eth_debugfs_dir);
1977
1978 unregister_netdev(netdev); /* this will call visornic_close() */
1979
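	/* stop the poll timer before deleting the napi instance so nothing
	 * can schedule napi once it has been removed
	 */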
946b2546
NH
1980 del_timer_sync(&devdata->irq_poll_timer);
1981 netif_napi_del(&devdata->napi);
46df8226 1982
68905a14
DK
1983 dev_set_drvdata(&dev->device, NULL);
1984 host_side_disappeared(devdata);
8d0119d8 1985 devdata_release(devdata);
46df8226 1986 free_netdev(netdev);
68905a14
DK
1987}
1988
1989/**
1990 * visornic_pause - Called when IO Part disappears
1991 * @dev: visornic device that is being serviced
1992 * @complete_func: call when finished.
1993 *
1994 * Called when the IO Partition has gone down. Need to free
1995 * up resources and wait for IO partition to come back. Mark
1996 * link as down and don't attempt any DMA. When we have freed
1997 * memory call the complete_func so that Command knows we are
1998 * done. If we don't call complete_func, IO part will never
1999 * come back.
2000 * Returns 0 for success.
2001 */
2002static int visornic_pause(struct visor_device *dev,
2003 visorbus_state_complete_func complete_func)
2004{
2005 struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
2006
d01da5ea 2007 visornic_serverdown(devdata, complete_func);
68905a14
DK
2008 return 0;
2009}
2010
2011/**
2012 * visornic_resume - Called when IO part has recovered
2013 * @dev: visornic device that is being serviced
2014 * @complete_func: call when finished
2015 *
2016 * Called when the IO partition has recovered. Reestablish
2017 * connection to the IO part and set the link up. Okay to do
2018 * DMA again.
2019 * Returns 0 for success.
2020 */
2021static int visornic_resume(struct visor_device *dev,
2022 visorbus_state_complete_func complete_func)
2023{
2024 struct visornic_devdata *devdata;
2025 struct net_device *netdev;
2026 unsigned long flags;
2027
2028 devdata = dev_get_drvdata(&dev->device);
00748b0c
TS
2029 if (!devdata) {
2030 dev_err(&dev->device, "%s no devdata\n", __func__);
68905a14 2031 return -EINVAL;
00748b0c 2032 }
68905a14
DK
2033
2034 netdev = devdata->netdev;
2035
c847020e
TS
2036 spin_lock_irqsave(&devdata->priv_lock, flags);
2037 if (devdata->server_change_state) {
68905a14 2038 spin_unlock_irqrestore(&devdata->priv_lock, flags);
c847020e 2039 dev_err(&dev->device, "%s server already changing state\n",
00748b0c 2040 __func__);
c847020e 2041 return -EINVAL;
68905a14 2042 }
c847020e
TS
2043 if (!devdata->server_down) {
2044 spin_unlock_irqrestore(&devdata->priv_lock, flags);
2045 dev_err(&dev->device, "%s server not down\n", __func__);
2046 complete_func(dev, 0);
2047 return 0;
2048 }
2049 devdata->server_change_state = true;
2050 spin_unlock_irqrestore(&devdata->priv_lock, flags);
946b2546 2051
c847020e
TS
2052 /* Must transition channel to ATTACHED state BEFORE
2053 * we can start using the device again.
2054 * TODO: State transitions
2055 */
946b2546
NH
2056 mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));
2057
2058 init_rcv_bufs(netdev, devdata);
c847020e
TS
2059
2060 rtnl_lock();
2061 dev_open(netdev);
2062 rtnl_unlock();
68905a14
DK
2063
2064 complete_func(dev, 0);
2065 return 0;
2066}
2067
2068/**
2069 * visornic_init - Init function
2070 *
2071 * Init function for the visornic driver. Do initial driver setup
2072 * and wait for devices.
2073 * Returns 0 for success, negative for error.
2074 */
2075static int visornic_init(void)
2076{
2077 struct dentry *ret;
2078 int err = -ENOMEM;
2079
68905a14
DK
2080 visornic_debugfs_dir = debugfs_create_dir("visornic", NULL);
2081 if (!visornic_debugfs_dir)
2082 return err;
2083
2084 ret = debugfs_create_file("info", S_IRUSR, visornic_debugfs_dir, NULL,
2085 &debugfs_info_fops);
2086 if (!ret)
2087 goto cleanup_debugfs;
2088 ret = debugfs_create_file("enable_ints", S_IWUSR, visornic_debugfs_dir,
2089 NULL, &debugfs_enable_ints_fops);
2090 if (!ret)
2091 goto cleanup_debugfs;
2092
8b5081c8
BR
2093 err = visorbus_register_visor_driver(&visornic_driver);
2094 if (!err)
2095 return 0;
68905a14 2096
68905a14
DK
2097cleanup_debugfs:
2098 debugfs_remove_recursive(visornic_debugfs_dir);
2099
2100 return err;
2101}
2102
2103/**
2104 * visornic_cleanup - driver exit routine
2105 *
2106 * Unregister driver from the bus and free up memory.
2107 */
2108static void visornic_cleanup(void)
2109{
3798ff31
TS
2110 visorbus_unregister_visor_driver(&visornic_driver);
2111
68905a14 2112 debugfs_remove_recursive(visornic_debugfs_dir);
68905a14
DK
2113}
2114
2115module_init(visornic_init);
2116module_exit(visornic_cleanup);
2117
2118MODULE_AUTHOR("Unisys");
2119MODULE_LICENSE("GPL");
2120MODULE_DESCRIPTION("sPAR nic driver for sparlinux: ver 1.0.0.0");
2121MODULE_VERSION("1.0.0.0");