[PATCH] s390: some minor qeth driver fixes
1/*
2 *
 3 * linux/drivers/s390/net/qeth_main.c ($Revision: 1.246 $)
4 *
5 * Linux on zSeries OSA Express and HiperSockets support
6 *
7 * Copyright 2000,2003 IBM Corporation
8 *
9 * Author(s): Original Code written by
10 * Utz Bacher (utz.bacher@de.ibm.com)
11 * Rewritten by
 12 *    Frank Pavlic (fpavlic@de.ibm.com) and
13 * Thomas Spatzier <tspat@de.ibm.com>
14 *
 15 * $Revision: 1.242 $  $Date: 2005/05/04 20:19:18 $
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2, or (at your option)
20 * any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 */
31
32
33#include <linux/config.h>
34#include <linux/module.h>
35#include <linux/moduleparam.h>
36#include <linux/string.h>
37#include <linux/errno.h>
38#include <linux/mm.h>
39#include <linux/ip.h>
40#include <linux/inetdevice.h>
41#include <linux/netdevice.h>
42#include <linux/sched.h>
43#include <linux/workqueue.h>
44#include <linux/kernel.h>
45#include <linux/slab.h>
46#include <linux/interrupt.h>
47#include <linux/tcp.h>
48#include <linux/icmp.h>
49#include <linux/skbuff.h>
50#include <linux/in.h>
51#include <linux/igmp.h>
52#include <linux/init.h>
53#include <linux/reboot.h>
54#include <linux/mii.h>
55#include <linux/rcupdate.h>
56#include <linux/ethtool.h>
57
58#include <net/arp.h>
59#include <net/ip.h>
60#include <net/route.h>
61
62#include <asm/ebcdic.h>
63#include <asm/io.h>
64#include <asm/qeth.h>
65#include <asm/timex.h>
66#include <asm/semaphore.h>
67#include <asm/uaccess.h>
68
69#include "qeth.h"
70#include "qeth_mpc.h"
71#include "qeth_fs.h"
72#include "qeth_eddp.h"
73#include "qeth_tso.h"
74
 75#define VERSION_QETH_C "$Revision: 1.246 $"
76static const char *version = "qeth S/390 OSA-Express driver";
77
78/**
79 * Debug Facility Stuff
80 */
81static debug_info_t *qeth_dbf_setup = NULL;
82static debug_info_t *qeth_dbf_data = NULL;
83static debug_info_t *qeth_dbf_misc = NULL;
84static debug_info_t *qeth_dbf_control = NULL;
85debug_info_t *qeth_dbf_trace = NULL;
86static debug_info_t *qeth_dbf_sense = NULL;
87static debug_info_t *qeth_dbf_qerr = NULL;
88
89DEFINE_PER_CPU(char[256], qeth_dbf_txt_buf);
90
91/**
92 * some more definitions and declarations
93 */
94static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY;
95
96/* list of our cards */
97struct qeth_card_list_struct qeth_card_list;
 98/* list of processes that want to be notified */
99spinlock_t qeth_notify_lock;
100struct list_head qeth_notify_list;
101
102static void qeth_send_control_data_cb(struct qeth_channel *,
103 struct qeth_cmd_buffer *);
104
105/**
106 * here we go with function implementation
107 */
108static void
109qeth_init_qdio_info(struct qeth_card *card);
110
111static int
112qeth_init_qdio_queues(struct qeth_card *card);
113
114static int
115qeth_alloc_qdio_buffers(struct qeth_card *card);
116
117static void
118qeth_free_qdio_buffers(struct qeth_card *);
119
120static void
121qeth_clear_qdio_buffers(struct qeth_card *);
122
123static void
124qeth_clear_ip_list(struct qeth_card *, int, int);
125
126static void
127qeth_clear_ipacmd_list(struct qeth_card *);
128
129static int
130qeth_qdio_clear_card(struct qeth_card *, int);
131
132static void
133qeth_clear_working_pool_list(struct qeth_card *);
134
135static void
136qeth_clear_cmd_buffers(struct qeth_channel *);
137
138static int
139qeth_stop(struct net_device *);
140
141static void
142qeth_clear_ipato_list(struct qeth_card *);
143
144static int
145qeth_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
146
147static void
148qeth_irq_tasklet(unsigned long);
149
150static int
151qeth_set_online(struct ccwgroup_device *);
152
153static int
154__qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode);
155
156static struct qeth_ipaddr *
157qeth_get_addr_buffer(enum qeth_prot_versions);
158
159static void
160qeth_set_multicast_list(struct net_device *);
161
162static void
163qeth_setadp_promisc_mode(struct qeth_card *);
164
165static void
166qeth_notify_processes(void)
167{
168 /*notify all registered processes */
169 struct qeth_notify_list_struct *n_entry;
170
171 QETH_DBF_TEXT(trace,3,"procnoti");
172 spin_lock(&qeth_notify_lock);
173 list_for_each_entry(n_entry, &qeth_notify_list, list) {
174 send_sig(n_entry->signum, n_entry->task, 1);
175 }
176 spin_unlock(&qeth_notify_lock);
177
178}
179int
180qeth_notifier_unregister(struct task_struct *p)
181{
182 struct qeth_notify_list_struct *n_entry, *tmp;
183
184 QETH_DBF_TEXT(trace, 2, "notunreg");
185 spin_lock(&qeth_notify_lock);
186 list_for_each_entry_safe(n_entry, tmp, &qeth_notify_list, list) {
187 if (n_entry->task == p) {
188 list_del(&n_entry->list);
189 kfree(n_entry);
190 goto out;
191 }
192 }
193out:
194 spin_unlock(&qeth_notify_lock);
195 return 0;
196}
197int
198qeth_notifier_register(struct task_struct *p, int signum)
199{
200 struct qeth_notify_list_struct *n_entry;
201
202 /*check first if entry already exists*/
203 spin_lock(&qeth_notify_lock);
204 list_for_each_entry(n_entry, &qeth_notify_list, list) {
205 if (n_entry->task == p) {
206 n_entry->signum = signum;
207 spin_unlock(&qeth_notify_lock);
208 return 0;
209 }
210 }
211 spin_unlock(&qeth_notify_lock);
212
213 n_entry = (struct qeth_notify_list_struct *)
214 kmalloc(sizeof(struct qeth_notify_list_struct),GFP_KERNEL);
215 if (!n_entry)
216 return -ENOMEM;
217 n_entry->task = p;
218 n_entry->signum = signum;
219 spin_lock(&qeth_notify_lock);
220 list_add(&n_entry->list,&qeth_notify_list);
221 spin_unlock(&qeth_notify_lock);
222 return 0;
223}
224
225
226/**
227 * free channel command buffers
228 */
229static void
230qeth_clean_channel(struct qeth_channel *channel)
231{
232 int cnt;
233
234 QETH_DBF_TEXT(setup, 2, "freech");
235 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
236 kfree(channel->iob[cnt].data);
237}
238
239/**
240 * free card
241 */
242static void
243qeth_free_card(struct qeth_card *card)
244{
245
246 QETH_DBF_TEXT(setup, 2, "freecrd");
247 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
248 qeth_clean_channel(&card->read);
249 qeth_clean_channel(&card->write);
250 if (card->dev)
251 free_netdev(card->dev);
252 qeth_clear_ip_list(card, 0, 0);
253 qeth_clear_ipato_list(card);
254 kfree(card->ip_tbd_list);
255 qeth_free_qdio_buffers(card);
256 kfree(card);
257}
258
259/**
260 * alloc memory for command buffer per channel
261 */
262static int
263qeth_setup_channel(struct qeth_channel *channel)
264{
265 int cnt;
266
267 QETH_DBF_TEXT(setup, 2, "setupch");
268 for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
269 channel->iob[cnt].data = (char *)
270 kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
271 if (channel->iob[cnt].data == NULL)
272 break;
273 channel->iob[cnt].state = BUF_STATE_FREE;
274 channel->iob[cnt].channel = channel;
275 channel->iob[cnt].callback = qeth_send_control_data_cb;
276 channel->iob[cnt].rc = 0;
277 }
278 if (cnt < QETH_CMD_BUFFER_NO) {
279 while (cnt-- > 0)
280 kfree(channel->iob[cnt].data);
281 return -ENOMEM;
282 }
283 channel->buf_no = 0;
284 channel->io_buf_no = 0;
285 atomic_set(&channel->irq_pending, 0);
286 spin_lock_init(&channel->iob_lock);
287
288 init_waitqueue_head(&channel->wait_q);
289 channel->irq_tasklet.data = (unsigned long) channel;
290 channel->irq_tasklet.func = qeth_irq_tasklet;
291 return 0;
292}
293
294/**
295 * alloc memory for card structure
296 */
297static struct qeth_card *
298qeth_alloc_card(void)
299{
300 struct qeth_card *card;
301
302 QETH_DBF_TEXT(setup, 2, "alloccrd");
303 card = (struct qeth_card *) kmalloc(sizeof(struct qeth_card),
304 GFP_DMA|GFP_KERNEL);
305 if (!card)
306 return NULL;
307 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
308 memset(card, 0, sizeof(struct qeth_card));
309 if (qeth_setup_channel(&card->read)) {
310 kfree(card);
311 return NULL;
312 }
313 if (qeth_setup_channel(&card->write)) {
314 qeth_clean_channel(&card->read);
315 kfree(card);
316 return NULL;
317 }
318 return card;
319}
320
321static long
322__qeth_check_irb_error(struct ccw_device *cdev, struct irb *irb)
323{
324 if (!IS_ERR(irb))
325 return 0;
326
327 switch (PTR_ERR(irb)) {
328 case -EIO:
329 PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id);
330 QETH_DBF_TEXT(trace, 2, "ckirberr");
331 QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO);
332 break;
333 case -ETIMEDOUT:
334 PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
335 QETH_DBF_TEXT(trace, 2, "ckirberr");
336 QETH_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
337 break;
338 default:
339 PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
340 cdev->dev.bus_id);
341 QETH_DBF_TEXT(trace, 2, "ckirberr");
342 QETH_DBF_TEXT(trace, 2, " rc???");
343 }
344 return PTR_ERR(irb);
345}
346
347static int
348qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
349{
350 int dstat,cstat;
351 char *sense;
352
353 sense = (char *) irb->ecw;
354 cstat = irb->scsw.cstat;
355 dstat = irb->scsw.dstat;
356
357 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
358 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
359 SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
360 QETH_DBF_TEXT(trace,2, "CGENCHK");
361 PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ",
362 cdev->dev.bus_id, dstat, cstat);
363 HEXDUMP16(WARN, "irb: ", irb);
364 HEXDUMP16(WARN, "irb: ", ((char *) irb) + 32);
365 return 1;
366 }
367
368 if (dstat & DEV_STAT_UNIT_CHECK) {
369 if (sense[SENSE_RESETTING_EVENT_BYTE] &
370 SENSE_RESETTING_EVENT_FLAG) {
371 QETH_DBF_TEXT(trace,2,"REVIND");
372 return 1;
373 }
374 if (sense[SENSE_COMMAND_REJECT_BYTE] &
375 SENSE_COMMAND_REJECT_FLAG) {
376 QETH_DBF_TEXT(trace,2,"CMDREJi");
377 return 0;
378 }
379 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
380 QETH_DBF_TEXT(trace,2,"AFFE");
381 return 1;
382 }
383 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
384 QETH_DBF_TEXT(trace,2,"ZEROSEN");
385 return 0;
386 }
387 QETH_DBF_TEXT(trace,2,"DGENCHK");
388 return 1;
389 }
390 return 0;
391}
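/*
 * Summary of the classification above: a non-zero return asks the caller
 * (qeth_irq) to schedule recovery. Channel checks, a resetting-event sense
 * byte and the 0xaf/0xfe sense pattern are treated as fatal; a command
 * reject or an all-zero sense buffer is tolerated and returns 0.
 */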
392static int qeth_issue_next_read(struct qeth_card *);
393
394/**
395 * interrupt handler
396 */
397static void
398qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
399{
400 int rc;
401 int cstat,dstat;
402 struct qeth_cmd_buffer *buffer;
403 struct qeth_channel *channel;
404 struct qeth_card *card;
405
406 QETH_DBF_TEXT(trace,5,"irq");
407
408 if (__qeth_check_irb_error(cdev, irb))
409 return;
410 cstat = irb->scsw.cstat;
411 dstat = irb->scsw.dstat;
412
413 card = CARD_FROM_CDEV(cdev);
414 if (!card)
415 return;
416
417 if (card->read.ccwdev == cdev){
418 channel = &card->read;
419 QETH_DBF_TEXT(trace,5,"read");
420 } else if (card->write.ccwdev == cdev) {
421 channel = &card->write;
422 QETH_DBF_TEXT(trace,5,"write");
423 } else {
424 channel = &card->data;
425 QETH_DBF_TEXT(trace,5,"data");
426 }
427 atomic_set(&channel->irq_pending, 0);
428
429 if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC))
430 channel->state = CH_STATE_STOPPED;
431
432 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC))
433 channel->state = CH_STATE_HALTED;
434
435 /*let's wake up immediately on data channel*/
436 if ((channel == &card->data) && (intparm != 0))
437 goto out;
438
439 if (intparm == QETH_CLEAR_CHANNEL_PARM) {
440 QETH_DBF_TEXT(trace, 6, "clrchpar");
441 /* we don't have to handle this further */
442 intparm = 0;
443 }
444 if (intparm == QETH_HALT_CHANNEL_PARM) {
445 QETH_DBF_TEXT(trace, 6, "hltchpar");
446 /* we don't have to handle this further */
447 intparm = 0;
448 }
449 if ((dstat & DEV_STAT_UNIT_EXCEP) ||
450 (dstat & DEV_STAT_UNIT_CHECK) ||
451 (cstat)) {
452 if (irb->esw.esw0.erw.cons) {
453 /* TODO: we should make this s390dbf */
454 PRINT_WARN("sense data available on channel %s.\n",
455 CHANNEL_ID(channel));
456 PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat);
457 HEXDUMP16(WARN,"irb: ",irb);
458 HEXDUMP16(WARN,"sense data: ",irb->ecw);
459 }
460 rc = qeth_get_problem(cdev,irb);
461 if (rc) {
462 qeth_schedule_recovery(card);
463 goto out;
464 }
465 }
466
467 if (intparm) {
468 buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
469 buffer->state = BUF_STATE_PROCESSED;
470 }
471 if (channel == &card->data)
472 return;
473
474 if (channel == &card->read &&
475 channel->state == CH_STATE_UP)
476 qeth_issue_next_read(card);
477
478 tasklet_schedule(&channel->irq_tasklet);
479 return;
480out:
481 wake_up(&card->wait_q);
482}
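/*
 * The interrupt path above in short: map the ccw device to its channel
 * (read/write/data), clear irq_pending, translate clear/halt function
 * bits into channel states, run the error checks, mark the command
 * buffer referenced by intparm as processed and hand further work to
 * the channel tasklet. Data-channel completions only wake card->wait_q.
 */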
483
484/**
485 * tasklet function scheduled from irq handler
486 */
487static void
488qeth_irq_tasklet(unsigned long data)
489{
490 struct qeth_card *card;
491 struct qeth_channel *channel;
492 struct qeth_cmd_buffer *iob;
493 __u8 index;
494
495 QETH_DBF_TEXT(trace,5,"irqtlet");
496 channel = (struct qeth_channel *) data;
497 iob = channel->iob;
498 index = channel->buf_no;
499 card = CARD_FROM_CDEV(channel->ccwdev);
500 while (iob[index].state == BUF_STATE_PROCESSED) {
501 if (iob[index].callback !=NULL) {
502 iob[index].callback(channel,iob + index);
503 }
504 index = (index + 1) % QETH_CMD_BUFFER_NO;
505 }
506 channel->buf_no = index;
507 wake_up(&card->wait_q);
508}
509
 510static int qeth_stop_card(struct qeth_card *, int);
511
512static int
 513__qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode)
514{
515 struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
 516 int rc = 0, rc2 = 0, rc3 = 0;
517 enum qeth_card_states recover_flag;
518
519 QETH_DBF_TEXT(setup, 3, "setoffl");
520 QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
521
522 recover_flag = card->state;
 523 if (qeth_stop_card(card, recovery_mode) == -ERESTARTSYS){
524 PRINT_WARN("Stopping card %s interrupted by user!\n",
525 CARD_BUS_ID(card));
526 return -ERESTARTSYS;
527 }
528 rc = ccw_device_set_offline(CARD_DDEV(card));
529 rc2 = ccw_device_set_offline(CARD_WDEV(card));
530 rc3 = ccw_device_set_offline(CARD_RDEV(card));
531 if (!rc)
532 rc = (rc2) ? rc2 : rc3;
533 if (rc)
 534 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
535 if (recover_flag == CARD_STATE_UP)
536 card->state = CARD_STATE_RECOVER;
537 qeth_notify_processes();
538 return 0;
539}
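/*
 * Note the shutdown order above: the card is stopped first, then the
 * data, write and read ccw devices are set offline; only the first
 * non-zero return code is logged. A card that was CARD_STATE_UP is left
 * in CARD_STATE_RECOVER so a later online/recovery can restore it.
 */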
540
541static int
542qeth_set_offline(struct ccwgroup_device *cgdev)
543{
544 return __qeth_set_offline(cgdev, 0);
545}
546
547static int
548qeth_wait_for_threads(struct qeth_card *card, unsigned long threads);
549
550
551static void
552qeth_remove_device(struct ccwgroup_device *cgdev)
553{
554 struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
555 unsigned long flags;
556
557 QETH_DBF_TEXT(setup, 3, "rmdev");
558 QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
559
560 if (!card)
561 return;
562
563 if (qeth_wait_for_threads(card, 0xffffffff))
564 return;
565
566 if (cgdev->state == CCWGROUP_ONLINE){
567 card->use_hard_stop = 1;
568 qeth_set_offline(cgdev);
569 }
 570 /* remove from our internal list */
571 write_lock_irqsave(&qeth_card_list.rwlock, flags);
572 list_del(&card->list);
573 write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
574 if (card->dev)
575 unregister_netdev(card->dev);
576 qeth_remove_device_attributes(&cgdev->dev);
577 qeth_free_card(card);
578 cgdev->dev.driver_data = NULL;
579 put_device(&cgdev->dev);
580}
581
582static int
583qeth_register_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
584static int
585qeth_deregister_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
586
587/**
588 * Add/remove address to/from card's ip list, i.e. try to add or remove
589 * reference to/from an IP address that is already registered on the card.
590 * Returns:
591 * 0 address was on card and its reference count has been adjusted,
592 * but is still > 0, so nothing has to be done
 593 * also returns 0 if address was not on card and the todo was to delete
594 * the address -> there is also nothing to be done
595 * 1 address was not on card and the todo is to add it to the card's ip
596 * list
597 * -1 address was on card and its reference count has been decremented
598 * to <= 0 by the todo -> address must be removed from card
599 */
600static int
601__qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo,
602 struct qeth_ipaddr **__addr)
603{
604 struct qeth_ipaddr *addr;
605 int found = 0;
606
607 list_for_each_entry(addr, &card->ip_list, entry) {
608 if (card->options.layer2) {
609 if ((addr->type == todo->type) &&
610 (memcmp(&addr->mac, &todo->mac,
611 OSA_ADDR_LEN) == 0)) {
612 found = 1;
613 break;
614 }
615 continue;
616 }
617 if ((addr->proto == QETH_PROT_IPV4) &&
618 (todo->proto == QETH_PROT_IPV4) &&
619 (addr->type == todo->type) &&
620 (addr->u.a4.addr == todo->u.a4.addr) &&
 621 (addr->u.a4.mask == todo->u.a4.mask)) {
622 found = 1;
623 break;
624 }
625 if ((addr->proto == QETH_PROT_IPV6) &&
626 (todo->proto == QETH_PROT_IPV6) &&
627 (addr->type == todo->type) &&
628 (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
629 (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
 630 sizeof(struct in6_addr)) == 0)) {
631 found = 1;
632 break;
633 }
634 }
 635 if (found) {
636 addr->users += todo->users;
637 if (addr->users <= 0){
638 *__addr = addr;
639 return -1;
640 } else {
641 /* for VIPA and RXIP limit refcount to 1 */
642 if (addr->type != QETH_IP_TYPE_NORMAL)
643 addr->users = 1;
644 return 0;
645 }
646 }
 647 if (todo->users > 0) {
648 /* for VIPA and RXIP limit refcount to 1 */
649 if (todo->type != QETH_IP_TYPE_NORMAL)
650 todo->users = 1;
651 return 1;
652 } else
653 return 0;
654}
655
656static inline int
657__qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr,
658 int same_type)
659{
660 struct qeth_ipaddr *tmp;
661
662 list_for_each_entry(tmp, list, entry) {
663 if ((tmp->proto == QETH_PROT_IPV4) &&
664 (addr->proto == QETH_PROT_IPV4) &&
665 ((same_type && (tmp->type == addr->type)) ||
666 (!same_type && (tmp->type != addr->type)) ) &&
667 (tmp->u.a4.addr == addr->u.a4.addr) ){
668 return 1;
669 }
670 if ((tmp->proto == QETH_PROT_IPV6) &&
671 (addr->proto == QETH_PROT_IPV6) &&
672 ((same_type && (tmp->type == addr->type)) ||
673 (!same_type && (tmp->type != addr->type)) ) &&
674 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
675 sizeof(struct in6_addr)) == 0) ) {
676 return 1;
677 }
678 }
679 return 0;
680}
681
682/*
 683 * Add an IP address to the todo list. If there is already an "add todo"
 684 * entry in this list we just increment its reference count.
 685 * Returns 0 if we just incremented the reference count.
686 */
687static int
688__qeth_insert_ip_todo(struct qeth_card *card, struct qeth_ipaddr *addr, int add)
689{
690 struct qeth_ipaddr *tmp, *t;
691 int found = 0;
692
693 list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
694 if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
695 (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
696 return 0;
697 if (card->options.layer2) {
698 if ((tmp->type == addr->type) &&
699 (tmp->is_multicast == addr->is_multicast) &&
700 (memcmp(&tmp->mac, &addr->mac,
701 OSA_ADDR_LEN) == 0)) {
702 found = 1;
703 break;
704 }
705 continue;
706 }
707 if ((tmp->proto == QETH_PROT_IPV4) &&
708 (addr->proto == QETH_PROT_IPV4) &&
709 (tmp->type == addr->type) &&
710 (tmp->is_multicast == addr->is_multicast) &&
711 (tmp->u.a4.addr == addr->u.a4.addr) &&
 712 (tmp->u.a4.mask == addr->u.a4.mask)) {
713 found = 1;
714 break;
715 }
716 if ((tmp->proto == QETH_PROT_IPV6) &&
717 (addr->proto == QETH_PROT_IPV6) &&
718 (tmp->type == addr->type) &&
719 (tmp->is_multicast == addr->is_multicast) &&
720 (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
721 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
 722 sizeof(struct in6_addr)) == 0)) {
723 found = 1;
724 break;
725 }
726 }
727 if (found){
728 if (addr->users != 0)
729 tmp->users += addr->users;
730 else
731 tmp->users += add? 1:-1;
 732 if (tmp->users == 0) {
733 list_del(&tmp->entry);
734 kfree(tmp);
735 }
736 return 0;
737 } else {
738 if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
739 list_add(&addr->entry, card->ip_tbd_list);
740 else {
741 if (addr->users == 0)
742 addr->users += add? 1:-1;
743 if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
744 qeth_is_addr_covered_by_ipato(card, addr)){
745 QETH_DBF_TEXT(trace, 2, "tkovaddr");
746 addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
747 }
748 list_add_tail(&addr->entry, card->ip_tbd_list);
749 }
750 return 1;
751 }
752}
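/*
 * Return value of __qeth_insert_ip_todo: 0 means the request was merged
 * into an existing todo entry (which is dropped again if its user count
 * reaches zero), 1 means a new entry was queued on card->ip_tbd_list.
 * Only card->ip_lock is held here; the actual hardware registration
 * happens later in qeth_set_ip_addr_list().
 */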
753
754/**
755 * Remove IP address from list
756 */
757static int
758qeth_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
759{
760 unsigned long flags;
761 int rc = 0;
762
763 QETH_DBF_TEXT(trace, 4, "delip");
764
765 if (card->options.layer2)
766 QETH_DBF_HEX(trace, 4, &addr->mac, 6);
767 else if (addr->proto == QETH_PROT_IPV4)
768 QETH_DBF_HEX(trace, 4, &addr->u.a4.addr, 4);
 769 else {
770 QETH_DBF_HEX(trace, 4, &addr->u.a6.addr, 8);
771 QETH_DBF_HEX(trace, 4, ((char *)&addr->u.a6.addr) + 8, 8);
772 }
773 spin_lock_irqsave(&card->ip_lock, flags);
774 rc = __qeth_insert_ip_todo(card, addr, 0);
775 spin_unlock_irqrestore(&card->ip_lock, flags);
776 return rc;
777}
778
779static int
780qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
781{
782 unsigned long flags;
783 int rc = 0;
784
785 QETH_DBF_TEXT(trace, 4, "addip");
786 if (card->options.layer2)
787 QETH_DBF_HEX(trace, 4, &addr->mac, 6);
788 else if (addr->proto == QETH_PROT_IPV4)
789 QETH_DBF_HEX(trace, 4, &addr->u.a4.addr, 4);
 790 else {
791 QETH_DBF_HEX(trace, 4, &addr->u.a6.addr, 8);
792 QETH_DBF_HEX(trace, 4, ((char *)&addr->u.a6.addr) + 8, 8);
793 }
794 spin_lock_irqsave(&card->ip_lock, flags);
795 rc = __qeth_insert_ip_todo(card, addr, 1);
796 spin_unlock_irqrestore(&card->ip_lock, flags);
797 return rc;
798}
799
800static inline void
801__qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags)
802{
803 struct qeth_ipaddr *addr, *tmp;
804 int rc;
 805again:
806 list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
807 if (addr->is_multicast) {
808 spin_unlock_irqrestore(&card->ip_lock, *flags);
809 rc = qeth_deregister_addr_entry(card, addr);
810 spin_lock_irqsave(&card->ip_lock, *flags);
811 if (!rc) {
812 list_del(&addr->entry);
813 kfree(addr);
 814 goto again;
815 }
816 }
817 }
818}
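/*
 * __qeth_delete_all_mc drops card->ip_lock around the hardware call and
 * therefore restarts the list walk from the beginning ("goto again")
 * after every successful deregistration, since the list may have been
 * modified while the lock was released.
 */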
819
820static void
821qeth_set_ip_addr_list(struct qeth_card *card)
822{
823 struct list_head *tbd_list;
824 struct qeth_ipaddr *todo, *addr;
825 unsigned long flags;
826 int rc;
827
828 QETH_DBF_TEXT(trace, 2, "sdiplist");
829 QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
830
831 spin_lock_irqsave(&card->ip_lock, flags);
832 tbd_list = card->ip_tbd_list;
833 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
834 if (!card->ip_tbd_list) {
835 QETH_DBF_TEXT(trace, 0, "silnomem");
836 card->ip_tbd_list = tbd_list;
837 spin_unlock_irqrestore(&card->ip_lock, flags);
838 return;
839 } else
840 INIT_LIST_HEAD(card->ip_tbd_list);
841
842 while (!list_empty(tbd_list)){
843 todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
844 list_del(&todo->entry);
845 if (todo->type == QETH_IP_TYPE_DEL_ALL_MC){
846 __qeth_delete_all_mc(card, &flags);
847 kfree(todo);
848 continue;
849 }
850 rc = __qeth_ref_ip_on_card(card, todo, &addr);
851 if (rc == 0) {
852 /* nothing to be done; only adjusted refcount */
853 kfree(todo);
854 } else if (rc == 1) {
855 /* new entry to be added to on-card list */
856 spin_unlock_irqrestore(&card->ip_lock, flags);
857 rc = qeth_register_addr_entry(card, todo);
858 spin_lock_irqsave(&card->ip_lock, flags);
859 if (!rc)
860 list_add_tail(&todo->entry, &card->ip_list);
861 else
862 kfree(todo);
863 } else if (rc == -1) {
864 /* on-card entry to be removed */
865 list_del_init(&addr->entry);
866 spin_unlock_irqrestore(&card->ip_lock, flags);
867 rc = qeth_deregister_addr_entry(card, addr);
868 spin_lock_irqsave(&card->ip_lock, flags);
869 if (!rc)
870 kfree(addr);
871 else
872 list_add_tail(&addr->entry, &card->ip_list);
873 kfree(todo);
874 }
875 }
876 spin_unlock_irqrestore(&card->ip_lock, flags);
877 kfree(tbd_list);
878}
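/*
 * qeth_set_ip_addr_list swaps in a fresh (empty) todo list first, so new
 * add/delete requests can be queued while the old list is worked off.
 * For every todo entry __qeth_ref_ip_on_card decides whether it is a
 * pure refcount change (0), a new registration (1) or a removal (-1);
 * the ip_lock is released around each register/deregister exchange with
 * the card.
 */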
879
880static void qeth_delete_mc_addresses(struct qeth_card *);
881static void qeth_add_multicast_ipv4(struct qeth_card *);
 882static void qeth_layer2_add_multicast(struct qeth_card *);
883#ifdef CONFIG_QETH_IPV6
884static void qeth_add_multicast_ipv6(struct qeth_card *);
885#endif
886
887static inline int
888qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread)
889{
890 unsigned long flags;
891
892 spin_lock_irqsave(&card->thread_mask_lock, flags);
893 if ( !(card->thread_allowed_mask & thread) ||
894 (card->thread_start_mask & thread) ) {
895 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
896 return -EPERM;
897 }
898 card->thread_start_mask |= thread;
899 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
900 return 0;
901}
902
903static void
904qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
905{
906 unsigned long flags;
907
908 spin_lock_irqsave(&card->thread_mask_lock, flags);
909 card->thread_start_mask &= ~thread;
910 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
911 wake_up(&card->wait_q);
912}
913
914static void
915qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
916{
917 unsigned long flags;
918
919 spin_lock_irqsave(&card->thread_mask_lock, flags);
920 card->thread_running_mask &= ~thread;
921 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
922 wake_up(&card->wait_q);
923}
924
925static inline int
926__qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
927{
928 unsigned long flags;
929 int rc = 0;
930
931 spin_lock_irqsave(&card->thread_mask_lock, flags);
932 if (card->thread_start_mask & thread){
933 if ((card->thread_allowed_mask & thread) &&
934 !(card->thread_running_mask & thread)){
935 rc = 1;
936 card->thread_start_mask &= ~thread;
937 card->thread_running_mask |= thread;
938 } else
939 rc = -EPERM;
940 }
941 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
942 return rc;
943}
944
945static int
946qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
947{
948 int rc = 0;
949
950 wait_event(card->wait_q,
951 (rc = __qeth_do_run_thread(card, thread)) >= 0);
952 return rc;
953}
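/*
 * Thread handshake used throughout the driver: a requester sets a bit in
 * thread_start_mask (qeth_set_thread_start_bit) and schedules
 * kernel_thread_starter; the spawned thread then calls
 * qeth_do_run_thread, which moves the bit from start_mask to
 * running_mask if the thread is allowed, or returns -EPERM so the
 * thread exits without doing any work.
 */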
954
955static int
956qeth_register_ip_addresses(void *ptr)
957{
958 struct qeth_card *card;
959
960 card = (struct qeth_card *) ptr;
961 daemonize("qeth_reg_ip");
962 QETH_DBF_TEXT(trace,4,"regipth1");
963 if (!qeth_do_run_thread(card, QETH_SET_IP_THREAD))
964 return 0;
965 QETH_DBF_TEXT(trace,4,"regipth2");
966 qeth_set_ip_addr_list(card);
967 qeth_clear_thread_running_bit(card, QETH_SET_IP_THREAD);
968 return 0;
969}
970
971/*
972 * Drive the SET_PROMISC_MODE thread
973 */
974static int
975qeth_set_promisc_mode(void *ptr)
976{
977 struct qeth_card *card = (struct qeth_card *) ptr;
978
979 daemonize("qeth_setprm");
980 QETH_DBF_TEXT(trace,4,"setprm1");
981 if (!qeth_do_run_thread(card, QETH_SET_PROMISC_MODE_THREAD))
982 return 0;
983 QETH_DBF_TEXT(trace,4,"setprm2");
984 qeth_setadp_promisc_mode(card);
985 qeth_clear_thread_running_bit(card, QETH_SET_PROMISC_MODE_THREAD);
986 return 0;
987}
988
989static int
990qeth_recover(void *ptr)
991{
992 struct qeth_card *card;
993 int rc = 0;
994
995 card = (struct qeth_card *) ptr;
996 daemonize("qeth_recover");
997 QETH_DBF_TEXT(trace,2,"recover1");
998 QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
999 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
1000 return 0;
1001 QETH_DBF_TEXT(trace,2,"recover2");
1002 PRINT_WARN("Recovery of device %s started ...\n",
1003 CARD_BUS_ID(card));
1004 card->use_hard_stop = 1;
1005 __qeth_set_offline(card->gdev,1);
1006 rc = __qeth_set_online(card->gdev,1);
1007 if (!rc)
1008 PRINT_INFO("Device %s successfully recovered!\n",
1009 CARD_BUS_ID(card));
1010 else
1011 PRINT_INFO("Device %s could not be recovered!\n",
1012 CARD_BUS_ID(card));
1013 /* don't run another scheduled recovery */
1014 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
1015 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
1016 return 0;
1017}
1018
1019void
1020qeth_schedule_recovery(struct qeth_card *card)
1021{
1022 QETH_DBF_TEXT(trace,2,"startrec");
1023
1024 if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
1025 schedule_work(&card->kernel_thread_starter);
1026}
1027
1028static int
1029qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
1030{
1031 unsigned long flags;
1032 int rc = 0;
1033
1034 spin_lock_irqsave(&card->thread_mask_lock, flags);
1035 QETH_DBF_TEXT_(trace, 4, " %02x%02x%02x",
1036 (u8) card->thread_start_mask,
1037 (u8) card->thread_allowed_mask,
1038 (u8) card->thread_running_mask);
1039 rc = (card->thread_start_mask & thread);
1040 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1041 return rc;
1042}
1043
1044static void
1045qeth_start_kernel_thread(struct qeth_card *card)
1046{
1047 QETH_DBF_TEXT(trace , 2, "strthrd");
1048
1049 if (card->read.state != CH_STATE_UP &&
1050 card->write.state != CH_STATE_UP)
1051 return;
1052
1053 if (qeth_do_start_thread(card, QETH_SET_IP_THREAD))
1054 kernel_thread(qeth_register_ip_addresses, (void *)card,SIGCHLD);
1055 if (qeth_do_start_thread(card, QETH_SET_PROMISC_MODE_THREAD))
1056 kernel_thread(qeth_set_promisc_mode, (void *)card, SIGCHLD);
1057 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
1058 kernel_thread(qeth_recover, (void *) card, SIGCHLD);
1059}
1060
1061
1062static void
1063qeth_set_intial_options(struct qeth_card *card)
1064{
1065 card->options.route4.type = NO_ROUTER;
1066#ifdef CONFIG_QETH_IPV6
1067 card->options.route6.type = NO_ROUTER;
1068#endif /* QETH_IPV6 */
1069 card->options.checksum_type = QETH_CHECKSUM_DEFAULT;
1070 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
1071 card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
1072 card->options.fake_broadcast = 0;
1073 card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1074 card->options.fake_ll = 0;
1075 if (card->info.type == QETH_CARD_TYPE_OSN)
1076 card->options.layer2 = 1;
1077 else
1078 card->options.layer2 = 0;
1079}
1080
1081/**
 1082 * initialize channels, card and all state machines
1083 */
1084static int
1085qeth_setup_card(struct qeth_card *card)
1086{
1087
1088 QETH_DBF_TEXT(setup, 2, "setupcrd");
1089 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
1090
1091 card->read.state = CH_STATE_DOWN;
1092 card->write.state = CH_STATE_DOWN;
1093 card->data.state = CH_STATE_DOWN;
1094 card->state = CARD_STATE_DOWN;
1095 card->lan_online = 0;
1096 card->use_hard_stop = 0;
1097 card->dev = NULL;
1098#ifdef CONFIG_QETH_VLAN
1099 spin_lock_init(&card->vlanlock);
1100 card->vlangrp = NULL;
1101#endif
 1102 spin_lock_init(&card->lock);
1103 spin_lock_init(&card->ip_lock);
1104 spin_lock_init(&card->thread_mask_lock);
1105 card->thread_start_mask = 0;
1106 card->thread_allowed_mask = 0;
1107 card->thread_running_mask = 0;
1108 INIT_WORK(&card->kernel_thread_starter,
1109 (void *)qeth_start_kernel_thread,card);
1110 INIT_LIST_HEAD(&card->ip_list);
1111 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
1112 if (!card->ip_tbd_list) {
1113 QETH_DBF_TEXT(setup, 0, "iptbdnom");
1114 return -ENOMEM;
1115 }
1116 INIT_LIST_HEAD(card->ip_tbd_list);
1117 INIT_LIST_HEAD(&card->cmd_waiter_list);
1118 init_waitqueue_head(&card->wait_q);
 1119 /* initial options */
1120 qeth_set_intial_options(card);
1121 /* IP address takeover */
1122 INIT_LIST_HEAD(&card->ipato.entries);
1123 card->ipato.enabled = 0;
1124 card->ipato.invert4 = 0;
1125 card->ipato.invert6 = 0;
1126 /* init QDIO stuff */
1127 qeth_init_qdio_info(card);
1128 return 0;
1129}
1130
1131static int
1132is_1920_device (struct qeth_card *card)
1133{
1134 int single_queue = 0;
1135 struct ccw_device *ccwdev;
1136 struct channelPath_dsc {
1137 u8 flags;
1138 u8 lsn;
1139 u8 desc;
1140 u8 chpid;
1141 u8 swla;
1142 u8 zeroes;
1143 u8 chla;
1144 u8 chpp;
1145 } *chp_dsc;
1146
1147 QETH_DBF_TEXT(setup, 2, "chk_1920");
1148
1149 ccwdev = card->data.ccwdev;
1150 chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
1151 if (chp_dsc != NULL) {
1152 /* CHPP field bit 6 == 1 -> single queue */
1153 single_queue = ((chp_dsc->chpp & 0x02) == 0x02);
1154 kfree(chp_dsc);
1155 }
1156 QETH_DBF_TEXT_(setup, 2, "rc:%x", single_queue);
1157 return single_queue;
1158}
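/*
 * Note on the CHPP test above: S/390 numbers bits from the most
 * significant end, so "CHPP field bit 6" corresponds to mask 0x02 in
 * the byte returned by ccw_device_get_chp_desc(). If that bit is set
 * the channel path supports only a single outbound queue and priority
 * queueing is disabled in qeth_determine_card_type().
 */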
1159
1160static int
1161qeth_determine_card_type(struct qeth_card *card)
1162{
1163 int i = 0;
1164
1165 QETH_DBF_TEXT(setup, 2, "detcdtyp");
1166
1167 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
1168 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
1169 while (known_devices[i][4]) {
1170 if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) &&
1171 (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) {
1172 card->info.type = known_devices[i][4];
1173 card->qdio.no_out_queues = known_devices[i][8];
1174 card->info.is_multicast_different = known_devices[i][9];
1175 if (is_1920_device(card)) {
1176 PRINT_INFO("Priority Queueing not able "
1177 "due to hardware limitations!\n");
1178 card->qdio.no_out_queues = 1;
1179 card->qdio.default_out_queue = 0;
 1180 }
1181 return 0;
1182 }
1183 i++;
1184 }
1185 card->info.type = QETH_CARD_TYPE_UNKNOWN;
1186 PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card));
1187 return -ENOENT;
1188}
1189
1190static int
1191qeth_probe_device(struct ccwgroup_device *gdev)
1192{
1193 struct qeth_card *card;
1194 struct device *dev;
1195 unsigned long flags;
1196 int rc;
1197
1198 QETH_DBF_TEXT(setup, 2, "probedev");
1199
1200 dev = &gdev->dev;
1201 if (!get_device(dev))
1202 return -ENODEV;
1203
1204 QETH_DBF_TEXT_(setup, 2, "%s", gdev->dev.bus_id);
1205
1206 card = qeth_alloc_card();
1207 if (!card) {
1208 put_device(dev);
1209 QETH_DBF_TEXT_(setup, 2, "1err%d", -ENOMEM);
1210 return -ENOMEM;
1211 }
1212 card->read.ccwdev = gdev->cdev[0];
1213 card->write.ccwdev = gdev->cdev[1];
1214 card->data.ccwdev = gdev->cdev[2];
1215 gdev->dev.driver_data = card;
1216 card->gdev = gdev;
1217 gdev->cdev[0]->handler = qeth_irq;
1218 gdev->cdev[1]->handler = qeth_irq;
1219 gdev->cdev[2]->handler = qeth_irq;
1220
1221 if ((rc = qeth_determine_card_type(card))){
1222 PRINT_WARN("%s: not a valid card type\n", __func__);
1223 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
1224 put_device(dev);
1225 qeth_free_card(card);
1226 return rc;
1227 }
1228 if ((rc = qeth_setup_card(card))){
1229 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
1230 put_device(dev);
1231 qeth_free_card(card);
1232 return rc;
1233 }
1234 rc = qeth_create_device_attributes(dev);
1235 if (rc) {
1236 put_device(dev);
1237 qeth_free_card(card);
1238 return rc;
1239 }
1240 /* insert into our internal list */
1241 write_lock_irqsave(&qeth_card_list.rwlock, flags);
1242 list_add_tail(&card->list, &qeth_card_list.list);
1243 write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
1244 return rc;
1245}
1246
1247
1248static int
1249qeth_get_unitaddr(struct qeth_card *card)
1250{
1251 int length;
1252 char *prcd;
1253 int rc;
1254
1255 QETH_DBF_TEXT(setup, 2, "getunit");
1256 rc = read_conf_data(CARD_DDEV(card), (void **) &prcd, &length);
1257 if (rc) {
1258 PRINT_ERR("read_conf_data for device %s returned %i\n",
1259 CARD_DDEV_ID(card), rc);
1260 return rc;
1261 }
1262 card->info.chpid = prcd[30];
1263 card->info.unit_addr2 = prcd[31];
1264 card->info.cula = prcd[63];
1265 card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
1266 (prcd[0x11] == _ascebc['M']));
1267 return 0;
1268}
1269
1270static void
1271qeth_init_tokens(struct qeth_card *card)
1272{
1273 card->token.issuer_rm_w = 0x00010103UL;
1274 card->token.cm_filter_w = 0x00010108UL;
1275 card->token.cm_connection_w = 0x0001010aUL;
1276 card->token.ulp_filter_w = 0x0001010bUL;
1277 card->token.ulp_connection_w = 0x0001010dUL;
1278}
1279
1280static inline __u16
1281raw_devno_from_bus_id(char *id)
1282{
1283 id += (strlen(id) - 4);
1284 return (__u16) simple_strtoul(id, &id, 16);
1285}
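/*
 * raw_devno_from_bus_id simply parses the last four hex digits of a ccw
 * bus id, e.g. a (hypothetical) id "0.0.4711" yields the device number
 * 0x4711. The result is used below to fill the QDIO device CUA field of
 * the IDX ACTIVATE request.
 */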
1286/**
1287 * setup channel
1288 */
1289static void
1290qeth_setup_ccw(struct qeth_channel *channel,unsigned char *iob, __u32 len)
1291{
1292 struct qeth_card *card;
1293
1294 QETH_DBF_TEXT(trace, 4, "setupccw");
1295 card = CARD_FROM_CDEV(channel->ccwdev);
1296 if (channel == &card->read)
1297 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1298 else
1299 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1300 channel->ccw.count = len;
1301 channel->ccw.cda = (__u32) __pa(iob);
1302}
1303
1304/**
 1305 * get free buffer for ccws (IDX activation, lancmds, ipassists...)
1306 */
1307static struct qeth_cmd_buffer *
1308__qeth_get_buffer(struct qeth_channel *channel)
1309{
1310 __u8 index;
1311
1312 QETH_DBF_TEXT(trace, 6, "getbuff");
1313 index = channel->io_buf_no;
1314 do {
1315 if (channel->iob[index].state == BUF_STATE_FREE) {
1316 channel->iob[index].state = BUF_STATE_LOCKED;
1317 channel->io_buf_no = (channel->io_buf_no + 1) %
1318 QETH_CMD_BUFFER_NO;
1319 memset(channel->iob[index].data, 0, QETH_BUFSIZE);
1320 return channel->iob + index;
1321 }
1322 index = (index + 1) % QETH_CMD_BUFFER_NO;
1323 } while(index != channel->io_buf_no);
1324
1325 return NULL;
1326}
1327
1328/**
1329 * release command buffer
1330 */
1331static void
1332qeth_release_buffer(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1333{
1334 unsigned long flags;
1335
1336 QETH_DBF_TEXT(trace, 6, "relbuff");
1337 spin_lock_irqsave(&channel->iob_lock, flags);
1338 memset(iob->data, 0, QETH_BUFSIZE);
1339 iob->state = BUF_STATE_FREE;
1340 iob->callback = qeth_send_control_data_cb;
1341 iob->rc = 0;
1342 spin_unlock_irqrestore(&channel->iob_lock, flags);
1343}
1344
1345static struct qeth_cmd_buffer *
1346qeth_get_buffer(struct qeth_channel *channel)
1347{
1348 struct qeth_cmd_buffer *buffer = NULL;
1349 unsigned long flags;
1350
1351 spin_lock_irqsave(&channel->iob_lock, flags);
1352 buffer = __qeth_get_buffer(channel);
1353 spin_unlock_irqrestore(&channel->iob_lock, flags);
1354 return buffer;
1355}
1356
1357static struct qeth_cmd_buffer *
1358qeth_wait_for_buffer(struct qeth_channel *channel)
1359{
1360 struct qeth_cmd_buffer *buffer;
1361 wait_event(channel->wait_q,
1362 ((buffer = qeth_get_buffer(channel)) != NULL));
1363 return buffer;
1364}
1365
1366static void
1367qeth_clear_cmd_buffers(struct qeth_channel *channel)
1368{
1369 int cnt = 0;
1370
1371 for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++)
1372 qeth_release_buffer(channel,&channel->iob[cnt]);
1373 channel->buf_no = 0;
1374 channel->io_buf_no = 0;
1375}
1376
1377/**
1378 * start IDX for read and write channel
1379 */
1380static int
1381qeth_idx_activate_get_answer(struct qeth_channel *channel,
1382 void (*idx_reply_cb)(struct qeth_channel *,
1383 struct qeth_cmd_buffer *))
1384{
1385 struct qeth_cmd_buffer *iob;
1386 unsigned long flags;
1387 int rc;
1388 struct qeth_card *card;
1389
1390 QETH_DBF_TEXT(setup, 2, "idxanswr");
1391 card = CARD_FROM_CDEV(channel->ccwdev);
1392 iob = qeth_get_buffer(channel);
1393 iob->callback = idx_reply_cb;
1394 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1395 channel->ccw.count = QETH_BUFSIZE;
1396 channel->ccw.cda = (__u32) __pa(iob->data);
1397
1398 wait_event(card->wait_q,
1399 atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
1400 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1401 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1402 rc = ccw_device_start(channel->ccwdev,
1403 &channel->ccw,(addr_t) iob, 0, 0);
1404 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1405
1406 if (rc) {
1407 PRINT_ERR("qeth: Error2 in activating channel rc=%d\n",rc);
1408 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
1409 atomic_set(&channel->irq_pending, 0);
1410 wake_up(&card->wait_q);
1411 return rc;
1412 }
1413 rc = wait_event_interruptible_timeout(card->wait_q,
1414 channel->state == CH_STATE_UP, QETH_TIMEOUT);
1415 if (rc == -ERESTARTSYS)
1416 return rc;
1417 if (channel->state != CH_STATE_UP){
1418 rc = -ETIME;
1419 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
1420 qeth_clear_cmd_buffers(channel);
1421 } else
1422 rc = 0;
1423 return rc;
1424}
1425
1426static int
1427qeth_idx_activate_channel(struct qeth_channel *channel,
1428 void (*idx_reply_cb)(struct qeth_channel *,
1429 struct qeth_cmd_buffer *))
1430{
1431 struct qeth_card *card;
1432 struct qeth_cmd_buffer *iob;
1433 unsigned long flags;
1434 __u16 temp;
1435 int rc;
1436
1437 card = CARD_FROM_CDEV(channel->ccwdev);
1438
1439 QETH_DBF_TEXT(setup, 2, "idxactch");
1440
1441 iob = qeth_get_buffer(channel);
1442 iob->callback = idx_reply_cb;
1443 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1444 channel->ccw.count = IDX_ACTIVATE_SIZE;
1445 channel->ccw.cda = (__u32) __pa(iob->data);
1446 if (channel == &card->write) {
1447 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
1448 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1449 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1450 card->seqno.trans_hdr++;
1451 } else {
1452 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
1453 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1454 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1455 }
1456 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1457 &card->token.issuer_rm_w,QETH_MPC_TOKEN_LENGTH);
1458 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
1459 &card->info.func_level,sizeof(__u16));
1460 temp = raw_devno_from_bus_id(CARD_DDEV_ID(card));
1461 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp, 2);
1462 temp = (card->info.cula << 8) + card->info.unit_addr2;
1463 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
1464
1465 wait_event(card->wait_q,
1466 atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
1467 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1468 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1469 rc = ccw_device_start(channel->ccwdev,
1470 &channel->ccw,(addr_t) iob, 0, 0);
1471 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1472
1473 if (rc) {
1474 PRINT_ERR("qeth: Error1 in activating channel. rc=%d\n",rc);
1475 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
1476 atomic_set(&channel->irq_pending, 0);
1477 wake_up(&card->wait_q);
1478 return rc;
1479 }
1480 rc = wait_event_interruptible_timeout(card->wait_q,
1481 channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
1482 if (rc == -ERESTARTSYS)
1483 return rc;
1484 if (channel->state != CH_STATE_ACTIVATING) {
1485 PRINT_WARN("qeth: IDX activate timed out!\n");
1486 QETH_DBF_TEXT_(setup, 2, "2err%d", -ETIME);
1487 qeth_clear_cmd_buffers(channel);
1488 return -ETIME;
1489 }
1490 return qeth_idx_activate_get_answer(channel,idx_reply_cb);
1491}
1492
1493static int
1494qeth_peer_func_level(int level)
1495{
1496 if ((level & 0xff) == 8)
1497 return (level & 0xff) + 0x400;
1498 if (((level >> 8) & 3) == 1)
1499 return (level & 0xff) + 0x200;
1500 return level;
1501}
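/*
 * qeth_peer_func_level derives the function level expected from the
 * peer out of our own level: a level with low byte 0x08 maps to 0x408,
 * a level whose bits ((level >> 8) & 3) equal 1 maps to the low byte
 * plus 0x200, and anything else is passed through unchanged. The IDX
 * reply callbacks below compare the received level against this value.
 */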
1502
1503static void
1504qeth_idx_write_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1505{
1506 struct qeth_card *card;
1507 __u16 temp;
1508
1509 QETH_DBF_TEXT(setup ,2, "idxwrcb");
1510
1511 if (channel->state == CH_STATE_DOWN) {
1512 channel->state = CH_STATE_ACTIVATING;
1513 goto out;
1514 }
1515 card = CARD_FROM_CDEV(channel->ccwdev);
1516
1517 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1518 PRINT_ERR("IDX_ACTIVATE on write channel device %s: negative "
1519 "reply\n", CARD_WDEV_ID(card));
1520 goto out;
1521 }
1522 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1523 if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
1524 PRINT_WARN("IDX_ACTIVATE on write channel device %s: "
1525 "function level mismatch "
1526 "(sent: 0x%x, received: 0x%x)\n",
1527 CARD_WDEV_ID(card), card->info.func_level, temp);
1528 goto out;
1529 }
1530 channel->state = CH_STATE_UP;
1531out:
1532 qeth_release_buffer(channel, iob);
1533}
1534
1535static int
1536qeth_check_idx_response(unsigned char *buffer)
1537{
1538 if (!buffer)
1539 return 0;
1540
1541 QETH_DBF_HEX(control, 2, buffer, QETH_DBF_CONTROL_LEN);
1542 if ((buffer[2] & 0xc0) == 0xc0) {
1543 PRINT_WARN("received an IDX TERMINATE "
1544 "with cause code 0x%02x%s\n",
1545 buffer[4],
1546 ((buffer[4] == 0x22) ?
1547 " -- try another portname" : ""));
1548 QETH_DBF_TEXT(trace, 2, "ckidxres");
1549 QETH_DBF_TEXT(trace, 2, " idxterm");
1550 QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO);
1551 return -EIO;
1552 }
1553 return 0;
1554}
1555
1556static void
1557qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1558{
1559 struct qeth_card *card;
1560 __u16 temp;
1561
1562 QETH_DBF_TEXT(setup , 2, "idxrdcb");
1563 if (channel->state == CH_STATE_DOWN) {
1564 channel->state = CH_STATE_ACTIVATING;
1565 goto out;
1566 }
1567
1568 card = CARD_FROM_CDEV(channel->ccwdev);
1569 if (qeth_check_idx_response(iob->data)) {
1570 goto out;
1571 }
1572 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1573 PRINT_ERR("IDX_ACTIVATE on read channel device %s: negative "
1574 "reply\n", CARD_RDEV_ID(card));
1575 goto out;
1576 }
1577
1578/**
1579 * temporary fix for microcode bug
1580 * to revert it,replace OR by AND
1581 */
1582 if ( (!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
1583 (card->info.type == QETH_CARD_TYPE_OSAE) )
1584 card->info.portname_required = 1;
1585
1586 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1587 if (temp != qeth_peer_func_level(card->info.func_level)) {
1588 PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
1589 "level mismatch (sent: 0x%x, received: 0x%x)\n",
1590 CARD_RDEV_ID(card), card->info.func_level, temp);
1591 goto out;
1592 }
1593 memcpy(&card->token.issuer_rm_r,
1594 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1595 QETH_MPC_TOKEN_LENGTH);
1596 memcpy(&card->info.mcl_level[0],
1597 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
1598 channel->state = CH_STATE_UP;
1599out:
1600 qeth_release_buffer(channel,iob);
1601}
1602
1603static int
1604qeth_issue_next_read(struct qeth_card *card)
1605{
1606 int rc;
1607 struct qeth_cmd_buffer *iob;
1608
1609 QETH_DBF_TEXT(trace,5,"issnxrd");
1610 if (card->read.state != CH_STATE_UP)
1611 return -EIO;
1612 iob = qeth_get_buffer(&card->read);
1613 if (!iob) {
1614 PRINT_WARN("issue_next_read failed: no iob available!\n");
1615 return -ENOMEM;
1616 }
1617 qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
1618 wait_event(card->wait_q,
1619 atomic_compare_and_swap(0,1,&card->read.irq_pending) == 0);
1620 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1621 rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
1622 (addr_t) iob, 0, 0);
1623 if (rc) {
1624 PRINT_ERR("Error in starting next read ccw! rc=%i\n", rc);
1625 atomic_set(&card->read.irq_pending, 0);
1626 qeth_schedule_recovery(card);
1627 wake_up(&card->wait_q);
1628 }
1629 return rc;
1630}
1631
1632static struct qeth_reply *
1633qeth_alloc_reply(struct qeth_card *card)
1634{
1635 struct qeth_reply *reply;
1636
1637 reply = kmalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
1638 if (reply){
1639 memset(reply, 0, sizeof(struct qeth_reply));
1640 atomic_set(&reply->refcnt, 1);
1641 reply->card = card;
 1642 }
1643 return reply;
1644}
1645
1646static void
1647qeth_get_reply(struct qeth_reply *reply)
1648{
1649 WARN_ON(atomic_read(&reply->refcnt) <= 0);
1650 atomic_inc(&reply->refcnt);
1651}
1652
1653static void
1654qeth_put_reply(struct qeth_reply *reply)
1655{
1656 WARN_ON(atomic_read(&reply->refcnt) <= 0);
1657 if (atomic_dec_and_test(&reply->refcnt))
1658 kfree(reply);
1659}
1660
1661static void
1662qeth_cmd_timeout(unsigned long data)
1663{
1664 struct qeth_reply *reply, *list_reply, *r;
1665 unsigned long flags;
1666
1667 reply = (struct qeth_reply *) data;
1668 spin_lock_irqsave(&reply->card->lock, flags);
1669 list_for_each_entry_safe(list_reply, r,
1670 &reply->card->cmd_waiter_list, list) {
1671 if (reply == list_reply){
1672 qeth_get_reply(reply);
1673 list_del_init(&reply->list);
1674 spin_unlock_irqrestore(&reply->card->lock, flags);
1675 reply->rc = -ETIME;
1676 reply->received = 1;
1677 wake_up(&reply->wait_q);
1678 qeth_put_reply(reply);
1679 return;
1680 }
1681 }
1682 spin_unlock_irqrestore(&reply->card->lock, flags);
1683}
1684
1685static struct qeth_ipa_cmd *
1686qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
1687{
1688 struct qeth_ipa_cmd *cmd = NULL;
1689
1690 QETH_DBF_TEXT(trace,5,"chkipad");
1691 if (IS_IPA(iob->data)){
1692 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
1693 if (IS_IPA_REPLY(cmd))
1694 return cmd;
1695 else {
1696 switch (cmd->hdr.command) {
1697 case IPA_CMD_STOPLAN:
1698 PRINT_WARN("Link failure on %s (CHPID 0x%X) - "
1699 "there is a network problem or "
1700 "someone pulled the cable or "
1701 "disabled the port.\n",
1702 QETH_CARD_IFNAME(card),
1703 card->info.chpid);
1704 card->lan_online = 0;
1705 netif_carrier_off(card->dev);
1706 return NULL;
1707 case IPA_CMD_STARTLAN:
1708 PRINT_INFO("Link reestablished on %s "
1709 "(CHPID 0x%X). Scheduling "
1710 "IP address reset.\n",
1711 QETH_CARD_IFNAME(card),
1712 card->info.chpid);
 1713 netif_carrier_on(card->dev);
 1714 qeth_schedule_recovery(card);
 1715 return NULL;
1716 case IPA_CMD_MODCCID:
1717 return cmd;
1718 case IPA_CMD_REGISTER_LOCAL_ADDR:
1719 QETH_DBF_TEXT(trace,3, "irla");
1720 break;
1721 case IPA_CMD_UNREGISTER_LOCAL_ADDR:
1722 QETH_DBF_TEXT(trace,3, "urla");
1723 break;
1724 default:
1725 PRINT_WARN("Received data is IPA "
1726 "but not a reply!\n");
1727 break;
1728 }
1729 }
1730 }
1731 return cmd;
1732}
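/*
 * qeth_check_ipa_data returns the embedded command for genuine IPA
 * replies and for request codes handled further up the stack (e.g.
 * MODCCID for OSN). Unsolicited STOPLAN/STARTLAN events are consumed
 * here: STOPLAN marks the LAN offline, STARTLAN triggers a recovery,
 * and both return NULL so no reply waiter is matched in
 * qeth_send_control_data_cb.
 */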
1733
1734/**
1735 * wake all waiting ipa commands
1736 */
1737static void
1738qeth_clear_ipacmd_list(struct qeth_card *card)
1739{
1740 struct qeth_reply *reply, *r;
1741 unsigned long flags;
1742
1743 QETH_DBF_TEXT(trace, 4, "clipalst");
1744
1745 spin_lock_irqsave(&card->lock, flags);
1746 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
1747 qeth_get_reply(reply);
1748 reply->rc = -EIO;
1749 reply->received = 1;
1750 list_del_init(&reply->list);
1751 wake_up(&reply->wait_q);
1752 qeth_put_reply(reply);
1753 }
1754 spin_unlock_irqrestore(&card->lock, flags);
1755}
1756
1757static void
1758qeth_send_control_data_cb(struct qeth_channel *channel,
1759 struct qeth_cmd_buffer *iob)
1760{
1761 struct qeth_card *card;
1762 struct qeth_reply *reply, *r;
1763 struct qeth_ipa_cmd *cmd;
1764 unsigned long flags;
1765 int keep_reply;
1766
1767 QETH_DBF_TEXT(trace,4,"sndctlcb");
1768
1769 card = CARD_FROM_CDEV(channel->ccwdev);
1770 if (qeth_check_idx_response(iob->data)) {
1771 qeth_clear_ipacmd_list(card);
1772 qeth_schedule_recovery(card);
1773 goto out;
1774 }
1775
1776 cmd = qeth_check_ipa_data(card, iob);
1777 if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
1778 goto out;
1779 /*in case of OSN : check if cmd is set */
1780 if (card->info.type == QETH_CARD_TYPE_OSN &&
1781 cmd &&
1782 cmd->hdr.command != IPA_CMD_STARTLAN &&
1783 card->osn_info.assist_cb != NULL) {
1784 card->osn_info.assist_cb(card->dev, cmd);
1785 goto out;
1786 }
1787
1788 spin_lock_irqsave(&card->lock, flags);
1789 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
1790 if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
1791 ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
1792 qeth_get_reply(reply);
1793 list_del_init(&reply->list);
1794 spin_unlock_irqrestore(&card->lock, flags);
1795 keep_reply = 0;
1796 if (reply->callback != NULL) {
1797 if (cmd) {
1798 reply->offset = (__u16)((char*)cmd -
1799 (char *)iob->data);
1800 keep_reply = reply->callback(card,
1801 reply,
1802 (unsigned long)cmd);
 1803 } else
1804 keep_reply = reply->callback(card,
1805 reply,
1806 (unsigned long)iob);
1807 }
1808 if (cmd)
1809 reply->rc = (u16) cmd->hdr.return_code;
1810 else if (iob->rc)
1811 reply->rc = iob->rc;
1812 if (keep_reply) {
1813 spin_lock_irqsave(&card->lock, flags);
1814 list_add_tail(&reply->list,
1815 &card->cmd_waiter_list);
1816 spin_unlock_irqrestore(&card->lock, flags);
1817 } else {
1818 reply->received = 1;
1819 wake_up(&reply->wait_q);
1820 }
1821 qeth_put_reply(reply);
1822 goto out;
1823 }
1824 }
1825 spin_unlock_irqrestore(&card->lock, flags);
1826out:
1827 memcpy(&card->seqno.pdu_hdr_ack,
1828 QETH_PDU_HEADER_SEQ_NO(iob->data),
1829 QETH_SEQ_NO_LENGTH);
1830 qeth_release_buffer(channel,iob);
1831}
1832
1833static inline void
1834qeth_prepare_control_data(struct qeth_card *card, int len,
1835struct qeth_cmd_buffer *iob)
1836{
1837 qeth_setup_ccw(&card->write,iob->data,len);
1838 iob->callback = qeth_release_buffer;
1839
1840 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1841 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1842 card->seqno.trans_hdr++;
1843 memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
1844 &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
1845 card->seqno.pdu_hdr++;
1846 memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
1847 &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
1848 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
1849}
1850
1851static int
1852qeth_send_control_data(struct qeth_card *card, int len,
1853 struct qeth_cmd_buffer *iob,
1854 int (*reply_cb)
1855 (struct qeth_card *, struct qeth_reply*, unsigned long),
1856 void *reply_param)
1857
1858{
1859 int rc;
1860 unsigned long flags;
 1861 struct qeth_reply *reply = NULL;
1862 struct timer_list timer;
1863
1864 QETH_DBF_TEXT(trace, 2, "sendctl");
1865
1866 reply = qeth_alloc_reply(card);
1867 if (!reply) {
1868 PRINT_WARN("Could no alloc qeth_reply!\n");
1869 return -ENOMEM;
1870 }
1871 reply->callback = reply_cb;
1872 reply->param = reply_param;
1873 if (card->state == CARD_STATE_DOWN)
1874 reply->seqno = QETH_IDX_COMMAND_SEQNO;
1875 else
1876 reply->seqno = card->seqno.ipa++;
1877 init_timer(&timer);
1878 timer.function = qeth_cmd_timeout;
1879 timer.data = (unsigned long) reply;
1880 init_waitqueue_head(&reply->wait_q);
1881 spin_lock_irqsave(&card->lock, flags);
1882 list_add_tail(&reply->list, &card->cmd_waiter_list);
1883 spin_unlock_irqrestore(&card->lock, flags);
1884 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
1885 wait_event(card->wait_q,
1886 atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
1887 qeth_prepare_control_data(card, len, iob);
1888 if (IS_IPA(iob->data))
1889 timer.expires = jiffies + QETH_IPA_TIMEOUT;
1890 else
1891 timer.expires = jiffies + QETH_TIMEOUT;
1892 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1893 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1894 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
1895 (addr_t) iob, 0, 0);
1896 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
1897 if (rc){
1898 PRINT_WARN("qeth_send_control_data: "
1899 "ccw_device_start rc = %i\n", rc);
1900 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
1901 spin_lock_irqsave(&card->lock, flags);
1902 list_del_init(&reply->list);
1903 qeth_put_reply(reply);
1904 spin_unlock_irqrestore(&card->lock, flags);
1905 qeth_release_buffer(iob->channel, iob);
1906 atomic_set(&card->write.irq_pending, 0);
1907 wake_up(&card->wait_q);
1908 return rc;
1909 }
1910 add_timer(&timer);
1911 wait_event(reply->wait_q, reply->received);
1912 del_timer_sync(&timer);
1913 rc = reply->rc;
1914 qeth_put_reply(reply);
1915 return rc;
1916}
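/*
 * Life cycle of a control command: qeth_send_control_data allocates a
 * qeth_reply, queues it on cmd_waiter_list, starts the write ccw and
 * sleeps on reply->wait_q with a timer armed (QETH_IPA_TIMEOUT for IPA
 * commands, QETH_TIMEOUT otherwise). The answer arrives through the
 * read channel and is matched by sequence number in
 * qeth_send_control_data_cb, which fills in reply->rc and wakes the
 * caller; qeth_cmd_timeout completes the reply with -ETIME instead if
 * the timer fires first.
 */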
1917
1918static int
1919qeth_osn_send_control_data(struct qeth_card *card, int len,
1920 struct qeth_cmd_buffer *iob)
1921{
1922 unsigned long flags;
1923 int rc = 0;
1924
1925 QETH_DBF_TEXT(trace, 5, "osndctrd");
1926
1927 wait_event(card->wait_q,
1928 atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
1929 qeth_prepare_control_data(card, len, iob);
1930 QETH_DBF_TEXT(trace, 6, "osnoirqp");
1931 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1932 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
1933 (addr_t) iob, 0, 0);
1934 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
1935 if (rc){
1936 PRINT_WARN("qeth_osn_send_control_data: "
1937 "ccw_device_start rc = %i\n", rc);
1938 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
1939 qeth_release_buffer(iob->channel, iob);
1940 atomic_set(&card->write.irq_pending, 0);
1941 wake_up(&card->wait_q);
1942 }
1943 return rc;
1944}
1945
1946static inline void
1947qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
1948 char prot_type)
1949{
1950 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
1951 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data),&prot_type,1);
1952 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
1953 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
1954}
1955
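/*
 * For OSN IPA commands the PDU length fields are filled in by hand:
 * s1 is the total length (IPA_PDU_HEADER_SIZE + data_len), s2 the
 * payload length, which is copied into the PDU1/PDU2/PDU3 length slots.
 */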
1956static int
1957qeth_osn_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
1958 int data_len)
1959{
1960 u16 s1, s2;
1961
1962	QETH_DBF_TEXT(trace, 4, "osndipa");
1963
1964 qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
1965 s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
1966 s2 = (u16)data_len;
1967 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
1968 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
1969 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
1970 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
1971 return qeth_osn_send_control_data(card, s1, iob);
1972}
1973
1974static int
1975qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
1976 int (*reply_cb)
1977 (struct qeth_card *,struct qeth_reply*, unsigned long),
1978 void *reply_param)
1979{
1980 int rc;
1981 char prot_type;
1982
1983 QETH_DBF_TEXT(trace,4,"sendipa");
1984
1985	if (card->options.layer2)
1986		if (card->info.type == QETH_CARD_TYPE_OSN)
1987			prot_type = QETH_PROT_OSN2;
1988		else
1989			prot_type = QETH_PROT_LAYER2;
1990	else
1991		prot_type = QETH_PROT_TCPIP;
1992	qeth_prepare_ipa_cmd(card, iob, prot_type);
1993 rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob,
1994 reply_cb, reply_param);
1995 return rc;
1996}
1997
1998
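/*
 * The cm/ulp callbacks below all follow the same pattern: they copy a
 * token out of the response buffer into card->token so that the next
 * step of the MPC handshake (see qeth_mpc_initialize()) can quote it.
 */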
1999static int
2000qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2001 unsigned long data)
2002{
2003 struct qeth_cmd_buffer *iob;
2004
2005 QETH_DBF_TEXT(setup, 2, "cmenblcb");
2006
2007 iob = (struct qeth_cmd_buffer *) data;
2008 memcpy(&card->token.cm_filter_r,
2009 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
2010 QETH_MPC_TOKEN_LENGTH);
2011 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
2012 return 0;
2013}
2014
2015static int
2016qeth_cm_enable(struct qeth_card *card)
2017{
2018 int rc;
2019 struct qeth_cmd_buffer *iob;
2020
2021 QETH_DBF_TEXT(setup,2,"cmenable");
2022
2023 iob = qeth_wait_for_buffer(&card->write);
2024 memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
2025 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
2026 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2027 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
2028 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
2029
2030 rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
2031 qeth_cm_enable_cb, NULL);
2032 return rc;
2033}
2034
2035static int
2036qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2037 unsigned long data)
2038{
2039
2040 struct qeth_cmd_buffer *iob;
2041
2042 QETH_DBF_TEXT(setup, 2, "cmsetpcb");
2043
2044 iob = (struct qeth_cmd_buffer *) data;
2045 memcpy(&card->token.cm_connection_r,
2046 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2047 QETH_MPC_TOKEN_LENGTH);
2048 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
2049 return 0;
2050}
2051
2052static int
2053qeth_cm_setup(struct qeth_card *card)
2054{
2055 int rc;
2056 struct qeth_cmd_buffer *iob;
2057
2058 QETH_DBF_TEXT(setup,2,"cmsetup");
2059
2060 iob = qeth_wait_for_buffer(&card->write);
2061 memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
2062 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
2063 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2064 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
2065 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
2066 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
2067 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2068 rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
2069 qeth_cm_setup_cb, NULL);
2070 return rc;
2071
2072}
2073
2074static int
2075qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2076 unsigned long data)
2077{
2078
2079 __u16 mtu, framesize;
2080 __u16 len;
2081 __u8 link_type;
2082 struct qeth_cmd_buffer *iob;
2083
2084 QETH_DBF_TEXT(setup, 2, "ulpenacb");
2085
2086 iob = (struct qeth_cmd_buffer *) data;
2087 memcpy(&card->token.ulp_filter_r,
2088 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
2089 QETH_MPC_TOKEN_LENGTH);
2090 if (qeth_get_mtu_out_of_mpc(card->info.type)) {
2091 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
2092 mtu = qeth_get_mtu_outof_framesize(framesize);
2093 if (!mtu) {
2094 iob->rc = -EINVAL;
2095 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
2096 return 0;
2097 }
2098 card->info.max_mtu = mtu;
2099 card->info.initial_mtu = mtu;
2100 card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
2101 } else {
2102 card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
2103 card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
2104 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
2105 }
2106
2107 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
2108 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
2109 memcpy(&link_type,
2110 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2111 card->info.link_type = link_type;
2112 } else
2113 card->info.link_type = 0;
2114 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
2115 return 0;
2116}
2117
2118static int
2119qeth_ulp_enable(struct qeth_card *card)
2120{
2121 int rc;
2122 char prot_type;
2123 struct qeth_cmd_buffer *iob;
2124
2125 /*FIXME: trace view callbacks*/
2126 QETH_DBF_TEXT(setup,2,"ulpenabl");
2127
2128 iob = qeth_wait_for_buffer(&card->write);
2129 memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
2130
2131 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
2132 (__u8) card->info.portno;
2133 if (card->options.layer2)
2134 if (card->info.type == QETH_CARD_TYPE_OSN)
2135 prot_type = QETH_PROT_OSN2;
2136 else
2137 prot_type = QETH_PROT_LAYER2;
2138 else
2139 prot_type = QETH_PROT_TCPIP;
2140
2141 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data),&prot_type,1);
2142 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2143 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2144 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2145 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2146 memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data),
2147 card->info.portname, 9);
2148 rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
2149 qeth_ulp_enable_cb, NULL);
2150 return rc;
2151
2152}
2153
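/*
 * __raw_devno_from_bus_id() takes the last four characters of the bus
 * id and interprets them as a hex device number, e.g. a bus id such as
 * 0.0.4711 would yield 0x4711.
 */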
2154static inline __u16
2155__raw_devno_from_bus_id(char *id)
2156{
2157 id += (strlen(id) - 4);
2158 return (__u16) simple_strtoul(id, &id, 16);
2159}
2160
2161static int
2162qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2163 unsigned long data)
2164{
2165 struct qeth_cmd_buffer *iob;
2166
2167 QETH_DBF_TEXT(setup, 2, "ulpstpcb");
2168
2169 iob = (struct qeth_cmd_buffer *) data;
2170 memcpy(&card->token.ulp_connection_r,
2171 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2172 QETH_MPC_TOKEN_LENGTH);
2173 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
2174 return 0;
2175}
2176
2177static int
2178qeth_ulp_setup(struct qeth_card *card)
2179{
2180 int rc;
2181 __u16 temp;
2182 struct qeth_cmd_buffer *iob;
2183
2184 QETH_DBF_TEXT(setup,2,"ulpsetup");
2185
2186 iob = qeth_wait_for_buffer(&card->write);
2187 memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
2188
2189 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2190 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2191 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2192 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2193 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2194 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2195
2196 temp = __raw_devno_from_bus_id(CARD_DDEV_ID(card));
2197 memcpy(QETH_ULP_SETUP_CUA(iob->data), &temp, 2);
2198 temp = (card->info.cula << 8) + card->info.unit_addr2;
2199 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2200 rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
2201 qeth_ulp_setup_cb, NULL);
2202 return rc;
2203}
2204
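/*
 * Logs SBALF 14/15 and the qdio/siga error codes to the qerr debug
 * area and returns 1 if either error indication is set, 0 otherwise.
 */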
2205static inline int
2206qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2207		       unsigned int siga_error, const char *dbftext)
2208{
2209	if (qdio_error || siga_error) {
2210		QETH_DBF_TEXT(trace, 2, dbftext);
2211		QETH_DBF_TEXT(qerr, 2, dbftext);
2212		QETH_DBF_TEXT_(qerr, 2, " F15=%02X",
2213			       buf->element[15].flags & 0xff);
2214		QETH_DBF_TEXT_(qerr, 2, " F14=%02X",
2215			       buf->element[14].flags & 0xff);
2216		QETH_DBF_TEXT_(qerr, 2, " qerr=%X", qdio_error);
2217		QETH_DBF_TEXT_(qerr, 2, " serr=%X", siga_error);
2218		return 1;
2219	}
2220	return 0;
2221}
2222
2223static inline struct sk_buff *
2224qeth_get_skb(unsigned int length, struct qeth_hdr *hdr)
2225{
2226	struct sk_buff* skb;
2227	int add_len;
2228
2229	add_len = 0;
2230	if (hdr->hdr.osn.id == QETH_HEADER_TYPE_OSN)
2231		add_len = sizeof(struct qeth_hdr);
2232#ifdef CONFIG_QETH_VLAN
2233	else
2234		add_len = VLAN_HLEN;
2235#endif
2236	skb = dev_alloc_skb(length + add_len);
2237	if (skb && add_len)
2238		skb_reserve(skb, add_len);
2239	return skb;
2240}
2241
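/*
 * qeth_get_next_skb() walks the SBAL elements of an inbound buffer:
 * the qeth_hdr must be contained in a single element, while the packet
 * data may span elements and is copied piecewise into the freshly
 * allocated skb; running past the last element counts as an rx error.
 */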
2242static inline struct sk_buff *
2243qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
2244 struct qdio_buffer_element **__element, int *__offset,
2245 struct qeth_hdr **hdr)
2246{
2247 struct qdio_buffer_element *element = *__element;
2248 int offset = *__offset;
2249 struct sk_buff *skb = NULL;
2250 int skb_len;
2251 void *data_ptr;
2252 int data_len;
2253
2254 QETH_DBF_TEXT(trace,6,"nextskb");
2255 /* qeth_hdr must not cross element boundaries */
2256 if (element->length < offset + sizeof(struct qeth_hdr)){
2257 if (qeth_is_last_sbale(element))
2258 return NULL;
2259 element++;
2260 offset = 0;
2261 if (element->length < sizeof(struct qeth_hdr))
2262 return NULL;
2263 }
2264 *hdr = element->addr + offset;
2265
2266 offset += sizeof(struct qeth_hdr);
2267 if (card->options.layer2)
2268 if (card->info.type == QETH_CARD_TYPE_OSN)
2269 skb_len = (*hdr)->hdr.osn.pdu_length;
2270 else
2271 skb_len = (*hdr)->hdr.l2.pkt_length;
2272 else
2273 skb_len = (*hdr)->hdr.l3.length;
2274
2275 if (!skb_len)
2276 return NULL;
2277 if (card->options.fake_ll){
2278		if (card->dev->type == ARPHRD_IEEE802_TR){
2279			if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_TR, *hdr)))
2280				goto no_mem;
2281			skb_reserve(skb,QETH_FAKE_LL_LEN_TR);
2282		} else {
2283			if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_ETH, *hdr)))
2284				goto no_mem;
2285			skb_reserve(skb,QETH_FAKE_LL_LEN_ETH);
2286		}
2287	} else if (!(skb = qeth_get_skb(skb_len, *hdr)))
2288		goto no_mem;
2289 data_ptr = element->addr + offset;
2290 while (skb_len) {
2291 data_len = min(skb_len, (int)(element->length - offset));
2292 if (data_len)
2293 memcpy(skb_put(skb, data_len), data_ptr, data_len);
2294 skb_len -= data_len;
2295 if (skb_len){
2296 if (qeth_is_last_sbale(element)){
2297 QETH_DBF_TEXT(trace,4,"unexeob");
2298 QETH_DBF_TEXT_(trace,4,"%s",CARD_BUS_ID(card));
2299 QETH_DBF_TEXT(qerr,2,"unexeob");
2300 QETH_DBF_TEXT_(qerr,2,"%s",CARD_BUS_ID(card));
2301 QETH_DBF_HEX(misc,4,buffer,sizeof(*buffer));
2302 dev_kfree_skb_any(skb);
2303 card->stats.rx_errors++;
2304 return NULL;
2305 }
2306 element++;
2307 offset = 0;
2308 data_ptr = element->addr;
2309 } else {
2310 offset += data_len;
2311 }
2312 }
2313 *__element = element;
2314 *__offset = offset;
2315 return skb;
2316no_mem:
2317 if (net_ratelimit()){
2318 PRINT_WARN("No memory for packet received on %s.\n",
2319 QETH_CARD_IFNAME(card));
2320 QETH_DBF_TEXT(trace,2,"noskbmem");
2321 QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
2322 }
2323 card->stats.rx_dropped++;
2324 return NULL;
2325}
2326
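/*
 * Protocol resolution for layer-2 frames: values >= 1536 in the
 * ethernet type/length field are taken as the protocol id, a length
 * field of 0xFFFF marks a raw 802.3 frame, everything else is handed
 * up as 802.2 LLC.
 */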
2327static inline __be16
2328qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
2329{
2330 struct qeth_card *card;
2331 struct ethhdr *eth;
2332
2333 QETH_DBF_TEXT(trace,6,"typtrans");
2334
2335 card = (struct qeth_card *)dev->priv;
2336#ifdef CONFIG_TR
2337 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
2338 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
2339 return tr_type_trans(skb,dev);
2340#endif /* CONFIG_TR */
2341 skb->mac.raw = skb->data;
2342 skb_pull(skb, ETH_HLEN );
2343 eth = eth_hdr(skb);
2344
2345 if (*eth->h_dest & 1) {
2346 if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
2347 skb->pkt_type = PACKET_BROADCAST;
2348 else
2349 skb->pkt_type = PACKET_MULTICAST;
2350 } else if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
2351 skb->pkt_type = PACKET_OTHERHOST;
2352
2353 if (ntohs(eth->h_proto) >= 1536)
2354 return eth->h_proto;
2355 if (*(unsigned short *) (skb->data) == 0xFFFF)
2356 return htons(ETH_P_802_3);
2357 return htons(ETH_P_802_2);
2358}
2359
2360static inline void
2361qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
2362 struct qeth_hdr *hdr)
2363{
2364 struct trh_hdr *fake_hdr;
2365 struct trllc *fake_llc;
2366 struct iphdr *ip_hdr;
2367
2368 QETH_DBF_TEXT(trace,5,"skbfktr");
2369 skb->mac.raw = skb->data - QETH_FAKE_LL_LEN_TR;
2370	/* this is a fake token-ring header */
2371 fake_hdr = (struct trh_hdr *) skb->mac.raw;
2372
2373 /* the destination MAC address */
2374 switch (skb->pkt_type){
2375 case PACKET_MULTICAST:
2376 switch (skb->protocol){
2377#ifdef CONFIG_QETH_IPV6
2378 case __constant_htons(ETH_P_IPV6):
2379 ndisc_mc_map((struct in6_addr *)
2380 skb->data + QETH_FAKE_LL_V6_ADDR_POS,
2381 fake_hdr->daddr, card->dev, 0);
2382 break;
2383#endif /* CONFIG_QETH_IPV6 */
2384 case __constant_htons(ETH_P_IP):
2385 ip_hdr = (struct iphdr *)skb->data;
2386 ip_tr_mc_map(ip_hdr->daddr, fake_hdr->daddr);
2387 break;
2388 default:
2389 memcpy(fake_hdr->daddr, card->dev->dev_addr, TR_ALEN);
2390 }
2391 break;
2392 case PACKET_BROADCAST:
2393 memset(fake_hdr->daddr, 0xff, TR_ALEN);
2394 break;
2395 default:
2396 memcpy(fake_hdr->daddr, card->dev->dev_addr, TR_ALEN);
2397 }
2398 /* the source MAC address */
2399 if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
2400 memcpy(fake_hdr->saddr, &hdr->hdr.l3.dest_addr[2], TR_ALEN);
2401 else
2402 memset(fake_hdr->saddr, 0, TR_ALEN);
2403 fake_hdr->rcf=0;
2404 fake_llc = (struct trllc*)&(fake_hdr->rcf);
2405 fake_llc->dsap = EXTENDED_SAP;
2406 fake_llc->ssap = EXTENDED_SAP;
2407 fake_llc->llc = UI_CMD;
2408 fake_llc->protid[0] = 0;
2409 fake_llc->protid[1] = 0;
2410 fake_llc->protid[2] = 0;
2411 fake_llc->ethertype = ETH_P_IP;
2412}
2413
2414static inline void
2415qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb,
2416 struct qeth_hdr *hdr)
2417{
2418 struct ethhdr *fake_hdr;
2419 struct iphdr *ip_hdr;
2420
2421 QETH_DBF_TEXT(trace,5,"skbfketh");
2422 skb->mac.raw = skb->data - QETH_FAKE_LL_LEN_ETH;
2423 /* this is a fake ethernet header */
2424 fake_hdr = (struct ethhdr *) skb->mac.raw;
2425
2426 /* the destination MAC address */
2427 switch (skb->pkt_type){
2428 case PACKET_MULTICAST:
2429 switch (skb->protocol){
2430#ifdef CONFIG_QETH_IPV6
2431 case __constant_htons(ETH_P_IPV6):
2432 ndisc_mc_map((struct in6_addr *)
2433 skb->data + QETH_FAKE_LL_V6_ADDR_POS,
2434 fake_hdr->h_dest, card->dev, 0);
2435 break;
2436#endif /* CONFIG_QETH_IPV6 */
2437 case __constant_htons(ETH_P_IP):
2438 ip_hdr = (struct iphdr *)skb->data;
2439			ip_eth_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
2440 break;
2441 default:
2442 memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
2443 }
2444 break;
2445 case PACKET_BROADCAST:
2446 memset(fake_hdr->h_dest, 0xff, ETH_ALEN);
2447 break;
2448 default:
2449 memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
2450 }
2451 /* the source MAC address */
2452 if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
2453 memcpy(fake_hdr->h_source, &hdr->hdr.l3.dest_addr[2], ETH_ALEN);
2454 else
2455 memset(fake_hdr->h_source, 0, ETH_ALEN);
2456 /* the protocol */
2457 fake_hdr->h_proto = skb->protocol;
2458}
2459
2460static inline void
2461qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb,
2462 struct qeth_hdr *hdr)
2463{
2464 if (card->dev->type == ARPHRD_IEEE802_TR)
2465 qeth_rebuild_skb_fake_ll_tr(card, skb, hdr);
2466 else
2467 qeth_rebuild_skb_fake_ll_eth(card, skb, hdr);
2468}
2469
2470static inline void
2471qeth_rebuild_skb_vlan(struct qeth_card *card, struct sk_buff *skb,
2472 struct qeth_hdr *hdr)
2473{
2474#ifdef CONFIG_QETH_VLAN
2475 u16 *vlan_tag;
2476
2477 if (hdr->hdr.l3.ext_flags &
2478 (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
2479 vlan_tag = (u16 *) skb_push(skb, VLAN_HLEN);
2480 *vlan_tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)?
2481 hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
2482 *(vlan_tag + 1) = skb->protocol;
2483 skb->protocol = __constant_htons(ETH_P_8021Q);
2484 }
2485#endif /* CONFIG_QETH_VLAN */
2486}
2487
2488static inline __u16
2489qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2490 struct qeth_hdr *hdr)
2491{
2492 unsigned short vlan_id = 0;
2493#ifdef CONFIG_QETH_VLAN
2494 struct vlan_hdr *vhdr;
2495#endif
2496
2497 skb->pkt_type = PACKET_HOST;
2498 skb->protocol = qeth_type_trans(skb, skb->dev);
2499 if (card->options.checksum_type == NO_CHECKSUMMING)
2500 skb->ip_summed = CHECKSUM_UNNECESSARY;
2501 else
2502 skb->ip_summed = CHECKSUM_NONE;
2503#ifdef CONFIG_QETH_VLAN
2504 if (hdr->hdr.l2.flags[2] & (QETH_LAYER2_FLAG_VLAN)) {
2505 vhdr = (struct vlan_hdr *) skb->data;
2506 skb->protocol =
2507 __constant_htons(vhdr->h_vlan_encapsulated_proto);
2508 vlan_id = hdr->hdr.l2.vlan_id;
2509 skb_pull(skb, VLAN_HLEN);
2510 }
2511#endif
2512	*((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
2513 return vlan_id;
2514}
2515
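/*
 * For HW_CHECKSUMMING the checksum is only trusted (CHECKSUM_UNNECESSARY)
 * if the adapter flagged both the IP header and the transport header
 * checksum as verified; otherwise the skb falls back to SW_CHECKSUMMING.
 */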
2516static inline void
2517qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2518 struct qeth_hdr *hdr)
2519{
2520#ifdef CONFIG_QETH_IPV6
2521 if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
2522 skb->pkt_type = PACKET_HOST;
2523 skb->protocol = qeth_type_trans(skb, card->dev);
2524 return;
2525 }
2526#endif /* CONFIG_QETH_IPV6 */
2527 skb->protocol = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
2528 ETH_P_IP);
2529 switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK){
2530 case QETH_CAST_UNICAST:
2531 skb->pkt_type = PACKET_HOST;
2532 break;
2533 case QETH_CAST_MULTICAST:
2534 skb->pkt_type = PACKET_MULTICAST;
2535 card->stats.multicast++;
2536 break;
2537 case QETH_CAST_BROADCAST:
2538 skb->pkt_type = PACKET_BROADCAST;
2539 card->stats.multicast++;
2540 break;
2541 case QETH_CAST_ANYCAST:
2542 case QETH_CAST_NOCAST:
2543 default:
2544 skb->pkt_type = PACKET_HOST;
2545 }
2546 qeth_rebuild_skb_vlan(card, skb, hdr);
2547 if (card->options.fake_ll)
2548 qeth_rebuild_skb_fake_ll(card, skb, hdr);
2549 else
2550 skb->mac.raw = skb->data;
2551 skb->ip_summed = card->options.checksum_type;
2552 if (card->options.checksum_type == HW_CHECKSUMMING){
2553 if ( (hdr->hdr.l3.ext_flags &
2554 (QETH_HDR_EXT_CSUM_HDR_REQ |
2555 QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
2556 (QETH_HDR_EXT_CSUM_HDR_REQ |
2557 QETH_HDR_EXT_CSUM_TRANSP_REQ) )
2558 skb->ip_summed = CHECKSUM_UNNECESSARY;
2559 else
2560 skb->ip_summed = SW_CHECKSUMMING;
2561 }
2562}
2563
2564static inline void
2565qeth_process_inbound_buffer(struct qeth_card *card,
2566 struct qeth_qdio_buffer *buf, int index)
2567{
2568 struct qdio_buffer_element *element;
2569 struct sk_buff *skb;
2570 struct qeth_hdr *hdr;
2571 int offset;
2572 int rxrc;
2573 __u16 vlan_tag = 0;
2574
2575 /* get first element of current buffer */
2576 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
2577 offset = 0;
2578#ifdef CONFIG_QETH_PERF_STATS
2579 card->perf_stats.bufs_rec++;
2580#endif
2581 while((skb = qeth_get_next_skb(card, buf->buffer, &element,
2582 &offset, &hdr))) {
2583 skb->dev = card->dev;
2584 if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
2585 vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr);
2586		else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
2587			qeth_rebuild_skb(card, skb, hdr);
2588 else { /*in case of OSN*/
2589 skb_push(skb, sizeof(struct qeth_hdr));
2590 memcpy(skb->data, hdr, sizeof(struct qeth_hdr));
2591 }
2592 /* is device UP ? */
2593 if (!(card->dev->flags & IFF_UP)){
2594 dev_kfree_skb_any(skb);
2595 continue;
2596 }
2597#ifdef CONFIG_QETH_VLAN
2598 if (vlan_tag)
2599 vlan_hwaccel_rx(skb, card->vlangrp, vlan_tag);
2600 else
2601#endif
2602 if (card->info.type == QETH_CARD_TYPE_OSN)
2603 rxrc = card->osn_info.data_cb(skb);
2604 else
2605 rxrc = netif_rx(skb);
2606 card->dev->last_rx = jiffies;
2607 card->stats.rx_packets++;
2608 card->stats.rx_bytes += skb->len;
2609 }
2610}
2611
2612static inline struct qeth_buffer_pool_entry *
2613qeth_get_buffer_pool_entry(struct qeth_card *card)
2614{
2615 struct qeth_buffer_pool_entry *entry;
2616
2617 QETH_DBF_TEXT(trace, 6, "gtbfplen");
2618 if (!list_empty(&card->qdio.in_buf_pool.entry_list)) {
2619 entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2620 struct qeth_buffer_pool_entry, list);
2621 list_del_init(&entry->list);
2622 return entry;
2623 }
2624 return NULL;
2625}
2626
2627static inline void
2628qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
2629{
2630 struct qeth_buffer_pool_entry *pool_entry;
2631 int i;
2632
2633 pool_entry = qeth_get_buffer_pool_entry(card);
2634 /*
2635 * since the buffer is accessed only from the input_tasklet
2636 * there shouldn't be a need to synchronize; also, since we use
2637	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2638 * buffers
2639 */
2640 BUG_ON(!pool_entry);
2641
2642 buf->pool_entry = pool_entry;
2643 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
2644 buf->buffer->element[i].length = PAGE_SIZE;
2645 buf->buffer->element[i].addr = pool_entry->elements[i];
2646 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2647 buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY;
2648 else
2649 buf->buffer->element[i].flags = 0;
2650 }
2651 buf->state = QETH_QDIO_BUF_EMPTY;
2652}
2653
2654static inline void
2655qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2656 struct qeth_qdio_out_buffer *buf)
2657{
2658 int i;
2659 struct sk_buff *skb;
2660
2661 /* is PCI flag set on buffer? */
2662 if (buf->buffer->element[0].flags & 0x40)
2663 atomic_dec(&queue->set_pci_flags_count);
2664
2665 while ((skb = skb_dequeue(&buf->skb_list))){
2666 atomic_dec(&skb->users);
2667 dev_kfree_skb_any(skb);
2668 }
2669 qeth_eddp_buf_release_contexts(buf);
2670 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i){
2671 buf->buffer->element[i].length = 0;
2672 buf->buffer->element[i].addr = NULL;
2673 buf->buffer->element[i].flags = 0;
2674 }
2675 buf->next_element_to_fill = 0;
2676 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
2677}
2678
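/*
 * Inbound buffers are handed back to the hardware in batches: the count
 * of processed buffers is computed modulo the ring size and do_QDIO is
 * only issued once QETH_IN_BUF_REQUEUE_THRESHOLD is reached, which
 * saves SIGA instructions.
 */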
2679static inline void
2680qeth_queue_input_buffer(struct qeth_card *card, int index)
2681{
2682 struct qeth_qdio_q *queue = card->qdio.in_q;
2683 int count;
2684 int i;
2685 int rc;
2686
2687 QETH_DBF_TEXT(trace,6,"queinbuf");
2688 count = (index < queue->next_buf_to_init)?
2689 card->qdio.in_buf_pool.buf_count -
2690 (queue->next_buf_to_init - index) :
2691 card->qdio.in_buf_pool.buf_count -
2692 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
2693 /* only requeue at a certain threshold to avoid SIGAs */
2694 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)){
2695 for (i = queue->next_buf_to_init;
2696 i < queue->next_buf_to_init + count; ++i)
2697 qeth_init_input_buffer(card,
2698 &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]);
2699 /*
2700	 * as in the old code, we avoid requeueing all 128 buffers at
2701	 * once in order to benefit from PCI avoidance.
2702 * this function keeps at least one buffer (the buffer at
2703 * 'index') un-requeued -> this buffer is the first buffer that
2704 * will be requeued the next time
2705 */
2706#ifdef CONFIG_QETH_PERF_STATS
2707 card->perf_stats.inbound_do_qdio_cnt++;
2708 card->perf_stats.inbound_do_qdio_start_time = qeth_get_micros();
2709#endif
2710 rc = do_QDIO(CARD_DDEV(card),
2711 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
2712 0, queue->next_buf_to_init, count, NULL);
2713#ifdef CONFIG_QETH_PERF_STATS
2714 card->perf_stats.inbound_do_qdio_time += qeth_get_micros() -
2715 card->perf_stats.inbound_do_qdio_start_time;
2716#endif
2717 if (rc){
2718 PRINT_WARN("qeth_queue_input_buffer's do_QDIO "
2719 "return %i (device %s).\n",
2720 rc, CARD_DDEV_ID(card));
2721 QETH_DBF_TEXT(trace,2,"qinberr");
2722 QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
2723 }
2724 queue->next_buf_to_init = (queue->next_buf_to_init + count) %
2725 QDIO_MAX_BUFFERS_PER_Q;
2726 }
2727}
2728
2729static inline void
2730qeth_put_buffer_pool_entry(struct qeth_card *card,
2731 struct qeth_buffer_pool_entry *entry)
2732{
2733 QETH_DBF_TEXT(trace, 6, "ptbfplen");
2734 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
2735}
2736
2737static void
2738qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status,
2739 unsigned int qdio_err, unsigned int siga_err,
2740 unsigned int queue, int first_element, int count,
2741 unsigned long card_ptr)
2742{
2743 struct net_device *net_dev;
2744 struct qeth_card *card;
2745 struct qeth_qdio_buffer *buffer;
2746 int index;
2747 int i;
2748
2749 QETH_DBF_TEXT(trace, 6, "qdinput");
2750 card = (struct qeth_card *) card_ptr;
2751 net_dev = card->dev;
2752#ifdef CONFIG_QETH_PERF_STATS
2753 card->perf_stats.inbound_cnt++;
2754 card->perf_stats.inbound_start_time = qeth_get_micros();
2755#endif
2756 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
2757 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
2758 QETH_DBF_TEXT(trace, 1,"qdinchk");
2759 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2760 QETH_DBF_TEXT_(trace,1,"%04X%04X",first_element,count);
2761 QETH_DBF_TEXT_(trace,1,"%04X%04X", queue, status);
2762 qeth_schedule_recovery(card);
2763 return;
2764 }
2765 }
2766 for (i = first_element; i < (first_element + count); ++i) {
2767 index = i % QDIO_MAX_BUFFERS_PER_Q;
2768 buffer = &card->qdio.in_q->bufs[index];
2769 if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) &&
2770 qeth_check_qdio_errors(buffer->buffer,
2771 qdio_err, siga_err,"qinerr")))
2772 qeth_process_inbound_buffer(card, buffer, index);
2773 /* clear buffer and give back to hardware */
2774 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
2775 qeth_queue_input_buffer(card, index);
2776 }
2777#ifdef CONFIG_QETH_PERF_STATS
2778 card->perf_stats.inbound_time += qeth_get_micros() -
2779 card->perf_stats.inbound_start_time;
2780#endif
2781}
2782
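/*
 * Maps the SIGA condition code (siga_err & 3) and SBALF 15 to a send
 * error class: cc 0 is fine unless a qdio error was flagged, cc 2 with
 * the B bit set and cc 3 trigger QETH_SEND_ERROR_KICK_IT (recovery),
 * cc 2 without the B bit is retried or treated as a link failure
 * depending on SBALF 15, and cc 1 counts as a link failure.
 */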
2783static inline int
2784qeth_handle_send_error(struct qeth_card *card,
2785 struct qeth_qdio_out_buffer *buffer,
2786		       unsigned int qdio_err, unsigned int siga_err)
2787{
2788 int sbalf15 = buffer->buffer->element[15].flags & 0xff;
2789 int cc = siga_err & 3;
2790
2791 QETH_DBF_TEXT(trace, 6, "hdsnderr");
2792	qeth_check_qdio_errors(buffer->buffer, qdio_err, siga_err, "qouterr");
2793 switch (cc) {
2794 case 0:
2795 if (qdio_err){
2796 QETH_DBF_TEXT(trace, 1,"lnkfail");
2797 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2798 QETH_DBF_TEXT_(trace,1,"%04x %02x",
2799 (u16)qdio_err, (u8)sbalf15);
2800 return QETH_SEND_ERROR_LINK_FAILURE;
2801 }
2802 return QETH_SEND_ERROR_NONE;
2803 case 2:
2804 if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) {
2805 QETH_DBF_TEXT(trace, 1, "SIGAcc2B");
2806 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2807 return QETH_SEND_ERROR_KICK_IT;
2808 }
2809 if ((sbalf15 >= 15) && (sbalf15 <= 31))
2810 return QETH_SEND_ERROR_RETRY;
2811 return QETH_SEND_ERROR_LINK_FAILURE;
2812 /* look at qdio_error and sbalf 15 */
2813 case 1:
2814 QETH_DBF_TEXT(trace, 1, "SIGAcc1");
2815 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2816 return QETH_SEND_ERROR_LINK_FAILURE;
2817 case 3:
2818 QETH_DBF_TEXT(trace, 1, "SIGAcc3");
2819 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2820 return QETH_SEND_ERROR_KICK_IT;
2821 }
2822 return QETH_SEND_ERROR_LINK_FAILURE;
2823}
2824
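/*
 * Before the buffers are handed to do_QDIO, qeth_flush_buffers() decides
 * whether to request a PCI interrupt (flag 0x40 in element 0) so that
 * packed buffers are eventually flushed; IQD devices skip this logic.
 */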
2825void
2826qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2827 int index, int count)
2828{
2829 struct qeth_qdio_out_buffer *buf;
2830 int rc;
2831 int i;
2832
2833 QETH_DBF_TEXT(trace, 6, "flushbuf");
2834
2835 for (i = index; i < index + count; ++i) {
2836 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2837 buf->buffer->element[buf->next_element_to_fill - 1].flags |=
2838 SBAL_FLAGS_LAST_ENTRY;
2839
2840 if (queue->card->info.type == QETH_CARD_TYPE_IQD)
2841 continue;
2842
2843 if (!queue->do_pack){
2844 if ((atomic_read(&queue->used_buffers) >=
2845 (QETH_HIGH_WATERMARK_PACK -
2846 QETH_WATERMARK_PACK_FUZZ)) &&
2847 !atomic_read(&queue->set_pci_flags_count)){
2848 /* it's likely that we'll go to packing
2849 * mode soon */
2850 atomic_inc(&queue->set_pci_flags_count);
2851 buf->buffer->element[0].flags |= 0x40;
2852 }
2853 } else {
2854 if (!atomic_read(&queue->set_pci_flags_count)){
2855 /*
2856 * there's no outstanding PCI any more, so we
2857			 * have to request a PCI to be sure that the PCI
2858			 * will wake at some time in the future; then we
2859 * can flush packed buffers that might still be
2860 * hanging around, which can happen if no
2861 * further send was requested by the stack
2862 */
2863 atomic_inc(&queue->set_pci_flags_count);
2864 buf->buffer->element[0].flags |= 0x40;
2865 }
2866 }
2867 }
2868
2869 queue->card->dev->trans_start = jiffies;
2870#ifdef CONFIG_QETH_PERF_STATS
2871 queue->card->perf_stats.outbound_do_qdio_cnt++;
2872 queue->card->perf_stats.outbound_do_qdio_start_time = qeth_get_micros();
2873#endif
2874 if (under_int)
2875 rc = do_QDIO(CARD_DDEV(queue->card),
2876 QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_UNDER_INTERRUPT,
2877 queue->queue_no, index, count, NULL);
2878 else
2879 rc = do_QDIO(CARD_DDEV(queue->card), QDIO_FLAG_SYNC_OUTPUT,
2880 queue->queue_no, index, count, NULL);
2881#ifdef CONFIG_QETH_PERF_STATS
2882 queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() -
2883 queue->card->perf_stats.outbound_do_qdio_start_time;
2884#endif
2885 if (rc){
2886 QETH_DBF_TEXT(trace, 2, "flushbuf");
2887 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
2888		QETH_DBF_TEXT_(trace, 2, "%s", CARD_DDEV_ID(queue->card));
2889 queue->card->stats.tx_errors += count;
2890 /* this must not happen under normal circumstances. if it
2891 * happens something is really wrong -> recover */
2892 qeth_schedule_recovery(queue->card);
2893 return;
2894 }
2895 atomic_add(count, &queue->used_buffers);
2896#ifdef CONFIG_QETH_PERF_STATS
2897 queue->card->perf_stats.bufs_sent += count;
2898#endif
2899}
2900
2901/*
2902 * Switches to packing state if the number of used buffers on a queue
2903 * reaches a certain limit.
2904 */
2905static inline void
2906qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2907{
2908 if (!queue->do_pack) {
2909 if (atomic_read(&queue->used_buffers)
2910 >= QETH_HIGH_WATERMARK_PACK){
2911 /* switch non-PACKING -> PACKING */
2912 QETH_DBF_TEXT(trace, 6, "np->pack");
2913#ifdef CONFIG_QETH_PERF_STATS
2914 queue->card->perf_stats.sc_dp_p++;
2915#endif
2916 queue->do_pack = 1;
2917 }
2918 }
2919}
2920
2921/*
2922 * Switches from packing to non-packing mode. If there is a packing
2923 * buffer on the queue this buffer will be prepared to be flushed.
2924 * In that case 1 is returned to inform the caller. If no buffer
2925 * has to be flushed, zero is returned.
2926 */
2927static inline int
2928qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2929{
2930 struct qeth_qdio_out_buffer *buffer;
2931 int flush_count = 0;
2932
2933 if (queue->do_pack) {
2934 if (atomic_read(&queue->used_buffers)
2935 <= QETH_LOW_WATERMARK_PACK) {
2936 /* switch PACKING -> non-PACKING */
2937 QETH_DBF_TEXT(trace, 6, "pack->np");
2938#ifdef CONFIG_QETH_PERF_STATS
2939 queue->card->perf_stats.sc_p_dp++;
2940#endif
2941 queue->do_pack = 0;
2942 /* flush packing buffers */
2943 buffer = &queue->bufs[queue->next_buf_to_fill];
2944 if ((atomic_read(&buffer->state) ==
2945 QETH_QDIO_BUF_EMPTY) &&
2946 (buffer->next_element_to_fill > 0)) {
2947 atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED);
2948 flush_count++;
2949 queue->next_buf_to_fill =
2950 (queue->next_buf_to_fill + 1) %
2951 QDIO_MAX_BUFFERS_PER_Q;
2952 }
2953 }
2954 }
2955 return flush_count;
2956}
2957
2958/*
2959 * Called to flush a packing buffer if no more pci flags are on the queue.
2960 * Checks if there is a packing buffer and prepares it to be flushed.
2961 * In that case returns 1, otherwise zero.
2962 */
2963static inline int
2964qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2965{
2966 struct qeth_qdio_out_buffer *buffer;
2967
2968 buffer = &queue->bufs[queue->next_buf_to_fill];
2969 if((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
2970 (buffer->next_element_to_fill > 0)){
2971 /* it's a packing buffer */
2972 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
2973 queue->next_buf_to_fill =
2974 (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
2975 return 1;
2976 }
2977 return 0;
2978}
2979
2980static inline void
2981qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
2982{
2983 int index;
2984 int flush_cnt = 0;
2985 int q_was_packing = 0;
2986
2987 /*
2988	 * check if we have to switch to non-packing mode or if
2989 * we have to get a pci flag out on the queue
2990 */
2991 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
2992 !atomic_read(&queue->set_pci_flags_count)){
2993 if (atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
2994 QETH_OUT_Q_UNLOCKED) {
2995 /*
2996 * If we get in here, there was no action in
2997 * do_send_packet. So, we check if there is a
2998 * packing buffer to be flushed here.
2999 */
3000 netif_stop_queue(queue->card->dev);
3001 index = queue->next_buf_to_fill;
3002 q_was_packing = queue->do_pack;
3003 flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
3004 if (!flush_cnt &&
3005 !atomic_read(&queue->set_pci_flags_count))
3006 flush_cnt +=
3007 qeth_flush_buffers_on_no_pci(queue);
3008#ifdef CONFIG_QETH_PERF_STATS
3009 if (q_was_packing)
3010 queue->card->perf_stats.bufs_sent_pack +=
3011 flush_cnt;
3012#endif
3013 if (flush_cnt)
3014 qeth_flush_buffers(queue, 1, index, flush_cnt);
3015 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3016 }
3017 }
3018}
3019
3020static void
3021qeth_qdio_output_handler(struct ccw_device * ccwdev, unsigned int status,
3022 unsigned int qdio_error, unsigned int siga_error,
3023 unsigned int __queue, int first_element, int count,
3024 unsigned long card_ptr)
3025{
3026 struct qeth_card *card = (struct qeth_card *) card_ptr;
3027 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3028 struct qeth_qdio_out_buffer *buffer;
3029 int i;
3030
3031 QETH_DBF_TEXT(trace, 6, "qdouhdl");
3032 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
3033 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
3034 QETH_DBF_TEXT(trace, 2, "achkcond");
3035 QETH_DBF_TEXT_(trace, 2, "%s", CARD_BUS_ID(card));
3036 QETH_DBF_TEXT_(trace, 2, "%08x", status);
3037 netif_stop_queue(card->dev);
3038 qeth_schedule_recovery(card);
3039 return;
3040 }
3041 }
3042#ifdef CONFIG_QETH_PERF_STATS
3043 card->perf_stats.outbound_handler_cnt++;
3044 card->perf_stats.outbound_handler_start_time = qeth_get_micros();
3045#endif
3046 for(i = first_element; i < (first_element + count); ++i){
3047 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
3048 /*we only handle the KICK_IT error by doing a recovery */
3049 if (qeth_handle_send_error(card, buffer,
3050 qdio_error, siga_error)
3051 == QETH_SEND_ERROR_KICK_IT){
3052 netif_stop_queue(card->dev);
3053 qeth_schedule_recovery(card);
3054 return;
3055 }
3056 qeth_clear_output_buffer(queue, buffer);
3057 }
3058 atomic_sub(count, &queue->used_buffers);
3059 /* check if we need to do something on this outbound queue */
3060 if (card->info.type != QETH_CARD_TYPE_IQD)
3061 qeth_check_outbound_queue(queue);
3062
3063 netif_wake_queue(queue->card->dev);
3064#ifdef CONFIG_QETH_PERF_STATS
3065 card->perf_stats.outbound_handler_time += qeth_get_micros() -
3066 card->perf_stats.outbound_handler_start_time;
3067#endif
3068}
3069
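/*
 * The QIB parameter field is built from two EBCDIC-tagged records:
 * "PCIT" at offset 0 followed by the PCI thresholds A/B and the PCI
 * timer value, and "BLKT" at offset 16 followed by the three blocking
 * timer values from card->info.blkt.
 */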
3070static void
3071qeth_create_qib_param_field(struct qeth_card *card, char *param_field)
3072{
3073
3074 param_field[0] = _ascebc['P'];
3075 param_field[1] = _ascebc['C'];
3076 param_field[2] = _ascebc['I'];
3077 param_field[3] = _ascebc['T'];
3078 *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
3079 *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
3080 *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
3081}
3082
3083static void
3084qeth_create_qib_param_field_blkt(struct qeth_card *card, char *param_field)
3085{
3086 param_field[16] = _ascebc['B'];
3087 param_field[17] = _ascebc['L'];
3088 param_field[18] = _ascebc['K'];
3089 param_field[19] = _ascebc['T'];
3090 *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
3091 *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
3092 *((unsigned int *) (&param_field[28])) = card->info.blkt.inter_packet_jumbo;
3093}
3094
3095static void
3096qeth_initialize_working_pool_list(struct qeth_card *card)
3097{
3098 struct qeth_buffer_pool_entry *entry;
3099
3100 QETH_DBF_TEXT(trace,5,"inwrklst");
3101
3102 list_for_each_entry(entry,
3103 &card->qdio.init_pool.entry_list, init_list) {
3104 qeth_put_buffer_pool_entry(card,entry);
3105 }
3106}
3107
3108static void
3109qeth_clear_working_pool_list(struct qeth_card *card)
3110{
3111 struct qeth_buffer_pool_entry *pool_entry, *tmp;
3112
3113 QETH_DBF_TEXT(trace,5,"clwrklst");
3114 list_for_each_entry_safe(pool_entry, tmp,
3115 &card->qdio.in_buf_pool.entry_list, list){
3116 list_del(&pool_entry->list);
3117 }
3118}
3119
3120static void
3121qeth_free_buffer_pool(struct qeth_card *card)
3122{
3123 struct qeth_buffer_pool_entry *pool_entry, *tmp;
3124 int i=0;
3125 QETH_DBF_TEXT(trace,5,"freepool");
3126 list_for_each_entry_safe(pool_entry, tmp,
3127 &card->qdio.init_pool.entry_list, init_list){
3128 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
3129 free_page((unsigned long)pool_entry->elements[i]);
3130 list_del(&pool_entry->init_list);
3131 kfree(pool_entry);
3132 }
3133}
3134
3135static int
3136qeth_alloc_buffer_pool(struct qeth_card *card)
3137{
3138 struct qeth_buffer_pool_entry *pool_entry;
3139 void *ptr;
3140 int i, j;
3141
3142 QETH_DBF_TEXT(trace,5,"alocpool");
3143 for (i = 0; i < card->qdio.init_pool.buf_count; ++i){
3144 pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
3145 if (!pool_entry){
3146 qeth_free_buffer_pool(card);
3147 return -ENOMEM;
3148 }
3149 for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){
3150			ptr = (void *) __get_free_page(GFP_KERNEL|GFP_DMA);
3151 if (!ptr) {
3152 while (j > 0)
3153 free_page((unsigned long)
3154 pool_entry->elements[--j]);
3155 kfree(pool_entry);
3156 qeth_free_buffer_pool(card);
3157 return -ENOMEM;
3158 }
3159 pool_entry->elements[j] = ptr;
3160 }
3161 list_add(&pool_entry->init_list,
3162 &card->qdio.init_pool.entry_list);
3163 }
3164 return 0;
3165}
3166
3167int
3168qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
3169{
3170 QETH_DBF_TEXT(trace, 2, "realcbp");
3171
3172 if ((card->state != CARD_STATE_DOWN) &&
3173 (card->state != CARD_STATE_RECOVER))
3174 return -EPERM;
3175
3176	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
3177 qeth_clear_working_pool_list(card);
3178 qeth_free_buffer_pool(card);
3179 card->qdio.in_buf_pool.buf_count = bufcnt;
3180 card->qdio.init_pool.buf_count = bufcnt;
3181 return qeth_alloc_buffer_pool(card);
3182}
3183
3184static int
3185qeth_alloc_qdio_buffers(struct qeth_card *card)
3186{
3187 int i, j;
3188
3189 QETH_DBF_TEXT(setup, 2, "allcqdbf");
3190
3191 if (card->qdio.state == QETH_QDIO_ALLOCATED)
3192 return 0;
3193
3194 card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
3195 GFP_KERNEL|GFP_DMA);
3196 if (!card->qdio.in_q)
3197		return -ENOMEM;
3198 QETH_DBF_TEXT(setup, 2, "inq");
3199 QETH_DBF_HEX(setup, 2, &card->qdio.in_q, sizeof(void *));
3200 memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
3201 /* give inbound qeth_qdio_buffers their qdio_buffers */
3202 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3203 card->qdio.in_q->bufs[i].buffer =
3204 &card->qdio.in_q->qdio_bufs[i];
3205 /* inbound buffer pool */
3206 if (qeth_alloc_buffer_pool(card)){
3207 kfree(card->qdio.in_q);
3208 return -ENOMEM;
3209 }
3210 /* outbound */
3211 card->qdio.out_qs =
3212 kmalloc(card->qdio.no_out_queues *
3213 sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
3214 if (!card->qdio.out_qs){
3215 qeth_free_buffer_pool(card);
3216 return -ENOMEM;
3217 }
3218 for (i = 0; i < card->qdio.no_out_queues; ++i){
3219 card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
3220					       GFP_KERNEL|GFP_DMA);
3221 if (!card->qdio.out_qs[i]){
3222 while (i > 0)
3223 kfree(card->qdio.out_qs[--i]);
3224 kfree(card->qdio.out_qs);
3225 return -ENOMEM;
3226 }
3227 QETH_DBF_TEXT_(setup, 2, "outq %i", i);
3228 QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *));
3229 memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
3230 card->qdio.out_qs[i]->queue_no = i;
3231 /* give outbound qeth_qdio_buffers their qdio_buffers */
3232 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
3233 card->qdio.out_qs[i]->bufs[j].buffer =
3234 &card->qdio.out_qs[i]->qdio_bufs[j];
3235 skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
3236 skb_list);
3237 INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
3238 }
3239 }
3240 card->qdio.state = QETH_QDIO_ALLOCATED;
3241 return 0;
3242}
3243
3244static void
3245qeth_free_qdio_buffers(struct qeth_card *card)
3246{
3247 int i, j;
3248
3249 QETH_DBF_TEXT(trace, 2, "freeqdbf");
3250 if (card->qdio.state == QETH_QDIO_UNINITIALIZED)
3251 return;
3252 kfree(card->qdio.in_q);
3253 /* inbound buffer pool */
3254 qeth_free_buffer_pool(card);
3255 /* free outbound qdio_qs */
3256 for (i = 0; i < card->qdio.no_out_queues; ++i){
3257 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
3258 qeth_clear_output_buffer(card->qdio.out_qs[i],
3259 &card->qdio.out_qs[i]->bufs[j]);
3260 kfree(card->qdio.out_qs[i]);
3261 }
3262 kfree(card->qdio.out_qs);
3263 card->qdio.state = QETH_QDIO_UNINITIALIZED;
3264}
3265
3266static void
3267qeth_clear_qdio_buffers(struct qeth_card *card)
3268{
3269 int i, j;
3270
3271 QETH_DBF_TEXT(trace, 2, "clearqdbf");
3272 /* clear outbound buffers to free skbs */
3273 for (i = 0; i < card->qdio.no_out_queues; ++i)
3274 if (card->qdio.out_qs[i]){
3275 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
3276 qeth_clear_output_buffer(card->qdio.out_qs[i],
3277 &card->qdio.out_qs[i]->bufs[j]);
3278 }
3279}
3280
3281static void
3282qeth_init_qdio_info(struct qeth_card *card)
3283{
3284 QETH_DBF_TEXT(setup, 4, "intqdinf");
3285 card->qdio.state = QETH_QDIO_UNINITIALIZED;
3286 /* inbound */
3287 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
3288 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
3289 card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
3290 INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
3291 INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
3292}
3293
3294static int
3295qeth_init_qdio_queues(struct qeth_card *card)
3296{
3297 int i, j;
3298 int rc;
3299
3300 QETH_DBF_TEXT(setup, 2, "initqdqs");
3301
3302 /* inbound queue */
3303 memset(card->qdio.in_q->qdio_bufs, 0,
3304 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
3305 qeth_initialize_working_pool_list(card);
3306	/* give only as many buffers to hardware as we have buffer pool entries */
3307 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
3308 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
3309 card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1;
3310 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
3311 card->qdio.in_buf_pool.buf_count - 1, NULL);
3312 if (rc) {
3313 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3314 return rc;
3315 }
3316 rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
3317 if (rc) {
3318 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3319 return rc;
3320 }
3321 /* outbound queue */
3322 for (i = 0; i < card->qdio.no_out_queues; ++i){
3323 memset(card->qdio.out_qs[i]->qdio_bufs, 0,
3324 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
3325 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
3326 qeth_clear_output_buffer(card->qdio.out_qs[i],
3327 &card->qdio.out_qs[i]->bufs[j]);
3328 }
3329 card->qdio.out_qs[i]->card = card;
3330 card->qdio.out_qs[i]->next_buf_to_fill = 0;
3331 card->qdio.out_qs[i]->do_pack = 0;
3332 atomic_set(&card->qdio.out_qs[i]->used_buffers,0);
3333 atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
3334 atomic_set(&card->qdio.out_qs[i]->state,
3335 QETH_OUT_Q_UNLOCKED);
3336 }
3337 return 0;
3338}
3339
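/*
 * qeth_qdio_establish() collects the physical addresses of all inbound
 * and outbound SBALs, fills a struct qdio_initialize (one input queue,
 * card->qdio.no_out_queues output queues, zero-copy SBALs, outbound
 * PCIs) and passes it to qdio_initialize().
 */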
3340static int
3341qeth_qdio_establish(struct qeth_card *card)
3342{
3343 struct qdio_initialize init_data;
3344 char *qib_param_field;
3345 struct qdio_buffer **in_sbal_ptrs;
3346 struct qdio_buffer **out_sbal_ptrs;
3347 int i, j, k;
3348 int rc;
3349
3350 QETH_DBF_TEXT(setup, 2, "qdioest");
3351
3352 qib_param_field = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
3353 GFP_KERNEL);
3354 if (!qib_param_field)
3355 return -ENOMEM;
3356
3357 memset(qib_param_field, 0, QDIO_MAX_BUFFERS_PER_Q * sizeof(char));
3358
3359 qeth_create_qib_param_field(card, qib_param_field);
3360 qeth_create_qib_param_field_blkt(card, qib_param_field);
3361
3362 in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
3363 GFP_KERNEL);
3364 if (!in_sbal_ptrs) {
3365 kfree(qib_param_field);
3366 return -ENOMEM;
3367 }
3368 for(i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3369 in_sbal_ptrs[i] = (struct qdio_buffer *)
3370 virt_to_phys(card->qdio.in_q->bufs[i].buffer);
3371
3372 out_sbal_ptrs =
3373 kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
3374 sizeof(void *), GFP_KERNEL);
3375 if (!out_sbal_ptrs) {
3376 kfree(in_sbal_ptrs);
3377 kfree(qib_param_field);
3378 return -ENOMEM;
3379 }
3380 for(i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
3381 for(j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k){
3382 out_sbal_ptrs[k] = (struct qdio_buffer *)
3383 virt_to_phys(card->qdio.out_qs[i]->
3384 bufs[j].buffer);
3385 }
3386
3387 memset(&init_data, 0, sizeof(struct qdio_initialize));
3388 init_data.cdev = CARD_DDEV(card);
3389 init_data.q_format = qeth_get_qdio_q_format(card);
3390 init_data.qib_param_field_format = 0;
3391 init_data.qib_param_field = qib_param_field;
3392 init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
3393 init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
3394 init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
3395 init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
3396 init_data.no_input_qs = 1;
3397 init_data.no_output_qs = card->qdio.no_out_queues;
3398 init_data.input_handler = (qdio_handler_t *)
3399 qeth_qdio_input_handler;
3400 init_data.output_handler = (qdio_handler_t *)
3401 qeth_qdio_output_handler;
3402 init_data.int_parm = (unsigned long) card;
3403 init_data.flags = QDIO_INBOUND_0COPY_SBALS |
3404 QDIO_OUTBOUND_0COPY_SBALS |
3405 QDIO_USE_OUTBOUND_PCIS;
3406 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
3407 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
3408
3409 if (!(rc = qdio_initialize(&init_data)))
3410 card->qdio.state = QETH_QDIO_ESTABLISHED;
3411
3412 kfree(out_sbal_ptrs);
3413 kfree(in_sbal_ptrs);
3414 kfree(qib_param_field);
3415 return rc;
3416}
3417
3418static int
3419qeth_qdio_activate(struct qeth_card *card)
3420{
3421 QETH_DBF_TEXT(setup,3,"qdioact");
3422 return qdio_activate(CARD_DDEV(card), 0);
3423}
3424
3425static int
3426qeth_clear_channel(struct qeth_channel *channel)
3427{
3428 unsigned long flags;
3429 struct qeth_card *card;
3430 int rc;
3431
3432 QETH_DBF_TEXT(trace,3,"clearch");
3433 card = CARD_FROM_CDEV(channel->ccwdev);
3434 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
3435 rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
3436 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
3437
3438 if (rc)
3439 return rc;
3440 rc = wait_event_interruptible_timeout(card->wait_q,
3441 channel->state==CH_STATE_STOPPED, QETH_TIMEOUT);
3442 if (rc == -ERESTARTSYS)
3443 return rc;
3444 if (channel->state != CH_STATE_STOPPED)
3445 return -ETIME;
3446 channel->state = CH_STATE_DOWN;
3447 return 0;
3448}
3449
3450static int
3451qeth_halt_channel(struct qeth_channel *channel)
3452{
3453 unsigned long flags;
3454 struct qeth_card *card;
3455 int rc;
3456
3457 QETH_DBF_TEXT(trace,3,"haltch");
3458 card = CARD_FROM_CDEV(channel->ccwdev);
3459 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
3460 rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
3461 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
3462
3463 if (rc)
3464 return rc;
3465 rc = wait_event_interruptible_timeout(card->wait_q,
3466 channel->state==CH_STATE_HALTED, QETH_TIMEOUT);
3467 if (rc == -ERESTARTSYS)
3468 return rc;
3469 if (channel->state != CH_STATE_HALTED)
3470 return -ETIME;
3471 return 0;
3472}
3473
3474static int
3475qeth_halt_channels(struct qeth_card *card)
3476{
3477	int rc1 = 0, rc2 = 0, rc3 = 0;
3478
3479 QETH_DBF_TEXT(trace,3,"haltchs");
3480 rc1 = qeth_halt_channel(&card->read);
3481 rc2 = qeth_halt_channel(&card->write);
3482 rc3 = qeth_halt_channel(&card->data);
3483 if (rc1)
3484 return rc1;
3485 if (rc2)
3486 return rc2;
3487 return rc3;
3488}
3489static int
3490qeth_clear_channels(struct qeth_card *card)
3491{
3492	int rc1 = 0, rc2 = 0, rc3 = 0;
3493
3494 QETH_DBF_TEXT(trace,3,"clearchs");
3495 rc1 = qeth_clear_channel(&card->read);
3496 rc2 = qeth_clear_channel(&card->write);
3497 rc3 = qeth_clear_channel(&card->data);
3498 if (rc1)
3499 return rc1;
3500 if (rc2)
3501 return rc2;
3502 return rc3;
3503}
3504
3505static int
3506qeth_clear_halt_card(struct qeth_card *card, int halt)
3507{
3508 int rc = 0;
3509
3510 QETH_DBF_TEXT(trace,3,"clhacrd");
3511 QETH_DBF_HEX(trace, 3, &card, sizeof(void *));
3512
3513 if (halt)
3514 rc = qeth_halt_channels(card);
3515 if (rc)
3516 return rc;
3517 return qeth_clear_channels(card);
3518}
3519
3520static int
3521qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
3522{
3523 int rc = 0;
3524
3525 QETH_DBF_TEXT(trace,3,"qdioclr");
3526 if (card->qdio.state == QETH_QDIO_ESTABLISHED){
3527 if ((rc = qdio_cleanup(CARD_DDEV(card),
3528 (card->info.type == QETH_CARD_TYPE_IQD) ?
3529 QDIO_FLAG_CLEANUP_USING_HALT :
3530 QDIO_FLAG_CLEANUP_USING_CLEAR)))
3531 QETH_DBF_TEXT_(trace, 3, "1err%d", rc);
3532 card->qdio.state = QETH_QDIO_ALLOCATED;
3533 }
3534 if ((rc = qeth_clear_halt_card(card, use_halt)))
3535 QETH_DBF_TEXT_(trace, 3, "2err%d", rc);
3536 card->state = CARD_STATE_DOWN;
3537 return rc;
3538}
3539
3540static int
3541qeth_dm_act(struct qeth_card *card)
3542{
3543 int rc;
3544 struct qeth_cmd_buffer *iob;
3545
3546 QETH_DBF_TEXT(setup,2,"dmact");
3547
3548 iob = qeth_wait_for_buffer(&card->write);
3549 memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
3550
3551 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
3552 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
3553 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
3554 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3555 rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
3556 return rc;
3557}
3558
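/*
 * MPC bring-up sequence: issue the first read, then CM_ENABLE,
 * CM_SETUP, ULP_ENABLE, ULP_SETUP, allocate and establish the QDIO
 * queues, activate them and finally send DM_ACT; any failure clears
 * the card via qeth_qdio_clear_card().
 */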
3559static int
3560qeth_mpc_initialize(struct qeth_card *card)
3561{
3562 int rc;
3563
3564 QETH_DBF_TEXT(setup,2,"mpcinit");
3565
3566 if ((rc = qeth_issue_next_read(card))){
3567 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3568 return rc;
3569 }
3570 if ((rc = qeth_cm_enable(card))){
3571 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3572		goto out_qdio;
3573	}
3574	if ((rc = qeth_cm_setup(card))){
3575		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
3576		goto out_qdio;
3577	}
3578	if ((rc = qeth_ulp_enable(card))){
3579		QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
3580		goto out_qdio;
3581	}
3582	if ((rc = qeth_ulp_setup(card))){
3583		QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3584		goto out_qdio;
3585	}
3586	if ((rc = qeth_alloc_qdio_buffers(card))){
3587		QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3588		goto out_qdio;
3589 }
3590 if ((rc = qeth_qdio_establish(card))){
3591 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
3592 qeth_free_qdio_buffers(card);
3593 goto out_qdio;
3594 }
3595 if ((rc = qeth_qdio_activate(card))){
3596 QETH_DBF_TEXT_(setup, 2, "7err%d", rc);
3597 goto out_qdio;
3598 }
3599 if ((rc = qeth_dm_act(card))){
3600 QETH_DBF_TEXT_(setup, 2, "8err%d", rc);
3601 goto out_qdio;
3602 }
3603
3604 return 0;
3605out_qdio:
3606	qeth_qdio_clear_card(card, card->info.type!=QETH_CARD_TYPE_IQD);
3607 return rc;
3608}
3609
3610static struct net_device *
3611qeth_get_netdevice(enum qeth_card_types type, enum qeth_link_types linktype)
3612{
3613 struct net_device *dev = NULL;
3614
3615 switch (type) {
3616 case QETH_CARD_TYPE_OSAE:
3617 switch (linktype) {
3618 case QETH_LINK_TYPE_LANE_TR:
3619 case QETH_LINK_TYPE_HSTR:
3620#ifdef CONFIG_TR
3621 dev = alloc_trdev(0);
3622#endif /* CONFIG_TR */
3623 break;
3624 default:
3625 dev = alloc_etherdev(0);
3626 }
3627 break;
3628 case QETH_CARD_TYPE_IQD:
3629 dev = alloc_netdev(0, "hsi%d", ether_setup);
3630 break;
3631 case QETH_CARD_TYPE_OSN:
3632 dev = alloc_netdev(0, "osn%d", ether_setup);
3633 break;
3634 default:
3635 dev = alloc_etherdev(0);
3636 }
3637 return dev;
3638}
3639
3640/*hard_header fake function; used in case fake_ll is set */
3641static int
3642qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
3643 unsigned short type, void *daddr, void *saddr,
3644 unsigned len)
3645{
3646 if(dev->type == ARPHRD_IEEE802_TR){
3647 struct trh_hdr *hdr;
3648 hdr = (struct trh_hdr *)skb_push(skb, QETH_FAKE_LL_LEN_TR);
3649 memcpy(hdr->saddr, dev->dev_addr, TR_ALEN);
3650 memcpy(hdr->daddr, "FAKELL", TR_ALEN);
3651 return QETH_FAKE_LL_LEN_TR;
3652
3653 } else {
3654 struct ethhdr *hdr;
3655 hdr = (struct ethhdr *)skb_push(skb, QETH_FAKE_LL_LEN_ETH);
3656 memcpy(hdr->h_source, dev->dev_addr, ETH_ALEN);
3657 memcpy(hdr->h_dest, "FAKELL", ETH_ALEN);
3658 if (type != ETH_P_802_3)
3659 hdr->h_proto = htons(type);
3660 else
3661 hdr->h_proto = htons(len);
3662 return QETH_FAKE_LL_LEN_ETH;
3663
3664	}
3665}
3666
3667static inline int
3668qeth_send_packet(struct qeth_card *, struct sk_buff *);
3669
3670static int
3671qeth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
3672{
3673 int rc;
3674 struct qeth_card *card;
3675
3676 QETH_DBF_TEXT(trace, 6, "hrdstxmi");
3677 card = (struct qeth_card *)dev->priv;
3678 if (skb==NULL) {
3679 card->stats.tx_dropped++;
3680 card->stats.tx_errors++;
3681 /* return OK; otherwise ksoftirqd goes to 100% */
3682 return NETDEV_TX_OK;
3683 }
3684 if ((card->state != CARD_STATE_UP) || !card->lan_online) {
3685 card->stats.tx_dropped++;
3686 card->stats.tx_errors++;
3687 card->stats.tx_carrier_errors++;
3688 dev_kfree_skb_any(skb);
3689 /* return OK; otherwise ksoftirqd goes to 100% */
3690 return NETDEV_TX_OK;
3691 }
3692#ifdef CONFIG_QETH_PERF_STATS
3693 card->perf_stats.outbound_cnt++;
3694 card->perf_stats.outbound_start_time = qeth_get_micros();
3695#endif
3696 netif_stop_queue(dev);
3697 if ((rc = qeth_send_packet(card, skb))) {
3698 if (rc == -EBUSY) {
3699 return NETDEV_TX_BUSY;
3700 } else {
3701 card->stats.tx_errors++;
3702 card->stats.tx_dropped++;
3703 dev_kfree_skb_any(skb);
3704 /*set to OK; otherwise ksoftirqd goes to 100% */
3705 rc = NETDEV_TX_OK;
3706 }
3707 }
3708 netif_wake_queue(dev);
3709#ifdef CONFIG_QETH_PERF_STATS
3710 card->perf_stats.outbound_time += qeth_get_micros() -
3711 card->perf_stats.outbound_start_time;
3712#endif
3713 return rc;
3714}
3715
3716static int
3717qeth_verify_vlan_dev(struct net_device *dev, struct qeth_card *card)
3718{
3719 int rc = 0;
3720#ifdef CONFIG_QETH_VLAN
3721 struct vlan_group *vg;
3722 int i;
3723
3724 if (!(vg = card->vlangrp))
3725 return rc;
3726
3727 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){
3728 if (vg->vlan_devices[i] == dev){
3729 rc = QETH_VLAN_CARD;
3730 break;
3731 }
3732 }
6c6b3e7c
FP
3733 if (rc && !(VLAN_DEV_INFO(dev)->real_dev->priv == (void *)card))
3734 return 0;
3735
1da177e4
LT
3736#endif
3737 return rc;
3738}
3739
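/*
 * Check whether a net_device belongs to one of our cards, either directly
 * (QETH_REAL_CARD) or as a VLAN device on top of it (QETH_VLAN_CARD);
 * returns 0 if the device is not related to any qeth card.
 */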
3740static int
3741qeth_verify_dev(struct net_device *dev)
3742{
3743 struct qeth_card *card;
3744 unsigned long flags;
3745 int rc = 0;
3746
3747 read_lock_irqsave(&qeth_card_list.rwlock, flags);
3748 list_for_each_entry(card, &qeth_card_list.list, list){
3749 if (card->dev == dev){
3750 rc = QETH_REAL_CARD;
3751 break;
3752 }
3753 rc = qeth_verify_vlan_dev(dev, card);
3754 if (rc)
3755 break;
3756 }
3757 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
3758
3759 return rc;
3760}
3761
3762static struct qeth_card *
3763qeth_get_card_from_dev(struct net_device *dev)
3764{
3765 struct qeth_card *card = NULL;
3766 int rc;
3767
3768 rc = qeth_verify_dev(dev);
3769 if (rc == QETH_REAL_CARD)
3770 card = (struct qeth_card *)dev->priv;
3771 else if (rc == QETH_VLAN_CARD)
3772 card = (struct qeth_card *)
3773 VLAN_DEV_INFO(dev)->real_dev->priv;
3774
3775 QETH_DBF_TEXT_(trace, 4, "%d", rc);
3776 return card ;
3777}
3778
3779static void
3780qeth_tx_timeout(struct net_device *dev)
3781{
3782 struct qeth_card *card;
3783
3784 card = (struct qeth_card *) dev->priv;
3785 card->stats.tx_errors++;
3786 qeth_schedule_recovery(card);
3787}
3788
3789static int
3790qeth_open(struct net_device *dev)
3791{
3792 struct qeth_card *card;
3793
3794 QETH_DBF_TEXT(trace, 4, "qethopen");
3795
3796 card = (struct qeth_card *) dev->priv;
3797
3798 if (card->state != CARD_STATE_SOFTSETUP)
3799 return -ENODEV;
3800
500f83ab
UB
3801 if ( (card->info.type != QETH_CARD_TYPE_OSN) &&
3802 (card->options.layer2) &&
e08d88cc 3803 (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) {
1da177e4
LT
3804 QETH_DBF_TEXT(trace,4,"nomacadr");
3805 return -EPERM;
3806 }
3807 card->dev->flags |= IFF_UP;
3808 netif_start_queue(dev);
3809 card->data.state = CH_STATE_UP;
3810 card->state = CARD_STATE_UP;
3811
3812 if (!card->lan_online){
3813 if (netif_carrier_ok(dev))
3814 netif_carrier_off(dev);
3815 }
3816 return 0;
3817}
3818
3819static int
3820qeth_stop(struct net_device *dev)
3821{
3822 struct qeth_card *card;
3823
3824 QETH_DBF_TEXT(trace, 4, "qethstop");
3825
3826 card = (struct qeth_card *) dev->priv;
3827
3828 netif_stop_queue(dev);
3829 card->dev->flags &= ~IFF_UP;
3830 if (card->state == CARD_STATE_UP)
3831 card->state = CARD_STATE_SOFTSETUP;
3832 return 0;
3833}
3834
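/*
 * Derive the cast type (RTN_UNSPEC/RTN_MULTICAST/RTN_BROADCAST/...) for an
 * outgoing skb: prefer the neighbour entry, then the IPv4/IPv6 destination
 * address, and finally the link-level destination MAC.
 */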
3835static inline int
3836qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3837{
3838 int cast_type = RTN_UNSPEC;
3839
500f83ab
UB
3840 if (card->info.type == QETH_CARD_TYPE_OSN)
3841 return cast_type;
3842
1da177e4
LT
3843 if (skb->dst && skb->dst->neighbour){
3844 cast_type = skb->dst->neighbour->type;
3845 if ((cast_type == RTN_BROADCAST) ||
3846 (cast_type == RTN_MULTICAST) ||
3847 (cast_type == RTN_ANYCAST))
3848 return cast_type;
3849 else
3850 return RTN_UNSPEC;
3851 }
3852 /* try something else */
3853 if (skb->protocol == ETH_P_IPV6)
3854 return (skb->nh.raw[24] == 0xff) ? RTN_MULTICAST : 0;
3855 else if (skb->protocol == ETH_P_IP)
3856 return ((skb->nh.raw[16] & 0xf0) == 0xe0) ? RTN_MULTICAST : 0;
3857 	/* non-IP traffic: inspect the link-level destination address */
3858 if (!memcmp(skb->data, skb->dev->broadcast, 6))
3859 return RTN_BROADCAST;
3860 else {
3861 u16 hdr_mac;
3862
3863 hdr_mac = *((u16 *)skb->data);
3864 /* tr multicast? */
3865 switch (card->info.link_type) {
3866 case QETH_LINK_TYPE_HSTR:
3867 case QETH_LINK_TYPE_LANE_TR:
3868 if ((hdr_mac == QETH_TR_MAC_NC) ||
3869 (hdr_mac == QETH_TR_MAC_C))
3870 return RTN_MULTICAST;
3871 /* eth or so multicast? */
3872 default:
3873 if ((hdr_mac == QETH_ETH_MAC_V4) ||
3874 (hdr_mac == QETH_ETH_MAC_V6))
3875 return RTN_MULTICAST;
3876 }
3877 }
3878 return cast_type;
3879}
3880
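/*
 * Select the outbound queue.  With four queues and TOS-based priority
 * queueing, the IPv4 TOS bits map to a queue (IP_TOS_NOTIMPORTANT -> 3,
 * IP_TOS_HIGHRELIABILITY -> 2, IP_TOS_HIGHTHROUGHPUT -> 1,
 * IP_TOS_LOWDELAY -> 0); in precedence mode the queue is 3 - (tos >> 6).
 * Everything else ends up on the default outbound queue.
 */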
3881static inline int
3882qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3883 int ipv, int cast_type)
3884{
3885 if (!ipv && (card->info.type == QETH_CARD_TYPE_OSAE))
3886 return card->qdio.default_out_queue;
3887 switch (card->qdio.no_out_queues) {
3888 case 4:
3889 if (cast_type && card->info.is_multicast_different)
3890 return card->info.is_multicast_different &
3891 (card->qdio.no_out_queues - 1);
3892 if (card->qdio.do_prio_queueing && (ipv == 4)) {
3893 if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_TOS){
3894 if (skb->nh.iph->tos & IP_TOS_NOTIMPORTANT)
3895 return 3;
3896 if (skb->nh.iph->tos & IP_TOS_HIGHRELIABILITY)
3897 return 2;
3898 if (skb->nh.iph->tos & IP_TOS_HIGHTHROUGHPUT)
3899 return 1;
3900 if (skb->nh.iph->tos & IP_TOS_LOWDELAY)
3901 return 0;
3902 }
3903 if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_PREC)
3904 return 3 - (skb->nh.iph->tos >> 6);
3905 } else if (card->qdio.do_prio_queueing && (ipv == 6)) {
3906 /* TODO: IPv6!!! */
3907 }
3908 return card->qdio.default_out_queue;
3909 case 1: /* fallthrough for single-out-queue 1920-device */
3910 default:
3911 return card->qdio.default_out_queue;
3912 }
3913}
3914
3915static inline int
3916qeth_get_ip_version(struct sk_buff *skb)
3917{
3918 switch (skb->protocol) {
3919 case ETH_P_IPV6:
3920 return 6;
3921 case ETH_P_IP:
3922 return 4;
3923 default:
3924 return 0;
3925 }
3926}
3927
3928static inline int
3929qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb,
3930 struct qeth_hdr **hdr, int ipv)
3931{
500f83ab 3932 int rc = 0;
1da177e4
LT
3933#ifdef CONFIG_QETH_VLAN
3934 u16 *tag;
3935#endif
3936
3937 QETH_DBF_TEXT(trace, 6, "prepskb");
500f83ab
UB
3938 if (card->info.type == QETH_CARD_TYPE_OSN) {
3939 *hdr = (struct qeth_hdr *)(*skb)->data;
3940 return rc;
3941 }
9cb90de8
FP
3942 rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
3943 if (rc)
3944 return rc;
1da177e4
LT
3945#ifdef CONFIG_QETH_VLAN
3946 if (card->vlangrp && vlan_tx_tag_present(*skb) &&
3947 ((ipv == 6) || card->options.layer2) ) {
3948 /*
3949 * Move the mac addresses (6 bytes src, 6 bytes dest)
3950 * to the beginning of the new header. We are using three
3951 * memcpys instead of one memmove to save cycles.
3952 */
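	/*
	 * Resulting layout after the skb_push of VLAN_HLEN (4) bytes:
	 *   0..5   destination MAC (moved up by 4 bytes)
	 *   6..11  source MAC      (moved up by 4 bytes)
	 *   12..13 0x8100 (ETH_P_8021Q)
	 *   14..15 VLAN ID
	 *   16..17 original ethertype/length, unchanged
	 */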
3953 skb_push(*skb, VLAN_HLEN);
3954 memcpy((*skb)->data, (*skb)->data + 4, 4);
3955 memcpy((*skb)->data + 4, (*skb)->data + 8, 4);
3956 memcpy((*skb)->data + 8, (*skb)->data + 12, 4);
3957 tag = (u16 *)((*skb)->data + 12);
3958 /*
3959 * first two bytes = ETH_P_8021Q (0x8100)
3960 * second two bytes = VLANID
3961 */
3962 *tag = __constant_htons(ETH_P_8021Q);
3963 *(tag + 1) = htons(vlan_tx_tag_get(*skb));
3964 }
3965#endif
3966 *hdr = (struct qeth_hdr *)
3967 qeth_push_skb(card, skb, sizeof(struct qeth_hdr));
3968 	if (*hdr == NULL)
3969 return -EINVAL;
3970 return 0;
3971}
3972
3973static inline u8
3974qeth_get_qeth_hdr_flags4(int cast_type)
3975{
3976 if (cast_type == RTN_MULTICAST)
3977 return QETH_CAST_MULTICAST;
3978 if (cast_type == RTN_BROADCAST)
3979 return QETH_CAST_BROADCAST;
3980 return QETH_CAST_UNICAST;
3981}
3982
3983static inline u8
3984qeth_get_qeth_hdr_flags6(int cast_type)
3985{
3986 u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
3987 if (cast_type == RTN_MULTICAST)
3988 return ct | QETH_CAST_MULTICAST;
3989 if (cast_type == RTN_ANYCAST)
3990 return ct | QETH_CAST_ANYCAST;
3991 if (cast_type == RTN_BROADCAST)
3992 return ct | QETH_CAST_BROADCAST;
3993 return ct | QETH_CAST_UNICAST;
3994}
3995
3996static inline void
3997qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr,
3998 struct sk_buff *skb)
3999{
4000 __u16 hdr_mac;
4001
4002 if (!memcmp(skb->data+QETH_HEADER_SIZE,
4003 skb->dev->broadcast,6)) { /* broadcast? */
4004 *(__u32 *)hdr->hdr.l2.flags |=
4005 QETH_LAYER2_FLAG_BROADCAST << 8;
4006 return;
4007 }
4008 hdr_mac=*((__u16*)skb->data);
4009 /* tr multicast? */
4010 switch (card->info.link_type) {
4011 case QETH_LINK_TYPE_HSTR:
4012 case QETH_LINK_TYPE_LANE_TR:
4013 if ((hdr_mac == QETH_TR_MAC_NC) ||
4014 (hdr_mac == QETH_TR_MAC_C) )
4015 *(__u32 *)hdr->hdr.l2.flags |=
4016 QETH_LAYER2_FLAG_MULTICAST << 8;
4017 else
4018 *(__u32 *)hdr->hdr.l2.flags |=
4019 QETH_LAYER2_FLAG_UNICAST << 8;
4020 break;
4021 /* eth or so multicast? */
4022 default:
4023 if ( (hdr_mac==QETH_ETH_MAC_V4) ||
4024 (hdr_mac==QETH_ETH_MAC_V6) )
4025 *(__u32 *)hdr->hdr.l2.flags |=
4026 QETH_LAYER2_FLAG_MULTICAST << 8;
4027 else
4028 *(__u32 *)hdr->hdr.l2.flags |=
4029 QETH_LAYER2_FLAG_UNICAST << 8;
4030 }
4031}
4032
4033static inline void
4034qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
4035 struct sk_buff *skb, int cast_type)
4036{
4037 memset(hdr, 0, sizeof(struct qeth_hdr));
4038 hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
4039
4040 /* set byte 0 to "0x02" and byte 3 to casting flags */
4041 if (cast_type==RTN_MULTICAST)
4042 *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_MULTICAST << 8;
4043 else if (cast_type==RTN_BROADCAST)
4044 *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_BROADCAST << 8;
4045 else
4046 qeth_layer2_get_packet_type(card, hdr, skb);
4047
4048 hdr->hdr.l2.pkt_length = skb->len-QETH_HEADER_SIZE;
4049#ifdef CONFIG_QETH_VLAN
4050 /* VSWITCH relies on the VLAN
4051 * information to be present in
4052 * the QDIO header */
4053 if ((card->vlangrp != NULL) &&
4054 vlan_tx_tag_present(skb)) {
4055 *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_VLAN << 8;
4056 hdr->hdr.l2.vlan_id = vlan_tx_tag_get(skb);
4057 }
4058#endif
4059}
4060
4061void
4062qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
4063 struct sk_buff *skb, int ipv, int cast_type)
4064{
4065 QETH_DBF_TEXT(trace, 6, "fillhdr");
4066
4067 memset(hdr, 0, sizeof(struct qeth_hdr));
4068 if (card->options.layer2) {
4069 qeth_layer2_fill_header(card, hdr, skb, cast_type);
4070 return;
4071 }
4072 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
4073 hdr->hdr.l3.ext_flags = 0;
4074#ifdef CONFIG_QETH_VLAN
4075 /*
4076 	 * add the VLAN information before this location is overwritten with
4077 	 * the next hop ip; v6 uses passthrough, v4 sets the tag in the QDIO header.
4078 */
4079 if (card->vlangrp && vlan_tx_tag_present(skb)) {
4080 hdr->hdr.l3.ext_flags = (ipv == 4) ?
4081 QETH_HDR_EXT_VLAN_FRAME :
4082 QETH_HDR_EXT_INCLUDE_VLAN_TAG;
4083 hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
4084 }
4085#endif /* CONFIG_QETH_VLAN */
4086 hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
4087 if (ipv == 4) { /* IPv4 */
4088 hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags4(cast_type);
4089 memset(hdr->hdr.l3.dest_addr, 0, 12);
4090 if ((skb->dst) && (skb->dst->neighbour)) {
4091 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
4092 *((u32 *) skb->dst->neighbour->primary_key);
4093 } else {
4094 /* fill in destination address used in ip header */
4095 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) = skb->nh.iph->daddr;
4096 }
4097 } else if (ipv == 6) { /* IPv6 or passthru */
4098 hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags6(cast_type);
4099 if ((skb->dst) && (skb->dst->neighbour)) {
4100 memcpy(hdr->hdr.l3.dest_addr,
4101 skb->dst->neighbour->primary_key, 16);
4102 } else {
4103 /* fill in destination address used in ip header */
4104 memcpy(hdr->hdr.l3.dest_addr, &skb->nh.ipv6h->daddr, 16);
4105 }
4106 } else { /* passthrough */
e23dd9cd 4107 if((skb->dev->type == ARPHRD_IEEE802_TR) &&
9a455819
FP
4108 !memcmp(skb->data + sizeof(struct qeth_hdr) +
4109 sizeof(__u16), skb->dev->broadcast, 6)) {
4110 hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
4111 QETH_HDR_PASSTHRU;
e23dd9cd 4112 } else if (!memcmp(skb->data + sizeof(struct qeth_hdr),
1da177e4 4113 skb->dev->broadcast, 6)) { /* broadcast? */
e23dd9cd
FP
4114 hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
4115 QETH_HDR_PASSTHRU;
1da177e4
LT
4116 } else {
4117 hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
4118 QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
4119 QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
4120 }
4121 }
4122}
4123
1da177e4
LT
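/*
 * Map the linear skb data into consecutive SBAL elements, starting a new
 * element at every page boundary.  The first/middle/last fragment flags are
 * set accordingly; for TSO (is_tso != 0) the header has already been placed
 * in a separate first element, so the data here starts as a middle fragment.
 */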
4124static inline void
4125__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
05e08a2a 4126 int is_tso, int *next_element_to_fill)
1da177e4
LT
4127{
4128 int length = skb->len;
4129 int length_here;
4130 int element;
4131 char *data;
05e08a2a 4132 int first_lap ;
1da177e4
LT
4133
4134 element = *next_element_to_fill;
4135 data = skb->data;
05e08a2a
FP
4136 first_lap = (is_tso == 0 ? 1 : 0);
4137
1da177e4
LT
4138 while (length > 0) {
4139 /* length_here is the remaining amount of data in this page */
4140 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
4141 if (length < length_here)
4142 length_here = length;
05e08a2a 4143
1da177e4
LT
4144 buffer->element[element].addr = data;
4145 buffer->element[element].length = length_here;
4146 length -= length_here;
05e08a2a 4147 if (!length) {
1da177e4
LT
4148 if (first_lap)
4149 buffer->element[element].flags = 0;
4150 else
4151 buffer->element[element].flags =
4152 SBAL_FLAGS_LAST_FRAG;
4153 } else {
4154 if (first_lap)
4155 buffer->element[element].flags =
4156 SBAL_FLAGS_FIRST_FRAG;
4157 else
4158 buffer->element[element].flags =
4159 SBAL_FLAGS_MIDDLE_FRAG;
4160 }
4161 data += length_here;
4162 element++;
4163 first_lap = 0;
4164 }
4165 *next_element_to_fill = element;
4166}
4167
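/*
 * Put an skb into an outbound buffer: queue it on the buffer's skb_list,
 * place a TSO header (if present) into its own first element and fill the
 * remaining elements.  In non-packing mode the buffer is primed right away;
 * in packing mode it is only primed once all its elements are used up.
 * Returns the number of buffers to flush (0 or 1).
 */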
4168static inline int
4169qeth_fill_buffer(struct qeth_qdio_out_q *queue,
4170 struct qeth_qdio_out_buffer *buf,
4171 struct sk_buff *skb)
4172{
4173 struct qdio_buffer *buffer;
05e08a2a
FP
4174 struct qeth_hdr_tso *hdr;
4175 int flush_cnt = 0, hdr_len, large_send = 0;
1da177e4
LT
4176
4177 QETH_DBF_TEXT(trace, 6, "qdfillbf");
05e08a2a 4178
1da177e4
LT
4179 buffer = buf->buffer;
4180 atomic_inc(&skb->users);
4181 skb_queue_tail(&buf->skb_list, skb);
05e08a2a
FP
4182
4183 hdr = (struct qeth_hdr_tso *) skb->data;
4185 	/* check for TSO first ... */
4185 if (hdr->hdr.hdr.l3.id == QETH_HEADER_TYPE_TSO) {
4186 int element = buf->next_element_to_fill;
4187
4188 hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
4189 /*fill first buffer entry only with header information */
4190 buffer->element[element].addr = skb->data;
4191 buffer->element[element].length = hdr_len;
4192 buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
4193 buf->next_element_to_fill++;
4194 skb->data += hdr_len;
4195 skb->len -= hdr_len;
4196 large_send = 1;
4197 }
1da177e4 4198 if (skb_shinfo(skb)->nr_frags == 0)
05e08a2a 4199 __qeth_fill_buffer(skb, buffer, large_send,
1da177e4
LT
4200 (int *)&buf->next_element_to_fill);
4201 else
05e08a2a 4202 __qeth_fill_buffer_frag(skb, buffer, large_send,
1da177e4
LT
4203 (int *)&buf->next_element_to_fill);
4204
4205 if (!queue->do_pack) {
4206 QETH_DBF_TEXT(trace, 6, "fillbfnp");
4207 /* set state to PRIMED -> will be flushed */
4208 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
4209 flush_cnt = 1;
4210 } else {
4211 QETH_DBF_TEXT(trace, 6, "fillbfpa");
4212#ifdef CONFIG_QETH_PERF_STATS
4213 queue->card->perf_stats.skbs_sent_pack++;
4214#endif
4215 if (buf->next_element_to_fill >=
4216 QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
4217 /*
4218 			 * packed buffer is full -> set state PRIMED
4219 * -> will be flushed
4220 */
4221 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
4222 flush_cnt = 1;
4223 }
4224 }
4225 return flush_cnt;
4226}
4227
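/*
 * Fast path used for IQD (HiperSockets) devices: grab the queue lock via
 * compare-and-swap, make sure the next buffer is really empty, fill it (or
 * let the EDDP code fill several buffers) and flush immediately.
 */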
4228static inline int
4229qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4230 struct sk_buff *skb, struct qeth_hdr *hdr,
4231 int elements_needed,
4232 struct qeth_eddp_context *ctx)
4233{
4234 struct qeth_qdio_out_buffer *buffer;
4235 int buffers_needed = 0;
4236 int flush_cnt = 0;
4237 int index;
4238
4239 QETH_DBF_TEXT(trace, 6, "dosndpfa");
4240
4241 /* spin until we get the queue ... */
4242 while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
4243 QETH_OUT_Q_LOCKED,
4244 &queue->state));
4245 /* ... now we've got the queue */
4246 index = queue->next_buf_to_fill;
4247 buffer = &queue->bufs[queue->next_buf_to_fill];
4248 /*
4249 * check if buffer is empty to make sure that we do not 'overtake'
4250 * ourselves and try to fill a buffer that is already primed
4251 */
4252 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
4253 card->stats.tx_dropped++;
4254 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4255 return -EBUSY;
4256 }
4257 if (ctx == NULL)
4258 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
4259 QDIO_MAX_BUFFERS_PER_Q;
4260 else {
4261 buffers_needed = qeth_eddp_check_buffers_for_context(queue,ctx);
4262 if (buffers_needed < 0) {
4263 card->stats.tx_dropped++;
4264 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4265 return -EBUSY;
4266 }
4267 queue->next_buf_to_fill =
4268 (queue->next_buf_to_fill + buffers_needed) %
4269 QDIO_MAX_BUFFERS_PER_Q;
4270 }
4271 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4272 if (ctx == NULL) {
4273 qeth_fill_buffer(queue, buffer, skb);
4274 qeth_flush_buffers(queue, 0, index, 1);
4275 } else {
4276 flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
4277 WARN_ON(buffers_needed != flush_cnt);
4278 qeth_flush_buffers(queue, 0, index, flush_cnt);
4279 }
4280 return 0;
4281}
4282
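/*
 * Regular send path with packing support: take the queue lock, check
 * whether the queue should switch to packing mode, append the skb (or EDDP
 * context) to the current buffer and flush primed buffers.  While unlocking,
 * react to requests from the output handler (leave packing mode or flush a
 * buffer to get another PCI flag out).
 */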
4283static inline int
4284qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4285 struct sk_buff *skb, struct qeth_hdr *hdr,
4286 int elements_needed, struct qeth_eddp_context *ctx)
4287{
4288 struct qeth_qdio_out_buffer *buffer;
4289 int start_index;
4290 int flush_count = 0;
4291 int do_pack = 0;
4292 int tmp;
4293 int rc = 0;
4294
4295 QETH_DBF_TEXT(trace, 6, "dosndpkt");
4296
4297 /* spin until we get the queue ... */
4298 while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
4299 QETH_OUT_Q_LOCKED,
4300 &queue->state));
4301 start_index = queue->next_buf_to_fill;
4302 buffer = &queue->bufs[queue->next_buf_to_fill];
4303 /*
4304 * check if buffer is empty to make sure that we do not 'overtake'
4305 * ourselves and try to fill a buffer that is already primed
4306 */
4307 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
4308 card->stats.tx_dropped++;
4309 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4310 return -EBUSY;
4311 }
4312 /* check if we need to switch packing state of this queue */
4313 qeth_switch_to_packing_if_needed(queue);
4314 if (queue->do_pack){
4315 do_pack = 1;
4316 if (ctx == NULL) {
4317 /* does packet fit in current buffer? */
4318 if((QETH_MAX_BUFFER_ELEMENTS(card) -
4319 buffer->next_element_to_fill) < elements_needed){
4320 /* ... no -> set state PRIMED */
4321 atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED);
4322 flush_count++;
4323 queue->next_buf_to_fill =
4324 (queue->next_buf_to_fill + 1) %
4325 QDIO_MAX_BUFFERS_PER_Q;
4326 buffer = &queue->bufs[queue->next_buf_to_fill];
4327 /* we did a step forward, so check buffer state
4328 * again */
4329 if (atomic_read(&buffer->state) !=
4330 QETH_QDIO_BUF_EMPTY){
4331 card->stats.tx_dropped++;
4332 qeth_flush_buffers(queue, 0, start_index, flush_count);
4333 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4334 return -EBUSY;
4335 }
4336 }
4337 } else {
4338 /* check if we have enough elements (including following
4339 * free buffers) to handle eddp context */
4340 if (qeth_eddp_check_buffers_for_context(queue,ctx) < 0){
4341 				PRINT_WARN("eddp: not enough buffer elements, dropping packet\n");
4342 card->stats.tx_dropped++;
4343 rc = -EBUSY;
4344 goto out;
4345 }
4346 }
4347 }
4348 if (ctx == NULL)
4349 tmp = qeth_fill_buffer(queue, buffer, skb);
4350 else {
4351 tmp = qeth_eddp_fill_buffer(queue,ctx,queue->next_buf_to_fill);
4352 if (tmp < 0) {
4353 			PRINT_WARN("eddp: could not fill buffer, dropping packet\n");
4354 card->stats.tx_dropped++;
4355 			rc = -EBUSY;
4356 goto out;
4357 }
4358 }
4359 queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
4360 QDIO_MAX_BUFFERS_PER_Q;
4361 flush_count += tmp;
4362out:
4363 if (flush_count)
4364 qeth_flush_buffers(queue, 0, start_index, flush_count);
d805d7c6
FP
4365 else if (!atomic_read(&queue->set_pci_flags_count))
4366 atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
1da177e4
LT
4367 /*
4368 * queue->state will go from LOCKED -> UNLOCKED or from
4369 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
4370 * (switch packing state or flush buffer to get another pci flag out).
4371 * In that case we will enter this loop
4372 */
4373 while (atomic_dec_return(&queue->state)){
4374 flush_count = 0;
4375 start_index = queue->next_buf_to_fill;
4376 /* check if we can go back to non-packing state */
4377 flush_count += qeth_switch_to_nonpacking_if_needed(queue);
4378 /*
4379 * check if we need to flush a packing buffer to get a pci
4380 * flag out on the queue
4381 */
4382 if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
4383 flush_count += qeth_flush_buffers_on_no_pci(queue);
4384 if (flush_count)
4385 qeth_flush_buffers(queue, 0, start_index, flush_count);
4386 }
4387 /* at this point the queue is UNLOCKED again */
4388#ifdef CONFIG_QETH_PERF_STATS
4389 if (do_pack)
4390 queue->card->perf_stats.bufs_sent_pack += flush_count;
4391#endif /* CONFIG_QETH_PERF_STATS */
4392
4393 return rc;
4394}
4395
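/*
 * Number of SBAL elements needed for this skb: the number of page fragments
 * plus one for the linear part, or, for linear skbs, one per page boundary
 * crossed by header plus data.  Returns 0 (and complains) if the packet
 * would exceed QETH_MAX_BUFFER_ELEMENTS together with the elements already
 * reserved.
 */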
05e08a2a 4396static inline int
9cb90de8
FP
4397qeth_get_elements_no(struct qeth_card *card, void *hdr,
4398 struct sk_buff *skb, int elems)
05e08a2a
FP
4399{
4400 int elements_needed = 0;
4401
4402 if (skb_shinfo(skb)->nr_frags > 0) {
4403 elements_needed = (skb_shinfo(skb)->nr_frags + 1);
4404 }
4405 if (elements_needed == 0 )
4406 elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
4407 + skb->len) >> PAGE_SHIFT);
9cb90de8 4408 if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)){
05e08a2a 4409 PRINT_ERR("qeth_do_send_packet: invalid size of "
9cb90de8
FP
4410 "IP packet (Number=%d / Length=%d). Discarded.\n",
4411 (elements_needed+elems), skb->len);
05e08a2a
FP
4412 return 0;
4413 }
4414 return elements_needed;
4415}
4416
1da177e4
LT
4417static inline int
4418qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
4419{
4420 int ipv = 0;
4421 int cast_type;
4422 struct qeth_qdio_out_q *queue;
f3d242e8 4423 struct qeth_hdr *hdr = NULL;
1da177e4
LT
4424 int elements_needed = 0;
4425 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
4426 struct qeth_eddp_context *ctx = NULL;
4427 int rc;
4428
4429 QETH_DBF_TEXT(trace, 6, "sendpkt");
4430
4431 if (!card->options.layer2) {
4432 ipv = qeth_get_ip_version(skb);
4433 if ((card->dev->hard_header == qeth_fake_header) && ipv) {
4434 if ((skb = qeth_pskb_unshare(skb,GFP_ATOMIC)) == NULL) {
4435 card->stats.tx_dropped++;
4436 dev_kfree_skb_irq(skb);
4437 return 0;
4438 }
e23dd9cd
FP
4439 if(card->dev->type == ARPHRD_IEEE802_TR){
4440 skb_pull(skb, QETH_FAKE_LL_LEN_TR);
4441 } else {
4442 skb_pull(skb, QETH_FAKE_LL_LEN_ETH);
4443 }
1da177e4
LT
4444 }
4445 }
500f83ab
UB
4446 if ((card->info.type == QETH_CARD_TYPE_OSN) &&
4447 (skb->protocol == htons(ETH_P_IPV6))) {
4448 dev_kfree_skb_any(skb);
4449 return 0;
4450 }
1da177e4 4451 cast_type = qeth_get_cast_type(card, skb);
500f83ab
UB
4452 if ((cast_type == RTN_BROADCAST) &&
4453 (card->info.broadcast_capable == 0)){
1da177e4
LT
4454 card->stats.tx_dropped++;
4455 card->stats.tx_errors++;
4456 dev_kfree_skb_any(skb);
4457 return NETDEV_TX_OK;
4458 }
4459 queue = card->qdio.out_qs
4460 [qeth_get_priority_queue(card, skb, ipv, cast_type)];
4461
4462 if (skb_shinfo(skb)->tso_size)
4463 large_send = card->options.large_send;
4464
1da177e4
LT
4465 	/* are we able to do TSO? If so, prepare and send it from here */
4466 if ((large_send == QETH_LARGE_SEND_TSO) &&
4467 (cast_type == RTN_UNSPEC)) {
05e08a2a
FP
4468 rc = qeth_tso_prepare_packet(card, skb, ipv, cast_type);
4469 if (rc) {
4470 card->stats.tx_dropped++;
4471 card->stats.tx_errors++;
4472 dev_kfree_skb_any(skb);
4473 return NETDEV_TX_OK;
4474 }
4475 elements_needed++;
4476 } else {
4477 if ((rc = qeth_prepare_skb(card, &skb, &hdr, ipv))) {
4478 QETH_DBF_TEXT_(trace, 4, "pskbe%d", rc);
4479 return rc;
4480 }
500f83ab
UB
4481 if (card->info.type != QETH_CARD_TYPE_OSN)
4482 qeth_fill_header(card, hdr, skb, ipv, cast_type);
1da177e4
LT
4483 }
4484
1da177e4
LT
4485 if (large_send == QETH_LARGE_SEND_EDDP) {
4486 ctx = qeth_eddp_create_context(card, skb, hdr);
4487 if (ctx == NULL) {
4488 PRINT_WARN("could not create eddp context\n");
4489 return -EINVAL;
4490 }
4491 } else {
9cb90de8
FP
4492 int elems = qeth_get_elements_no(card,(void*) hdr, skb,
4493 elements_needed);
4494 if (!elems)
1da177e4 4495 return -EINVAL;
9cb90de8 4496 elements_needed += elems;
1da177e4
LT
4497 }
4498
4499 if (card->info.type != QETH_CARD_TYPE_IQD)
4500 rc = qeth_do_send_packet(card, queue, skb, hdr,
4501 elements_needed, ctx);
4502 else
4503 rc = qeth_do_send_packet_fast(card, queue, skb, hdr,
4504 elements_needed, ctx);
1da177e4
LT
4505 if (!rc){
4506 card->stats.tx_packets++;
4507 card->stats.tx_bytes += skb->len;
4508#ifdef CONFIG_QETH_PERF_STATS
05e08a2a
FP
4509 if (skb_shinfo(skb)->tso_size &&
4510 !(large_send == QETH_LARGE_SEND_NO)) {
1da177e4
LT
4511 card->perf_stats.large_send_bytes += skb->len;
4512 card->perf_stats.large_send_cnt++;
4513 }
4514 if (skb_shinfo(skb)->nr_frags > 0){
4515 card->perf_stats.sg_skbs_sent++;
4516 /* nr_frags + skb->data */
4517 card->perf_stats.sg_frags_sent +=
4518 skb_shinfo(skb)->nr_frags + 1;
4519 }
4520#endif /* CONFIG_QETH_PERF_STATS */
4521 }
4522 if (ctx != NULL) {
4523 /* drop creator's reference */
4524 qeth_eddp_put_context(ctx);
4525 /* free skb; it's not referenced by a buffer */
4526 if (rc == 0)
4527 dev_kfree_skb_any(skb);
4528
4529 }
4530 return rc;
4531}
4532
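/*
 * Emulate MII register reads for the SIOCGMIIREG ioctl: the values are
 * synthesized from the card's link type, MAC address and error statistics
 * rather than read from a real PHY.
 */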
4533static int
4534qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4535{
4536 struct qeth_card *card = (struct qeth_card *) dev->priv;
4537 int rc = 0;
4538
4539 switch(regnum){
4540 case MII_BMCR: /* Basic mode control register */
4541 rc = BMCR_FULLDPLX;
4542 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)&&
500f83ab 4543 (card->info.link_type != QETH_LINK_TYPE_OSN) &&
1da177e4
LT
4544 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
4545 rc |= BMCR_SPEED100;
4546 break;
4547 case MII_BMSR: /* Basic mode status register */
4548 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4549 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4550 BMSR_100BASE4;
4551 break;
4552 case MII_PHYSID1: /* PHYS ID 1 */
4553 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4554 dev->dev_addr[2];
4555 rc = (rc >> 5) & 0xFFFF;
4556 break;
4557 case MII_PHYSID2: /* PHYS ID 2 */
4558 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4559 break;
4560 case MII_ADVERTISE: /* Advertisement control reg */
4561 rc = ADVERTISE_ALL;
4562 break;
4563 case MII_LPA: /* Link partner ability reg */
4564 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4565 LPA_100BASE4 | LPA_LPACK;
4566 break;
4567 case MII_EXPANSION: /* Expansion register */
4568 break;
4569 case MII_DCOUNTER: /* disconnect counter */
4570 break;
4571 case MII_FCSCOUNTER: /* false carrier counter */
4572 break;
4573 case MII_NWAYTEST: /* N-way auto-neg test register */
4574 break;
4575 case MII_RERRCOUNTER: /* rx error counter */
4576 rc = card->stats.rx_errors;
4577 break;
4578 case MII_SREVISION: /* silicon revision */
4579 break;
4580 case MII_RESV1: /* reserved 1 */
4581 break;
4582 case MII_LBRERROR: /* loopback, rx, bypass error */
4583 break;
4584 case MII_PHYADDR: /* physical address */
4585 break;
4586 case MII_RESV2: /* reserved 2 */
4587 break;
4588 case MII_TPISTATUS: /* TPI status for 10mbps */
4589 break;
4590 case MII_NCONFIG: /* network interface config */
4591 break;
4592 default:
4593 rc = 0;
4594 break;
4595 }
4596 return rc;
4597}
4598
4599static void
4600qeth_mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
4601{
4602 switch(regnum){
4603 case MII_BMCR: /* Basic mode control register */
4604 case MII_BMSR: /* Basic mode status register */
4605 case MII_PHYSID1: /* PHYS ID 1 */
4606 case MII_PHYSID2: /* PHYS ID 2 */
4607 case MII_ADVERTISE: /* Advertisement control reg */
4608 case MII_LPA: /* Link partner ability reg */
4609 case MII_EXPANSION: /* Expansion register */
4610 case MII_DCOUNTER: /* disconnect counter */
4611 case MII_FCSCOUNTER: /* false carrier counter */
4612 case MII_NWAYTEST: /* N-way auto-neg test register */
4613 case MII_RERRCOUNTER: /* rx error counter */
4614 case MII_SREVISION: /* silicon revision */
4615 case MII_RESV1: /* reserved 1 */
4616 case MII_LBRERROR: /* loopback, rx, bypass error */
4617 case MII_PHYADDR: /* physical address */
4618 case MII_RESV2: /* reserved 2 */
4619 case MII_TPISTATUS: /* TPI status for 10mbps */
4620 case MII_NCONFIG: /* network interface config */
4621 default:
4622 break;
4623 }
4624}
4625
4626static inline const char *
4627qeth_arp_get_error_cause(int *rc)
4628{
4629 switch (*rc) {
4630 case QETH_IPA_ARP_RC_FAILED:
4631 *rc = -EIO;
4632 return "operation failed";
4633 case QETH_IPA_ARP_RC_NOTSUPP:
4634 *rc = -EOPNOTSUPP;
4635 return "operation not supported";
4636 case QETH_IPA_ARP_RC_OUT_OF_RANGE:
4637 *rc = -EINVAL;
4638 return "argument out of range";
4639 case QETH_IPA_ARP_RC_Q_NOTSUPP:
4640 *rc = -EOPNOTSUPP;
4641 return "query operation not supported";
4642 case QETH_IPA_ARP_RC_Q_NO_DATA:
4643 *rc = -ENOENT;
4644 return "no query data available";
4645 default:
4646 return "unknown error";
4647 }
4648}
4649
4650static int
4651qeth_send_simple_setassparms(struct qeth_card *, enum qeth_ipa_funcs,
4652 __u16, long);
4653
4654static int
4655qeth_arp_set_no_entries(struct qeth_card *card, int no_entries)
4656{
4657 int tmp;
4658 int rc;
4659
4660 QETH_DBF_TEXT(trace,3,"arpstnoe");
4661
f3d242e8
FP
4662 /*
4663 * currently GuestLAN only supports the ARP assist function
4664 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
4665 * thus we say EOPNOTSUPP for this ARP function
4666 */
1da177e4
LT
4667 if (card->info.guestlan)
4668 return -EOPNOTSUPP;
4669 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4670 PRINT_WARN("ARP processing not supported "
4671 "on %s!\n", QETH_CARD_IFNAME(card));
4672 return -EOPNOTSUPP;
4673 }
4674 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
4675 IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
4676 no_entries);
4677 if (rc) {
4678 tmp = rc;
4679 PRINT_WARN("Could not set number of ARP entries on %s: "
4680 "%s (0x%x/%d)\n",
4681 QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
4682 tmp, tmp);
4683 }
4684 return rc;
4685}
4686
4687static inline void
4688qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
4689 struct qeth_arp_query_data *qdata,
4690 int entry_size, int uentry_size)
4691{
4692 char *entry_ptr;
4693 char *uentry_ptr;
4694 int i;
4695
4696 entry_ptr = (char *)&qdata->data;
4697 uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset);
4698 for (i = 0; i < qdata->no_entries; ++i){
4699 /* strip off 32 bytes "media specific information" */
4700 memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32);
4701 entry_ptr += entry_size;
4702 uentry_ptr += uentry_size;
4703 }
4704}
4705
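/*
 * Callback for IPA_CMD_ASS_ARP_QUERY_INFO: copy the returned ARP entries
 * into the user buffer (optionally stripping the 32 bytes of media specific
 * information), keep track of the offset and return 1 as long as more reply
 * parts are outstanding.
 */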
4706static int
4707qeth_arp_query_cb(struct qeth_card *card, struct qeth_reply *reply,
4708 unsigned long data)
4709{
4710 struct qeth_ipa_cmd *cmd;
4711 struct qeth_arp_query_data *qdata;
4712 struct qeth_arp_query_info *qinfo;
4713 int entry_size;
4714 int uentry_size;
4715 int i;
4716
4717 QETH_DBF_TEXT(trace,4,"arpquecb");
4718
4719 qinfo = (struct qeth_arp_query_info *) reply->param;
4720 cmd = (struct qeth_ipa_cmd *) data;
4721 if (cmd->hdr.return_code) {
4722 QETH_DBF_TEXT_(trace,4,"qaer1%i", cmd->hdr.return_code);
4723 return 0;
4724 }
4725 if (cmd->data.setassparms.hdr.return_code) {
4726 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
4727 QETH_DBF_TEXT_(trace,4,"qaer2%i", cmd->hdr.return_code);
4728 return 0;
4729 }
4730 qdata = &cmd->data.setassparms.data.query_arp;
4731 switch(qdata->reply_bits){
4732 case 5:
4733 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5);
4734 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4735 uentry_size = sizeof(struct qeth_arp_qi_entry5_short);
4736 break;
4737 case 7:
4738 /* fall through to default */
4739 default:
4740 /* tr is the same as eth -> entry7 */
4741 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7);
4742 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4743 uentry_size = sizeof(struct qeth_arp_qi_entry7_short);
4744 break;
4745 }
4746 /* check if there is enough room in userspace */
4747 if ((qinfo->udata_len - qinfo->udata_offset) <
4748 qdata->no_entries * uentry_size){
4749 QETH_DBF_TEXT_(trace, 4, "qaer3%i", -ENOMEM);
4750 cmd->hdr.return_code = -ENOMEM;
4751 PRINT_WARN("query ARP user space buffer is too small for "
4752 "the returned number of ARP entries. "
4753 "Aborting query!\n");
4754 goto out_error;
4755 }
4756 QETH_DBF_TEXT_(trace, 4, "anore%i",
4757 cmd->data.setassparms.hdr.number_of_replies);
4758 QETH_DBF_TEXT_(trace, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
4759 QETH_DBF_TEXT_(trace, 4, "anoen%i", qdata->no_entries);
4760
4761 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
4762 /* strip off "media specific information" */
4763 qeth_copy_arp_entries_stripped(qinfo, qdata, entry_size,
4764 uentry_size);
4765 } else
4766 /*copy entries to user buffer*/
4767 memcpy(qinfo->udata + qinfo->udata_offset,
4768 (char *)&qdata->data, qdata->no_entries*uentry_size);
4769
4770 qinfo->no_entries += qdata->no_entries;
4771 qinfo->udata_offset += (qdata->no_entries*uentry_size);
4772 /* check if all replies received ... */
4773 if (cmd->data.setassparms.hdr.seq_no <
4774 cmd->data.setassparms.hdr.number_of_replies)
4775 return 1;
4776 memcpy(qinfo->udata, &qinfo->no_entries, 4);
4777 /* keep STRIP_ENTRIES flag so the user program can distinguish
4778 * stripped entries from normal ones */
4779 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4780 qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
4781 memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET,&qdata->reply_bits,2);
4782 return 0;
4783out_error:
4784 i = 0;
4785 memcpy(qinfo->udata, &i, 4);
4786 return 0;
4787}
4788
4789static int
4790qeth_send_ipa_arp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
4791 int len, int (*reply_cb)(struct qeth_card *,
4792 struct qeth_reply *,
4793 unsigned long),
4794 void *reply_param)
4795{
4796 QETH_DBF_TEXT(trace,4,"sendarp");
4797
4798 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
4799 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
4800 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
4801 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
4802 reply_cb, reply_param);
4803}
4804
4805static int
4806qeth_send_ipa_snmp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
4807 int len, int (*reply_cb)(struct qeth_card *,
4808 struct qeth_reply *,
4809 unsigned long),
4810 void *reply_param)
4811{
4812 u16 s1, s2;
4813
4814 QETH_DBF_TEXT(trace,4,"sendsnmp");
4815
4816 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
4817 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
4818 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
4819 /* adjust PDU length fields in IPA_PDU_HEADER */
4820 s1 = (u32) IPA_PDU_HEADER_SIZE + len;
4821 s2 = (u32) len;
4822 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
4823 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
4824 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
4825 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
4826 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
4827 reply_cb, reply_param);
4828}
4829
4830static struct qeth_cmd_buffer *
4831qeth_get_setassparms_cmd(struct qeth_card *, enum qeth_ipa_funcs,
4832 __u16, __u16, enum qeth_prot_versions);
4833static int
4834qeth_arp_query(struct qeth_card *card, char *udata)
4835{
4836 struct qeth_cmd_buffer *iob;
4837 struct qeth_arp_query_info qinfo = {0, };
4838 int tmp;
4839 int rc;
4840
4841 QETH_DBF_TEXT(trace,3,"arpquery");
4842
1da177e4
LT
4843 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
4844 IPA_ARP_PROCESSING)) {
4845 PRINT_WARN("ARP processing not supported "
4846 "on %s!\n", QETH_CARD_IFNAME(card));
4847 return -EOPNOTSUPP;
4848 }
4849 /* get size of userspace buffer and mask_bits -> 6 bytes */
4850 if (copy_from_user(&qinfo, udata, 6))
4851 return -EFAULT;
4852 if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL)))
4853 return -ENOMEM;
4854 memset(qinfo.udata, 0, qinfo.udata_len);
4855 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
4856 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
4857 IPA_CMD_ASS_ARP_QUERY_INFO,
4858 sizeof(int),QETH_PROT_IPV4);
4859
4860 rc = qeth_send_ipa_arp_cmd(card, iob,
4861 QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
4862 qeth_arp_query_cb, (void *)&qinfo);
4863 if (rc) {
4864 tmp = rc;
4865 PRINT_WARN("Error while querying ARP cache on %s: %s "
4866 "(0x%x/%d)\n",
4867 QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
4868 tmp, tmp);
4869 copy_to_user(udata, qinfo.udata, 4);
4870 } else {
4871 copy_to_user(udata, qinfo.udata, qinfo.udata_len);
4872 }
4873 kfree(qinfo.udata);
4874 return rc;
4875}
4876
4877/**
4878 * SNMP command callback
4879 */
4880static int
4881qeth_snmp_command_cb(struct qeth_card *card, struct qeth_reply *reply,
4882 unsigned long sdata)
4883{
4884 struct qeth_ipa_cmd *cmd;
4885 struct qeth_arp_query_info *qinfo;
4886 struct qeth_snmp_cmd *snmp;
4887 unsigned char *data;
4888 __u16 data_len;
4889
4890 QETH_DBF_TEXT(trace,3,"snpcmdcb");
4891
4892 cmd = (struct qeth_ipa_cmd *) sdata;
4893 data = (unsigned char *)((char *)cmd - reply->offset);
4894 qinfo = (struct qeth_arp_query_info *) reply->param;
4895 snmp = &cmd->data.setadapterparms.data.snmp;
4896
4897 if (cmd->hdr.return_code) {
4898 QETH_DBF_TEXT_(trace,4,"scer1%i", cmd->hdr.return_code);
4899 return 0;
4900 }
4901 if (cmd->data.setadapterparms.hdr.return_code) {
4902 cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
4903 QETH_DBF_TEXT_(trace,4,"scer2%i", cmd->hdr.return_code);
4904 return 0;
4905 }
4906 data_len = *((__u16*)QETH_IPA_PDU_LEN_PDU1(data));
4907 if (cmd->data.setadapterparms.hdr.seq_no == 1)
4908 data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
4909 else
4910 data_len -= (__u16)((char*)&snmp->request - (char *)cmd);
4911
4912 /* check if there is enough room in userspace */
4913 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4914 QETH_DBF_TEXT_(trace, 4, "scer3%i", -ENOMEM);
4915 cmd->hdr.return_code = -ENOMEM;
4916 return 0;
4917 }
4918 QETH_DBF_TEXT_(trace, 4, "snore%i",
4919 cmd->data.setadapterparms.hdr.used_total);
4920 QETH_DBF_TEXT_(trace, 4, "sseqn%i", cmd->data.setadapterparms.hdr.seq_no);
4921 /*copy entries to user buffer*/
4922 if (cmd->data.setadapterparms.hdr.seq_no == 1) {
4923 memcpy(qinfo->udata + qinfo->udata_offset,
4924 (char *)snmp,
4925 data_len + offsetof(struct qeth_snmp_cmd,data));
4926 qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
4927 } else {
4928 memcpy(qinfo->udata + qinfo->udata_offset,
4929 (char *)&snmp->request, data_len);
4930 }
4931 qinfo->udata_offset += data_len;
4932 /* check if all replies received ... */
4933 QETH_DBF_TEXT_(trace, 4, "srtot%i",
4934 cmd->data.setadapterparms.hdr.used_total);
4935 QETH_DBF_TEXT_(trace, 4, "srseq%i",
4936 cmd->data.setadapterparms.hdr.seq_no);
4937 if (cmd->data.setadapterparms.hdr.seq_no <
4938 cmd->data.setadapterparms.hdr.used_total)
4939 return 1;
4940 return 0;
4941}
4942
4943static struct qeth_cmd_buffer *
4944qeth_get_ipacmd_buffer(struct qeth_card *, enum qeth_ipa_cmds,
4945 enum qeth_prot_versions );
4946
4947static struct qeth_cmd_buffer *
4948qeth_get_adapter_cmd(struct qeth_card *card, __u32 command, __u32 cmdlen)
4949{
4950 struct qeth_cmd_buffer *iob;
4951 struct qeth_ipa_cmd *cmd;
4952
4953 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETADAPTERPARMS,
4954 QETH_PROT_IPV4);
4955 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4956 cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
4957 cmd->data.setadapterparms.hdr.command_code = command;
4958 cmd->data.setadapterparms.hdr.used_total = 1;
4959 cmd->data.setadapterparms.hdr.seq_no = 1;
4960
4961 return iob;
4962}
4963
4964/**
4965 * function to send SNMP commands to OSA-E card
4966 */
4967static int
4968qeth_snmp_command(struct qeth_card *card, char *udata)
4969{
4970 struct qeth_cmd_buffer *iob;
4971 struct qeth_ipa_cmd *cmd;
4972 struct qeth_snmp_ureq *ureq;
4973 int req_len;
4974 struct qeth_arp_query_info qinfo = {0, };
4975 int rc = 0;
4976
4977 QETH_DBF_TEXT(trace,3,"snmpcmd");
4978
4979 if (card->info.guestlan)
4980 return -EOPNOTSUPP;
4981
4982 if ((!qeth_adp_supported(card,IPA_SETADP_SET_SNMP_CONTROL)) &&
4983 (!card->options.layer2) ) {
4984 PRINT_WARN("SNMP Query MIBS not supported "
4985 "on %s!\n", QETH_CARD_IFNAME(card));
4986 return -EOPNOTSUPP;
4987 }
4988 /* skip 4 bytes (data_len struct member) to get req_len */
4989 if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
4990 return -EFAULT;
4991 ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
4992 if (!ureq) {
4993 QETH_DBF_TEXT(trace, 2, "snmpnome");
4994 return -ENOMEM;
4995 }
4996 if (copy_from_user(ureq, udata,
4997 req_len+sizeof(struct qeth_snmp_ureq_hdr))){
4998 kfree(ureq);
4999 return -EFAULT;
5000 }
5001 qinfo.udata_len = ureq->hdr.data_len;
5002 if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL))){
5003 kfree(ureq);
5004 return -ENOMEM;
5005 }
5006 memset(qinfo.udata, 0, qinfo.udata_len);
5007 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
5008
5009 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
5010 QETH_SNMP_SETADP_CMDLENGTH + req_len);
5011 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5012 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
5013 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
5014 qeth_snmp_command_cb, (void *)&qinfo);
5015 if (rc)
5016 PRINT_WARN("SNMP command failed on %s: (0x%x)\n",
5017 QETH_CARD_IFNAME(card), rc);
5018 else
5019 copy_to_user(udata, qinfo.udata, qinfo.udata_len);
5020
5021 kfree(ureq);
5022 kfree(qinfo.udata);
5023 return rc;
5024}
5025
5026static int
5027qeth_default_setassparms_cb(struct qeth_card *, struct qeth_reply *,
5028 unsigned long);
5029
5030static int
6c951b90
FP
5031qeth_default_setadapterparms_cb(struct qeth_card *card,
5032 struct qeth_reply *reply,
5033 unsigned long data);
5034static int
1da177e4
LT
5035qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *,
5036 __u16, long,
5037 int (*reply_cb)
5038 (struct qeth_card *, struct qeth_reply *, unsigned long),
5039 void *reply_param);
5040
5041static int
5042qeth_arp_add_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
5043{
5044 struct qeth_cmd_buffer *iob;
5045 char buf[16];
5046 int tmp;
5047 int rc;
5048
5049 QETH_DBF_TEXT(trace,3,"arpadent");
5050
5051 /*
f3d242e8
FP
5052 * currently GuestLAN only supports the ARP assist function
5053 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
5054 * thus we say EOPNOTSUPP for this ARP function
1da177e4
LT
5055 */
5056 if (card->info.guestlan)
5057 return -EOPNOTSUPP;
5058 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
5059 PRINT_WARN("ARP processing not supported "
5060 "on %s!\n", QETH_CARD_IFNAME(card));
5061 return -EOPNOTSUPP;
5062 }
5063
5064 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
5065 IPA_CMD_ASS_ARP_ADD_ENTRY,
5066 sizeof(struct qeth_arp_cache_entry),
5067 QETH_PROT_IPV4);
5068 rc = qeth_send_setassparms(card, iob,
5069 sizeof(struct qeth_arp_cache_entry),
5070 (unsigned long) entry,
5071 qeth_default_setassparms_cb, NULL);
5072 if (rc) {
5073 tmp = rc;
5074 qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
5075 PRINT_WARN("Could not add ARP entry for address %s on %s: "
5076 "%s (0x%x/%d)\n",
5077 buf, QETH_CARD_IFNAME(card),
5078 qeth_arp_get_error_cause(&rc), tmp, tmp);
5079 }
5080 return rc;
5081}
5082
5083static int
5084qeth_arp_remove_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
5085{
5086 struct qeth_cmd_buffer *iob;
5087 char buf[16] = {0, };
5088 int tmp;
5089 int rc;
5090
5091 QETH_DBF_TEXT(trace,3,"arprment");
5092
5093 /*
f3d242e8
FP
5094 * currently GuestLAN only supports the ARP assist function
5095 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
5096 * thus we say EOPNOTSUPP for this ARP function
1da177e4
LT
5097 */
5098 if (card->info.guestlan)
5099 return -EOPNOTSUPP;
5100 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
5101 PRINT_WARN("ARP processing not supported "
5102 "on %s!\n", QETH_CARD_IFNAME(card));
5103 return -EOPNOTSUPP;
5104 }
5105 memcpy(buf, entry, 12);
5106 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
5107 IPA_CMD_ASS_ARP_REMOVE_ENTRY,
5108 12,
5109 QETH_PROT_IPV4);
5110 rc = qeth_send_setassparms(card, iob,
5111 12, (unsigned long)buf,
5112 qeth_default_setassparms_cb, NULL);
5113 if (rc) {
5114 tmp = rc;
5115 memset(buf, 0, 16);
5116 qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
5117 PRINT_WARN("Could not delete ARP entry for address %s on %s: "
5118 "%s (0x%x/%d)\n",
5119 buf, QETH_CARD_IFNAME(card),
5120 qeth_arp_get_error_cause(&rc), tmp, tmp);
5121 }
5122 return rc;
5123}
5124
5125static int
5126qeth_arp_flush_cache(struct qeth_card *card)
5127{
5128 int rc;
5129 int tmp;
5130
5131 QETH_DBF_TEXT(trace,3,"arpflush");
5132
5133 /*
f3d242e8
FP
5134 * currently GuestLAN only supports the ARP assist function
5135 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
5136 * thus we say EOPNOTSUPP for this ARP function
5137 */
1da177e4
LT
5138 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
5139 return -EOPNOTSUPP;
5140 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
5141 PRINT_WARN("ARP processing not supported "
5142 "on %s!\n", QETH_CARD_IFNAME(card));
5143 return -EOPNOTSUPP;
5144 }
5145 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
5146 IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
5147 if (rc){
5148 tmp = rc;
5149 PRINT_WARN("Could not flush ARP cache on %s: %s (0x%x/%d)\n",
5150 QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
5151 tmp, tmp);
5152 }
5153 return rc;
5154}
5155
5156static int
5157qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5158{
5159 struct qeth_card *card = (struct qeth_card *)dev->priv;
5160 struct qeth_arp_cache_entry arp_entry;
5161 struct mii_ioctl_data *mii_data;
5162 int rc = 0;
5163
5164 if (!card)
5165 return -ENODEV;
5166
5167 if ((card->state != CARD_STATE_UP) &&
5168 (card->state != CARD_STATE_SOFTSETUP))
5169 return -ENODEV;
5170
500f83ab
UB
5171 if (card->info.type == QETH_CARD_TYPE_OSN)
5172 return -EPERM;
5173
1da177e4
LT
5174 switch (cmd){
5175 case SIOC_QETH_ARP_SET_NO_ENTRIES:
5176 if ( !capable(CAP_NET_ADMIN) ||
5177 (card->options.layer2) ) {
5178 rc = -EPERM;
5179 break;
5180 }
5181 rc = qeth_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
5182 break;
5183 case SIOC_QETH_ARP_QUERY_INFO:
5184 if ( !capable(CAP_NET_ADMIN) ||
5185 (card->options.layer2) ) {
5186 rc = -EPERM;
5187 break;
5188 }
5189 rc = qeth_arp_query(card, rq->ifr_ifru.ifru_data);
5190 break;
5191 case SIOC_QETH_ARP_ADD_ENTRY:
5192 if ( !capable(CAP_NET_ADMIN) ||
5193 (card->options.layer2) ) {
5194 rc = -EPERM;
5195 break;
5196 }
5197 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
5198 sizeof(struct qeth_arp_cache_entry)))
5199 rc = -EFAULT;
5200 else
5201 rc = qeth_arp_add_entry(card, &arp_entry);
5202 break;
5203 case SIOC_QETH_ARP_REMOVE_ENTRY:
5204 if ( !capable(CAP_NET_ADMIN) ||
5205 (card->options.layer2) ) {
5206 rc = -EPERM;
5207 break;
5208 }
5209 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
5210 sizeof(struct qeth_arp_cache_entry)))
5211 rc = -EFAULT;
5212 else
5213 rc = qeth_arp_remove_entry(card, &arp_entry);
5214 break;
5215 case SIOC_QETH_ARP_FLUSH_CACHE:
5216 if ( !capable(CAP_NET_ADMIN) ||
5217 (card->options.layer2) ) {
5218 rc = -EPERM;
5219 break;
5220 }
5221 rc = qeth_arp_flush_cache(card);
5222 break;
5223 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
5224 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
5225 break;
5226 case SIOC_QETH_GET_CARD_TYPE:
5227 if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
5228 !card->info.guestlan)
5229 return 1;
5230 return 0;
5231 break;
5232 case SIOCGMIIPHY:
5233 mii_data = if_mii(rq);
5234 mii_data->phy_id = 0;
5235 break;
5236 case SIOCGMIIREG:
5237 mii_data = if_mii(rq);
5238 if (mii_data->phy_id != 0)
5239 rc = -EINVAL;
5240 else
5241 mii_data->val_out = qeth_mdio_read(dev,mii_data->phy_id,
5242 mii_data->reg_num);
5243 break;
5244 case SIOCSMIIREG:
5245 rc = -EOPNOTSUPP;
5246 break;
5247 		/* TODO: remove the break above once qeth_mdio_write does something */
5248 if (!capable(CAP_NET_ADMIN)){
5249 rc = -EPERM;
5250 break;
5251 }
5252 mii_data = if_mii(rq);
5253 if (mii_data->phy_id != 0)
5254 rc = -EINVAL;
5255 else
5256 qeth_mdio_write(dev, mii_data->phy_id, mii_data->reg_num,
5257 mii_data->val_in);
5258 break;
5259 default:
5260 rc = -EOPNOTSUPP;
5261 }
5262 if (rc)
5263 QETH_DBF_TEXT_(trace, 2, "ioce%d", rc);
5264 return rc;
5265}
5266
5267static struct net_device_stats *
5268qeth_get_stats(struct net_device *dev)
5269{
5270 struct qeth_card *card;
5271
5272 card = (struct qeth_card *) (dev->priv);
5273
5274 QETH_DBF_TEXT(trace,5,"getstat");
5275
5276 return &card->stats;
5277}
5278
5279static int
5280qeth_change_mtu(struct net_device *dev, int new_mtu)
5281{
5282 struct qeth_card *card;
5283 char dbf_text[15];
5284
5285 card = (struct qeth_card *) (dev->priv);
5286
5287 QETH_DBF_TEXT(trace,4,"chgmtu");
5288 sprintf(dbf_text, "%8x", new_mtu);
5289 QETH_DBF_TEXT(trace,4,dbf_text);
5290
5291 if (new_mtu < 64)
5292 return -EINVAL;
5293 if (new_mtu > 65535)
5294 return -EINVAL;
5295 if ((!qeth_is_supported(card,IPA_IP_FRAGMENTATION)) &&
5296 (!qeth_mtu_is_valid(card, new_mtu)))
5297 return -EINVAL;
5298 dev->mtu = new_mtu;
5299 return 0;
5300}
5301
5302#ifdef CONFIG_QETH_VLAN
5303static void
5304qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5305{
5306 struct qeth_card *card;
5307 unsigned long flags;
5308
5309 QETH_DBF_TEXT(trace,4,"vlanreg");
5310
5311 card = (struct qeth_card *) dev->priv;
5312 spin_lock_irqsave(&card->vlanlock, flags);
5313 card->vlangrp = grp;
5314 spin_unlock_irqrestore(&card->vlanlock, flags);
5315}
5316
5317static inline void
5318qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf,
5319 unsigned short vid)
5320{
5321 int i;
5322 struct sk_buff *skb;
5323 struct sk_buff_head tmp_list;
5324
5325 skb_queue_head_init(&tmp_list);
5326 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
5327 while ((skb = skb_dequeue(&buf->skb_list))){
5328 if (vlan_tx_tag_present(skb) &&
5329 (vlan_tx_tag_get(skb) == vid)) {
5330 atomic_dec(&skb->users);
5331 dev_kfree_skb(skb);
5332 } else
5333 skb_queue_tail(&tmp_list, skb);
5334 }
5335 }
5336 while ((skb = skb_dequeue(&tmp_list)))
5337 skb_queue_tail(&buf->skb_list, skb);
5338}
5339
5340static void
5341qeth_free_vlan_skbs(struct qeth_card *card, unsigned short vid)
5342{
5343 int i, j;
5344
5345 QETH_DBF_TEXT(trace, 4, "frvlskbs");
5346 for (i = 0; i < card->qdio.no_out_queues; ++i){
5347 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
5348 qeth_free_vlan_buffer(card, &card->qdio.
5349 out_qs[i]->bufs[j], vid);
5350 }
5351}
5352
5353static void
5354qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid)
5355{
5356 struct in_device *in_dev;
5357 struct in_ifaddr *ifa;
5358 struct qeth_ipaddr *addr;
5359
5360 QETH_DBF_TEXT(trace, 4, "frvaddr4");
6c88ad2d 5361
1da177e4 5362 rcu_read_lock();
e5ed6399 5363 in_dev = __in_dev_get_rcu(card->vlangrp->vlan_devices[vid]);
1da177e4
LT
5364 if (!in_dev)
5365 goto out;
5366 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
5367 addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
5368 if (addr){
5369 addr->u.a4.addr = ifa->ifa_address;
5370 addr->u.a4.mask = ifa->ifa_mask;
5371 addr->type = QETH_IP_TYPE_NORMAL;
5372 if (!qeth_delete_ip(card, addr))
5373 kfree(addr);
5374 }
5375 }
5376out:
5377 rcu_read_unlock();
5378}
5379
5380static void
5381qeth_free_vlan_addresses6(struct qeth_card *card, unsigned short vid)
5382{
5383#ifdef CONFIG_QETH_IPV6
5384 struct inet6_dev *in6_dev;
5385 struct inet6_ifaddr *ifa;
5386 struct qeth_ipaddr *addr;
5387
5388 QETH_DBF_TEXT(trace, 4, "frvaddr6");
6c88ad2d 5389
1da177e4
LT
5390 in6_dev = in6_dev_get(card->vlangrp->vlan_devices[vid]);
5391 if (!in6_dev)
5392 return;
5393 for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){
5394 addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
5395 if (addr){
5396 memcpy(&addr->u.a6.addr, &ifa->addr,
5397 sizeof(struct in6_addr));
5398 addr->u.a6.pfxlen = ifa->prefix_len;
5399 addr->type = QETH_IP_TYPE_NORMAL;
5400 if (!qeth_delete_ip(card, addr))
5401 kfree(addr);
5402 }
5403 }
5404 in6_dev_put(in6_dev);
5405#endif /* CONFIG_QETH_IPV6 */
5406}
5407
6c88ad2d
FP
5408static void
5409qeth_free_vlan_addresses(struct qeth_card *card, unsigned short vid)
5410{
5411 if (card->options.layer2 || !card->vlangrp)
5412 return;
5413 qeth_free_vlan_addresses4(card, vid);
5414 qeth_free_vlan_addresses6(card, vid);
5415}
5416
508cc2b0
FP
5417static int
5418qeth_layer2_send_setdelvlan_cb(struct qeth_card *card,
5419 struct qeth_reply *reply,
5420 unsigned long data)
5421{
5422 struct qeth_ipa_cmd *cmd;
5423
5424 QETH_DBF_TEXT(trace, 2, "L2sdvcb");
5425 cmd = (struct qeth_ipa_cmd *) data;
5426 if (cmd->hdr.return_code) {
5427 PRINT_ERR("Error in processing VLAN %i on %s: 0x%x. "
5428 "Continuing\n",cmd->data.setdelvlan.vlan_id,
5429 QETH_CARD_IFNAME(card), cmd->hdr.return_code);
5430 QETH_DBF_TEXT_(trace, 2, "L2VL%4x", cmd->hdr.command);
5431 QETH_DBF_TEXT_(trace, 2, "L2%s", CARD_BUS_ID(card));
5432 QETH_DBF_TEXT_(trace, 2, "err%d", cmd->hdr.return_code);
5433 }
5434 return 0;
5435}
5436
5437static int
1da177e4
LT
5438qeth_layer2_send_setdelvlan(struct qeth_card *card, __u16 i,
5439 enum qeth_ipa_cmds ipacmd)
5440{
1da177e4
LT
5441 struct qeth_ipa_cmd *cmd;
5442 struct qeth_cmd_buffer *iob;
5443
5444 QETH_DBF_TEXT_(trace, 4, "L2sdv%x",ipacmd);
5445 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
5446 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5447 cmd->data.setdelvlan.vlan_id = i;
508cc2b0
FP
5448 return qeth_send_ipa_cmd(card, iob,
5449 qeth_layer2_send_setdelvlan_cb, NULL);
1da177e4
LT
5450}
5451
5452static void
5453qeth_layer2_process_vlans(struct qeth_card *card, int clear)
5454{
5455 unsigned short i;
5456
5457 QETH_DBF_TEXT(trace, 3, "L2prcvln");
5458
5459 if (!card->vlangrp)
5460 return;
5461 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
5462 if (card->vlangrp->vlan_devices[i] == NULL)
5463 continue;
5464 if (clear)
5465 qeth_layer2_send_setdelvlan(card, i, IPA_CMD_DELVLAN);
5466 else
5467 qeth_layer2_send_setdelvlan(card, i, IPA_CMD_SETVLAN);
5468 }
5469}
5470
5471/* add_vid is used in layer 2 mode only ... */
5472static void
5473qeth_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
5474{
5475 struct qeth_card *card;
5476
5477 QETH_DBF_TEXT_(trace, 4, "aid:%d", vid);
5478
5479 card = (struct qeth_card *) dev->priv;
5480 if (!card->options.layer2)
5481 return;
5482 qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
5483}
5484
5485/*... kill_vid used for both modes*/
5486static void
5487qeth_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
5488{
5489 struct qeth_card *card;
5490 unsigned long flags;
5491
5492 QETH_DBF_TEXT_(trace, 4, "kid:%d", vid);
5493
5494 card = (struct qeth_card *) dev->priv;
5495 /* free all skbs for the vlan device */
5496 qeth_free_vlan_skbs(card, vid);
5497 spin_lock_irqsave(&card->vlanlock, flags);
5498 /* unregister IP addresses of vlan device */
6c88ad2d 5499 qeth_free_vlan_addresses(card, vid);
1da177e4
LT
5500 if (card->vlangrp)
5501 card->vlangrp->vlan_devices[vid] = NULL;
5502 spin_unlock_irqrestore(&card->vlanlock, flags);
5503 if (card->options.layer2)
5504 qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
5505 qeth_set_multicast_list(card->dev);
5506}
5507#endif
6c951b90
FP
5508/**
5509 * Examine hardware response to SET_PROMISC_MODE
5510 */
5511static int
5512qeth_setadp_promisc_mode_cb(struct qeth_card *card,
5513 struct qeth_reply *reply,
5514 unsigned long data)
5515{
5516 struct qeth_ipa_cmd *cmd;
5517 struct qeth_ipacmd_setadpparms *setparms;
5518
5519 QETH_DBF_TEXT(trace,4,"prmadpcb");
5520
5521 cmd = (struct qeth_ipa_cmd *) data;
5522 setparms = &(cmd->data.setadapterparms);
5523
5524 qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
5525 if (cmd->hdr.return_code) {
5526 QETH_DBF_TEXT_(trace,4,"prmrc%2.2x",cmd->hdr.return_code);
5527 setparms->data.mode = SET_PROMISC_MODE_OFF;
5528 }
5529 card->info.promisc_mode = setparms->data.mode;
5530 return 0;
5531}
5532/*
5533 * Set promiscuous mode (on or off) (SET_PROMISC_MODE command)
5534 */
5535static void
5536qeth_setadp_promisc_mode(struct qeth_card *card)
5537{
5538 enum qeth_ipa_promisc_modes mode;
5539 struct net_device *dev = card->dev;
5540 struct qeth_cmd_buffer *iob;
5541 struct qeth_ipa_cmd *cmd;
5542
5543 QETH_DBF_TEXT(trace, 4, "setprom");
5544
5545 if (((dev->flags & IFF_PROMISC) &&
5546 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
5547 (!(dev->flags & IFF_PROMISC) &&
5548 (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
5549 return;
5550 mode = SET_PROMISC_MODE_OFF;
5551 if (dev->flags & IFF_PROMISC)
5552 mode = SET_PROMISC_MODE_ON;
5553 QETH_DBF_TEXT_(trace, 4, "mode:%x", mode);
5554
5555 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
5556 sizeof(struct qeth_ipacmd_setadpparms));
5557 cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
5558 cmd->data.setadapterparms.data.mode = mode;
5559 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
5560}
5561
5562/**
5563 * set multicast address on card
5564 */
5565static void
5566qeth_set_multicast_list(struct net_device *dev)
5567{
5568 struct qeth_card *card = (struct qeth_card *) dev->priv;
5569
5570 if (card->info.type == QETH_CARD_TYPE_OSN)
5571 return ;
5572
5573 QETH_DBF_TEXT(trace,3,"setmulti");
5574 qeth_delete_mc_addresses(card);
5575 if (card->options.layer2) {
5576 qeth_layer2_add_multicast(card);
5577 goto out;
5578 }
5579 qeth_add_multicast_ipv4(card);
5580#ifdef CONFIG_QETH_IPV6
5581 qeth_add_multicast_ipv6(card);
5582#endif
5583out:
5584 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
5585 schedule_work(&card->kernel_thread_starter);
5586 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
5587 return;
5588 if (qeth_set_thread_start_bit(card, QETH_SET_PROMISC_MODE_THREAD)==0)
5589 schedule_work(&card->kernel_thread_starter);
5590
5591}
5592
5593static int
5594qeth_neigh_setup(struct net_device *dev, struct neigh_parms *np)
5595{
5596 return 0;
5597}
5598
5599static void
5600qeth_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev)
5601{
5602 if (dev->type == ARPHRD_IEEE802_TR)
5603 ip_tr_mc_map(ipm, mac);
5604 else
5605 ip_eth_mc_map(ipm, mac);
5606}
5607
5608static struct qeth_ipaddr *
5609qeth_get_addr_buffer(enum qeth_prot_versions prot)
5610{
5611 struct qeth_ipaddr *addr;
5612
5613 addr = kmalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
5614 if (addr == NULL) {
5615 PRINT_WARN("Not enough memory to add address\n");
5616 return NULL;
5617 }
5618 memset(addr,0,sizeof(struct qeth_ipaddr));
5619 addr->type = QETH_IP_TYPE_NORMAL;
5620 addr->proto = prot;
5621 return addr;
5622}
5623
5624int
5625qeth_osn_assist(struct net_device *dev,
5626 void *data,
5627 int data_len)
5628{
5629 struct qeth_cmd_buffer *iob;
5630 struct qeth_card *card;
5631 int rc;
5632
5633 QETH_DBF_TEXT(trace, 2, "osnsdmc");
5634 if (!dev)
5635 return -ENODEV;
5636 card = (struct qeth_card *)dev->priv;
5637 if (!card)
5638 return -ENODEV;
5639 if ((card->state != CARD_STATE_UP) &&
5640 (card->state != CARD_STATE_SOFTSETUP))
5641 return -ENODEV;
5642 iob = qeth_wait_for_buffer(&card->write);
5643 memcpy(iob->data+IPA_PDU_HEADER_SIZE, data, data_len);
5644 rc = qeth_osn_send_ipa_cmd(card, iob, data_len);
5645 return rc;
5646}
5647
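/*
 * Find the net_device of the card whose read channel device number
 * matches the given two-byte device number (compared against the last
 * four hex digits of the read channel bus id).
 */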
5648static struct net_device *
5649qeth_netdev_by_devno(unsigned char *read_dev_no)
5650{
5651 struct qeth_card *card;
5652 struct net_device *ndev;
5653 unsigned char *readno;
5654 __u16 temp_dev_no, card_dev_no;
5655 char *endp;
5656 unsigned long flags;
5657
5658 ndev = NULL;
5659 memcpy(&temp_dev_no, read_dev_no, 2);
5660 read_lock_irqsave(&qeth_card_list.rwlock, flags);
5661 list_for_each_entry(card, &qeth_card_list.list, list) {
5662 readno = CARD_RDEV_ID(card);
5663 readno += (strlen(readno) - 4);
5664 card_dev_no = simple_strtoul(readno, &endp, 16);
5665 if (card_dev_no == temp_dev_no) {
5666 ndev = card->dev;
5667 break;
5668 }
5669 }
5670 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
5671 return ndev;
5672}
5673
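/*
 * Register OSN callbacks for the card identified by its read device
 * number; both the assist and the data callback must be supplied.
 */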
5674int
5675qeth_osn_register(unsigned char *read_dev_no,
5676 struct net_device **dev,
5677 int (*assist_cb)(struct net_device *, void *),
5678 int (*data_cb)(struct sk_buff *))
5679{
5680 struct qeth_card * card;
5681
5682 QETH_DBF_TEXT(trace, 2, "osnreg");
5683 *dev = qeth_netdev_by_devno(read_dev_no);
5684 if (*dev == NULL)
5685 return -ENODEV;
5686 card = (struct qeth_card *)(*dev)->priv;
5687 if (!card)
5688 return -ENODEV;
5689 if ((assist_cb == NULL) || (data_cb == NULL))
5690 return -EINVAL;
5691 card->osn_info.assist_cb = assist_cb;
5692 card->osn_info.data_cb = data_cb;
5693 return 0;
5694}
5695
5696void
5697qeth_osn_deregister(struct net_device * dev)
5698{
5699 struct qeth_card *card;
5700
5701 QETH_DBF_TEXT(trace, 2, "osndereg");
5702 if (!dev)
5703 return;
5704 card = (struct qeth_card *)dev->priv;
5705 if (!card)
5706 return;
5707 card->osn_info.assist_cb = NULL;
5708 card->osn_info.data_cb = NULL;
5709 return;
5710}
5711
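/*
 * Multicast addresses are not removed one by one; a single
 * QETH_IP_TYPE_DEL_ALL_MC entry is queued on the card's todo list
 * for later processing.
 */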
5712static void
5713qeth_delete_mc_addresses(struct qeth_card *card)
5714{
5715 struct qeth_ipaddr *iptodo;
5716 unsigned long flags;
5717
5718 QETH_DBF_TEXT(trace,4,"delmc");
5719 iptodo = qeth_get_addr_buffer(QETH_PROT_IPV4);
5720 if (!iptodo) {
5721 QETH_DBF_TEXT(trace, 2, "dmcnomem");
5722 return;
5723 }
5724 iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
5725 spin_lock_irqsave(&card->ip_lock, flags);
5726 if (!__qeth_insert_ip_todo(card, iptodo, 0))
5727 kfree(iptodo);
5728 spin_unlock_irqrestore(&card->ip_lock, flags);
5729}
5730
5731static inline void
5732qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev)
5733{
5734 struct qeth_ipaddr *ipm;
5735 struct ip_mc_list *im4;
5736 char buf[MAX_ADDR_LEN];
5737
5738 QETH_DBF_TEXT(trace,4,"addmc");
5739 for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
5740 qeth_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
5741 ipm = qeth_get_addr_buffer(QETH_PROT_IPV4);
5742 if (!ipm)
5743 continue;
5744 ipm->u.a4.addr = im4->multiaddr;
5745 memcpy(ipm->mac,buf,OSA_ADDR_LEN);
5746 ipm->is_multicast = 1;
5747 if (!qeth_add_ip(card,ipm))
5748 kfree(ipm);
5749 }
5750}
5751
5752static inline void
5753qeth_add_vlan_mc(struct qeth_card *card)
5754{
5755#ifdef CONFIG_QETH_VLAN
5756 struct in_device *in_dev;
5757 struct vlan_group *vg;
5758 int i;
5759
5760 QETH_DBF_TEXT(trace,4,"addmcvl");
5761 if ( ((card->options.layer2 == 0) &&
5762 (!qeth_is_supported(card,IPA_FULL_VLAN))) ||
5763 (card->vlangrp == NULL) )
5764 return ;
5765
5766 vg = card->vlangrp;
5767 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
5768 if (vg->vlan_devices[i] == NULL ||
5769 !(vg->vlan_devices[i]->flags & IFF_UP))
5770 continue;
5771 in_dev = in_dev_get(vg->vlan_devices[i]);
5772 if (!in_dev)
5773 continue;
5774 read_lock(&in_dev->mc_list_lock);
5775 qeth_add_mc(card,in_dev);
5776 read_unlock(&in_dev->mc_list_lock);
5777 in_dev_put(in_dev);
5778 }
5779#endif
5780}
5781
5782static void
5783qeth_add_multicast_ipv4(struct qeth_card *card)
5784{
5785 struct in_device *in4_dev;
5786
5787 QETH_DBF_TEXT(trace,4,"chkmcv4");
5788 in4_dev = in_dev_get(card->dev);
5789 if (in4_dev == NULL)
5790 return;
5791 read_lock(&in4_dev->mc_list_lock);
5792 qeth_add_mc(card, in4_dev);
5793 qeth_add_vlan_mc(card);
5794 read_unlock(&in4_dev->mc_list_lock);
5795 in_dev_put(in4_dev);
5796}
5797
5798static void
5799qeth_layer2_add_multicast(struct qeth_card *card)
5800{
5801 struct qeth_ipaddr *ipm;
5802 struct dev_mc_list *dm;
5803
5804 QETH_DBF_TEXT(trace,4,"L2addmc");
5805 for (dm = card->dev->mc_list; dm; dm = dm->next) {
5806 ipm = qeth_get_addr_buffer(QETH_PROT_IPV4);
5807 if (!ipm)
5808 continue;
5809 memcpy(ipm->mac,dm->dmi_addr,MAX_ADDR_LEN);
5810 ipm->is_multicast = 1;
5811 if (!qeth_add_ip(card, ipm))
5812 kfree(ipm);
5813 }
5814}
5815
5816#ifdef CONFIG_QETH_IPV6
5817static inline void
5818qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
5819{
5820 struct qeth_ipaddr *ipm;
5821 struct ifmcaddr6 *im6;
5822 char buf[MAX_ADDR_LEN];
5823
5824 QETH_DBF_TEXT(trace,4,"addmc6");
5825 for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
5826 ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
5827 ipm = qeth_get_addr_buffer(QETH_PROT_IPV6);
5828 if (!ipm)
5829 continue;
5830 ipm->is_multicast = 1;
5831 memcpy(ipm->mac,buf,OSA_ADDR_LEN);
5832 memcpy(&ipm->u.a6.addr,&im6->mca_addr.s6_addr,
5833 sizeof(struct in6_addr));
5834 if (!qeth_add_ip(card,ipm))
5835 kfree(ipm);
5836 }
5837}
5838
5839static inline void
5840qeth_add_vlan_mc6(struct qeth_card *card)
5841{
5842#ifdef CONFIG_QETH_VLAN
5843 struct inet6_dev *in_dev;
5844 struct vlan_group *vg;
5845 int i;
5846
5847 QETH_DBF_TEXT(trace,4,"admc6vl");
5848 if ( ((card->options.layer2 == 0) &&
5849 (!qeth_is_supported(card,IPA_FULL_VLAN))) ||
5850 (card->vlangrp == NULL))
5851 return ;
5852
5853 vg = card->vlangrp;
5854 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
5855 if (vg->vlan_devices[i] == NULL ||
5856 !(vg->vlan_devices[i]->flags & IFF_UP))
5857 continue;
5858 in_dev = in6_dev_get(vg->vlan_devices[i]);
5859 if (!in_dev)
5860 continue;
5861 read_lock(&in_dev->lock);
5862 qeth_add_mc6(card,in_dev);
5863 read_unlock(&in_dev->lock);
5864 in6_dev_put(in_dev);
5865 }
5866#endif /* CONFIG_QETH_VLAN */
5867}
5868
5869static void
5870qeth_add_multicast_ipv6(struct qeth_card *card)
5871{
5872 struct inet6_dev *in6_dev;
5873
5874 QETH_DBF_TEXT(trace,4,"chkmcv6");
5875	if (!qeth_is_supported(card, IPA_IPV6))
5876		return;
5877 in6_dev = in6_dev_get(card->dev);
5878 if (in6_dev == NULL)
5879 return;
5880 read_lock(&in6_dev->lock);
5881 qeth_add_mc6(card, in6_dev);
5882 qeth_add_vlan_mc6(card);
5883 read_unlock(&in6_dev->lock);
5884 in6_dev_put(in6_dev);
5885}
5886#endif /* CONFIG_QETH_IPV6 */
5887
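/*
 * Common helper for the layer 2 MAC commands (SETVMAC/DELVMAC and
 * SETGMAC/DELGMAC); the caller supplies the IPA command and the
 * matching reply callback.
 */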
5888static int
5889qeth_layer2_send_setdelmac(struct qeth_card *card, __u8 *mac,
5890 enum qeth_ipa_cmds ipacmd,
5891 int (*reply_cb) (struct qeth_card *,
5892 struct qeth_reply*,
5893 unsigned long))
5894{
5895 struct qeth_ipa_cmd *cmd;
5896 struct qeth_cmd_buffer *iob;
5897
5898 QETH_DBF_TEXT(trace, 2, "L2sdmac");
5899 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
5900 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5901 cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
5902 memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
5903 return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
5904}
5905
5906static int
5907qeth_layer2_send_setgroupmac_cb(struct qeth_card *card,
5908 struct qeth_reply *reply,
5909 unsigned long data)
5910{
5911 struct qeth_ipa_cmd *cmd;
5912 __u8 *mac;
5913
5914 QETH_DBF_TEXT(trace, 2, "L2Sgmacb");
5915 cmd = (struct qeth_ipa_cmd *) data;
5916 mac = &cmd->data.setdelmac.mac[0];
5917 /* MAC already registered, needed in couple/uncouple case */
5918 if (cmd->hdr.return_code == 0x2005) {
5919 PRINT_WARN("Group MAC %02x:%02x:%02x:%02x:%02x:%02x " \
5920			"already exists on %s\n",
5921 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
5922 QETH_CARD_IFNAME(card));
5923 cmd->hdr.return_code = 0;
5924 }
5925 if (cmd->hdr.return_code)
5926 PRINT_ERR("Could not set group MAC " \
5927 "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
5928 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
5929 QETH_CARD_IFNAME(card),cmd->hdr.return_code);
5930 return 0;
5931}
5932
5933static int
5934qeth_layer2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
5935{
5936 QETH_DBF_TEXT(trace, 2, "L2Sgmac");
5937 return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
5938 qeth_layer2_send_setgroupmac_cb);
5939}
5940
5941static int
5942qeth_layer2_send_delgroupmac_cb(struct qeth_card *card,
5943 struct qeth_reply *reply,
5944 unsigned long data)
5945{
5946 struct qeth_ipa_cmd *cmd;
5947 __u8 *mac;
5948
5949 QETH_DBF_TEXT(trace, 2, "L2Dgmacb");
5950 cmd = (struct qeth_ipa_cmd *) data;
5951 mac = &cmd->data.setdelmac.mac[0];
5952 if (cmd->hdr.return_code)
5953 PRINT_ERR("Could not delete group MAC " \
5954 "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
5955 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
5956 QETH_CARD_IFNAME(card), cmd->hdr.return_code);
5957 return 0;
5958}
5959
5960static int
5961qeth_layer2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
5962{
5963 QETH_DBF_TEXT(trace, 2, "L2Dgmac");
5964 return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
5965 qeth_layer2_send_delgroupmac_cb);
5966}
5967
5968static int
5969qeth_layer2_send_setmac_cb(struct qeth_card *card,
5970 struct qeth_reply *reply,
5971 unsigned long data)
5972{
5973 struct qeth_ipa_cmd *cmd;
5974
5975 QETH_DBF_TEXT(trace, 2, "L2Smaccb");
5976 cmd = (struct qeth_ipa_cmd *) data;
5977 if (cmd->hdr.return_code) {
5978 QETH_DBF_TEXT_(trace, 2, "L2er%x", cmd->hdr.return_code);
5979 PRINT_WARN("Error in registering MAC address on " \
5980 "device %s: x%x\n", CARD_BUS_ID(card),
5981 cmd->hdr.return_code);
5982		card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
5983 cmd->hdr.return_code = -EIO;
5984 } else {
5985		card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
5986 memcpy(card->dev->dev_addr,cmd->data.setdelmac.mac,
5987 OSA_ADDR_LEN);
5988 PRINT_INFO("MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
5989 "successfully registered on device %s\n",
5990 card->dev->dev_addr[0], card->dev->dev_addr[1],
5991 card->dev->dev_addr[2], card->dev->dev_addr[3],
5992 card->dev->dev_addr[4], card->dev->dev_addr[5],
5993 card->dev->name);
5994 }
5995 return 0;
5996}
5997
5998static int
5999qeth_layer2_send_setmac(struct qeth_card *card, __u8 *mac)
6000{
6001 QETH_DBF_TEXT(trace, 2, "L2Setmac");
6002 return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
6003 qeth_layer2_send_setmac_cb);
6004}
6005
6006static int
6007qeth_layer2_send_delmac_cb(struct qeth_card *card,
6008 struct qeth_reply *reply,
6009 unsigned long data)
6010{
6011 struct qeth_ipa_cmd *cmd;
6012
6013 QETH_DBF_TEXT(trace, 2, "L2Dmaccb");
6014 cmd = (struct qeth_ipa_cmd *) data;
6015 if (cmd->hdr.return_code) {
6016 PRINT_WARN("Error in deregistering MAC address on " \
6017 "device %s: x%x\n", CARD_BUS_ID(card),
6018 cmd->hdr.return_code);
6019 QETH_DBF_TEXT_(trace, 2, "err%d", cmd->hdr.return_code);
6020 cmd->hdr.return_code = -EIO;
6021 return 0;
6022 }
6023	card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
6024
6025 return 0;
6026}
6027static int
6028qeth_layer2_send_delmac(struct qeth_card *card, __u8 *mac)
6029{
6030 QETH_DBF_TEXT(trace, 2, "L2Delmac");
6031	if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
6032 return 0;
6033 return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
6034 qeth_layer2_send_delmac_cb);
6035}
6036
6037static int
6038qeth_layer2_set_mac_address(struct net_device *dev, void *p)
6039{
6040 struct sockaddr *addr = p;
6041 struct qeth_card *card;
6042 int rc = 0;
6043
6044 QETH_DBF_TEXT(trace, 3, "setmac");
6045
6046 if (qeth_verify_dev(dev) != QETH_REAL_CARD) {
6047 QETH_DBF_TEXT(trace, 3, "setmcINV");
6048 return -EOPNOTSUPP;
6049 }
6050 card = (struct qeth_card *) dev->priv;
6051
6052 if (!card->options.layer2) {
6053		PRINT_WARN("Setting MAC address on %s is not supported "
6054 "in Layer 3 mode.\n", dev->name);
6055 QETH_DBF_TEXT(trace, 3, "setmcLY3");
6056 return -EOPNOTSUPP;
6057 }
6058 if (card->info.type == QETH_CARD_TYPE_OSN) {
6059 PRINT_WARN("Setting MAC address on %s is not supported.\n",
6060 dev->name);
6061 QETH_DBF_TEXT(trace, 3, "setmcOSN");
6062 return -EOPNOTSUPP;
6063 }
6064 QETH_DBF_TEXT_(trace, 3, "%s", CARD_BUS_ID(card));
6065 QETH_DBF_HEX(trace, 3, addr->sa_data, OSA_ADDR_LEN);
6066 rc = qeth_layer2_send_delmac(card, &card->dev->dev_addr[0]);
6067 if (!rc)
6068 rc = qeth_layer2_send_setmac(card, addr->sa_data);
6069 return rc;
6070}
6071
6072static void
6073qeth_fill_ipacmd_header(struct qeth_card *card, struct qeth_ipa_cmd *cmd,
6074 __u8 command, enum qeth_prot_versions prot)
6075{
6076 memset(cmd, 0, sizeof (struct qeth_ipa_cmd));
6077 cmd->hdr.command = command;
6078 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
6079 cmd->hdr.seqno = card->seqno.ipa;
6080 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
6081 cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
6082 if (card->options.layer2)
6083 cmd->hdr.prim_version_no = 2;
6084 else
6085 cmd->hdr.prim_version_no = 1;
6086 cmd->hdr.param_count = 1;
6087 cmd->hdr.prot_version = prot;
6088 cmd->hdr.ipa_supported = 0;
6089 cmd->hdr.ipa_enabled = 0;
6090}
6091
6092static struct qeth_cmd_buffer *
6093qeth_get_ipacmd_buffer(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
6094 enum qeth_prot_versions prot)
6095{
6096 struct qeth_cmd_buffer *iob;
6097 struct qeth_ipa_cmd *cmd;
6098
6099 iob = qeth_wait_for_buffer(&card->write);
6100 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6101 qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
6102
6103 return iob;
6104}
6105
6106static int
6107qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd)
6108{
6109 int rc;
6110 struct qeth_cmd_buffer *iob;
6111 struct qeth_ipa_cmd *cmd;
6112
6113 QETH_DBF_TEXT(trace,4,"setdelmc");
6114
6115 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
6116 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6117 memcpy(&cmd->data.setdelipm.mac,addr->mac, OSA_ADDR_LEN);
6118 if (addr->proto == QETH_PROT_IPV6)
6119 memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
6120 sizeof(struct in6_addr));
6121 else
6122 memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr,4);
6123
6124 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
6125
6126 return rc;
6127}
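/*
 * Build a 16 byte binary netmask from a prefix length for the IPv6
 * SETIP/DELIP commands, e.g. len = 20 yields ff:ff:f0:00:...:00.
 */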
6128static inline void
6129qeth_fill_netmask(u8 *netmask, unsigned int len)
6130{
6131 int i,j;
6132 for (i=0;i<16;i++) {
6133 j=(len)-(i*8);
6134 if (j >= 8)
6135 netmask[i] = 0xff;
6136 else if (j > 0)
6137 netmask[i] = (u8)(0xFF00>>j);
6138 else
6139 netmask[i] = 0;
6140 }
6141}
6142
6143static int
6144qeth_send_setdelip(struct qeth_card *card, struct qeth_ipaddr *addr,
6145 int ipacmd, unsigned int flags)
6146{
6147 int rc;
6148 struct qeth_cmd_buffer *iob;
6149 struct qeth_ipa_cmd *cmd;
6150 __u8 netmask[16];
6151
6152 QETH_DBF_TEXT(trace,4,"setdelip");
6153 QETH_DBF_TEXT_(trace,4,"flags%02X", flags);
6154
6155 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
6156 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6157 if (addr->proto == QETH_PROT_IPV6) {
6158 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
6159 sizeof(struct in6_addr));
6160 qeth_fill_netmask(netmask,addr->u.a6.pfxlen);
6161 memcpy(cmd->data.setdelip6.mask, netmask,
6162 sizeof(struct in6_addr));
6163 cmd->data.setdelip6.flags = flags;
6164 } else {
6165 memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
6166 memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
6167 cmd->data.setdelip4.flags = flags;
6168 }
6169
6170 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
6171
6172 return rc;
6173}
6174
6175static int
6176qeth_layer2_register_addr_entry(struct qeth_card *card,
6177 struct qeth_ipaddr *addr)
6178{
6179 if (!addr->is_multicast)
6180 return 0;
6181 QETH_DBF_TEXT(trace, 2, "setgmac");
6182 QETH_DBF_HEX(trace,3,&addr->mac[0],OSA_ADDR_LEN);
6183 return qeth_layer2_send_setgroupmac(card, &addr->mac[0]);
6184}
6185
6186static int
6187qeth_layer2_deregister_addr_entry(struct qeth_card *card,
6188 struct qeth_ipaddr *addr)
6189{
6190 if (!addr->is_multicast)
6191 return 0;
6192 QETH_DBF_TEXT(trace, 2, "delgmac");
6193 QETH_DBF_HEX(trace,3,&addr->mac[0],OSA_ADDR_LEN);
6194 return qeth_layer2_send_delgroupmac(card, &addr->mac[0]);
6195}
6196
6197static int
6198qeth_layer3_register_addr_entry(struct qeth_card *card,
6199 struct qeth_ipaddr *addr)
6200{
6201 char buf[50];
6202 int rc;
6203 int cnt = 3;
6204
6205 if (addr->proto == QETH_PROT_IPV4) {
6206 QETH_DBF_TEXT(trace, 2,"setaddr4");
6207 QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
6208 } else if (addr->proto == QETH_PROT_IPV6) {
6209 QETH_DBF_TEXT(trace, 2, "setaddr6");
6210 QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
6211 QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
6212 } else {
6213 QETH_DBF_TEXT(trace, 2, "setaddr?");
6214 QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
6215 }
6216 do {
6217 if (addr->is_multicast)
6218 rc = qeth_send_setdelmc(card, addr, IPA_CMD_SETIPM);
6219 else
6220 rc = qeth_send_setdelip(card, addr, IPA_CMD_SETIP,
6221 addr->set_flags);
6222 if (rc)
6223 QETH_DBF_TEXT(trace, 2, "failed");
6224 } while ((--cnt > 0) && rc);
6225 if (rc){
6226 QETH_DBF_TEXT(trace, 2, "FAILED");
6227 qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
6228 PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n",
6229 buf, rc, rc);
6230 }
6231 return rc;
6232}
6233
6234static int
6235qeth_layer3_deregister_addr_entry(struct qeth_card *card,
6236 struct qeth_ipaddr *addr)
6237{
6238 //char buf[50];
6239 int rc;
6240
6241 if (addr->proto == QETH_PROT_IPV4) {
6242 QETH_DBF_TEXT(trace, 2,"deladdr4");
6243 QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
6244 } else if (addr->proto == QETH_PROT_IPV6) {
6245 QETH_DBF_TEXT(trace, 2, "deladdr6");
6246 QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
6247 QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
6248 } else {
6249 QETH_DBF_TEXT(trace, 2, "deladdr?");
6250 QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
6251 }
6252 if (addr->is_multicast)
6253 rc = qeth_send_setdelmc(card, addr, IPA_CMD_DELIPM);
6254 else
6255 rc = qeth_send_setdelip(card, addr, IPA_CMD_DELIP,
6256 addr->del_flags);
6257 if (rc) {
6258 QETH_DBF_TEXT(trace, 2, "failed");
6259 /* TODO: re-activate this warning as soon as we have a
6260		 * clean microcode
6261 qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
6262 PRINT_WARN("Could not deregister IP address %s (rc=%x)\n",
6263 buf, rc);
6264 */
6265 }
6266 return rc;
6267}
6268
6269static int
6270qeth_register_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
6271{
6272 if (card->options.layer2)
6273 return qeth_layer2_register_addr_entry(card, addr);
6274
6275 return qeth_layer3_register_addr_entry(card, addr);
6276}
6277
6278static int
6279qeth_deregister_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
6280{
6281 if (card->options.layer2)
6282 return qeth_layer2_deregister_addr_entry(card, addr);
6283
6284 return qeth_layer3_deregister_addr_entry(card, addr);
6285}
6286
6287static u32
6288qeth_ethtool_get_tx_csum(struct net_device *dev)
6289{
6290 /* We may need to say that we support tx csum offload if
6291 * we do EDDP or TSO. There are discussions going on to
6292 * enforce rules in the stack and in ethtool that make
6293 * SG and TSO depend on HW_CSUM. At the moment there are
6294 * no such rules....
6295	 * If we claimed support here, we would have to checksum all
6296	 * outbound packets ourselves. */
6297 return 0;
6298}
6299
6300static int
6301qeth_ethtool_set_tx_csum(struct net_device *dev, u32 data)
6302{
6303 return -EINVAL;
6304}
6305
6306static u32
6307qeth_ethtool_get_rx_csum(struct net_device *dev)
6308{
6309 struct qeth_card *card = (struct qeth_card *)dev->priv;
6310
6311 return (card->options.checksum_type == HW_CHECKSUMMING);
6312}
6313
6314static int
6315qeth_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6316{
6317 struct qeth_card *card = (struct qeth_card *)dev->priv;
6318
6319 if ((card->state != CARD_STATE_DOWN) &&
6320 (card->state != CARD_STATE_RECOVER))
6321 return -EPERM;
6322 if (data)
6323 card->options.checksum_type = HW_CHECKSUMMING;
6324 else
6325 card->options.checksum_type = SW_CHECKSUMMING;
6326 return 0;
6327}
6328
6329static u32
6330qeth_ethtool_get_sg(struct net_device *dev)
6331{
6332 struct qeth_card *card = (struct qeth_card *)dev->priv;
6333
6334 return ((card->options.large_send != QETH_LARGE_SEND_NO) &&
6335 (dev->features & NETIF_F_SG));
6336}
6337
6338static int
6339qeth_ethtool_set_sg(struct net_device *dev, u32 data)
6340{
6341 struct qeth_card *card = (struct qeth_card *)dev->priv;
6342
6343 if (data) {
6344 if (card->options.large_send != QETH_LARGE_SEND_NO)
6345 dev->features |= NETIF_F_SG;
6346 else {
6347 dev->features &= ~NETIF_F_SG;
6348 return -EINVAL;
6349 }
6350 } else
6351 dev->features &= ~NETIF_F_SG;
6352 return 0;
6353}
6354
6355static u32
6356qeth_ethtool_get_tso(struct net_device *dev)
6357{
6358 struct qeth_card *card = (struct qeth_card *)dev->priv;
6359
6360 return ((card->options.large_send != QETH_LARGE_SEND_NO) &&
6361 (dev->features & NETIF_F_TSO));
6362}
6363
6364static int
6365qeth_ethtool_set_tso(struct net_device *dev, u32 data)
6366{
6367 struct qeth_card *card = (struct qeth_card *)dev->priv;
6368
6369 if (data) {
6370 if (card->options.large_send != QETH_LARGE_SEND_NO)
6371 dev->features |= NETIF_F_TSO;
6372 else {
6373 dev->features &= ~NETIF_F_TSO;
6374 return -EINVAL;
6375 }
6376 } else
6377 dev->features &= ~NETIF_F_TSO;
6378 return 0;
6379}
6380
6381static struct ethtool_ops qeth_ethtool_ops = {
6382 .get_tx_csum = qeth_ethtool_get_tx_csum,
6383 .set_tx_csum = qeth_ethtool_set_tx_csum,
6384 .get_rx_csum = qeth_ethtool_get_rx_csum,
6385 .set_rx_csum = qeth_ethtool_set_rx_csum,
6386 .get_sg = qeth_ethtool_get_sg,
6387 .set_sg = qeth_ethtool_set_sg,
6388 .get_tso = qeth_ethtool_get_tso,
6389 .set_tso = qeth_ethtool_set_tso,
6390};
6391
6392static int
6393qeth_netdev_init(struct net_device *dev)
6394{
6395 struct qeth_card *card;
6396
6397 card = (struct qeth_card *) dev->priv;
6398
6399 QETH_DBF_TEXT(trace,3,"initdev");
6400
6401 dev->tx_timeout = &qeth_tx_timeout;
6402 dev->watchdog_timeo = QETH_TX_TIMEOUT;
6403 dev->open = qeth_open;
6404 dev->stop = qeth_stop;
6405 dev->hard_start_xmit = qeth_hard_start_xmit;
6406 dev->do_ioctl = qeth_do_ioctl;
6407 dev->get_stats = qeth_get_stats;
6408 dev->change_mtu = qeth_change_mtu;
6409 dev->neigh_setup = qeth_neigh_setup;
6410 dev->set_multicast_list = qeth_set_multicast_list;
6411#ifdef CONFIG_QETH_VLAN
6412 dev->vlan_rx_register = qeth_vlan_rx_register;
6413 dev->vlan_rx_kill_vid = qeth_vlan_rx_kill_vid;
6414 dev->vlan_rx_add_vid = qeth_vlan_rx_add_vid;
6415#endif
6416 dev->hard_header = card->orig_hard_header;
6417 if (qeth_get_netdev_flags(card) & IFF_NOARP) {
6418 dev->rebuild_header = NULL;
6419 dev->hard_header = NULL;
6420 if (card->options.fake_ll)
6421 dev->hard_header = qeth_fake_header;
6422 dev->header_cache_update = NULL;
6423 dev->hard_header_cache = NULL;
6424 }
6425#ifdef CONFIG_QETH_IPV6
6426 /*IPv6 address autoconfiguration stuff*/
6427 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
6428 card->dev->dev_id = card->info.unique_id & 0xffff;
6429#endif
6430 dev->hard_header_parse = NULL;
6431 dev->set_mac_address = qeth_layer2_set_mac_address;
6432 dev->flags |= qeth_get_netdev_flags(card);
6433 if ((card->options.fake_broadcast) ||
6434 (card->info.broadcast_capable))
6435 dev->flags |= IFF_BROADCAST;
6436 dev->hard_header_len =
6437 qeth_get_hlen(card->info.link_type) + card->options.add_hhlen;
6438 dev->addr_len = OSA_ADDR_LEN;
6439 dev->mtu = card->info.initial_mtu;
6440 if (card->info.type != QETH_CARD_TYPE_OSN)
6441 SET_ETHTOOL_OPS(dev, &qeth_ethtool_ops);
6442 SET_MODULE_OWNER(dev);
6443 return 0;
6444}
6445
6446static void
6447qeth_init_func_level(struct qeth_card *card)
6448{
6449 if (card->ipato.enabled) {
6450 if (card->info.type == QETH_CARD_TYPE_IQD)
6451 card->info.func_level =
6452 QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
6453 else
6454 card->info.func_level =
6455 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT;
6456 } else {
6457 if (card->info.type == QETH_CARD_TYPE_IQD)
6458			/* FIXME: why do we have the same values for dis and ena for OSAE? */
6459 card->info.func_level =
6460 QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
6461 else
6462 card->info.func_level =
6463 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT;
6464 }
6465}
6466
6467/**
6468 * hardsetup card, initialize MPC and QDIO stuff
6469 */
6470static int
6471qeth_hardsetup_card(struct qeth_card *card)
6472{
6473 int retries = 3;
6474 int rc;
6475
6476 QETH_DBF_TEXT(setup, 2, "hrdsetup");
6477
6478retry:
6479 if (retries < 3){
6480		PRINT_WARN("Retrying IDX activation.\n");
6481 ccw_device_set_offline(CARD_DDEV(card));
6482 ccw_device_set_offline(CARD_WDEV(card));
6483 ccw_device_set_offline(CARD_RDEV(card));
6484 ccw_device_set_online(CARD_RDEV(card));
6485 ccw_device_set_online(CARD_WDEV(card));
6486 ccw_device_set_online(CARD_DDEV(card));
6487 }
6488	rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
6489 if (rc == -ERESTARTSYS) {
6490 QETH_DBF_TEXT(setup, 2, "break1");
6491 return rc;
6492 } else if (rc) {
6493 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
6494 if (--retries < 0)
6495 goto out;
6496 else
6497 goto retry;
6498 }
6499 if ((rc = qeth_get_unitaddr(card))){
6500 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
6501 return rc;
6502 }
6503 qeth_init_tokens(card);
6504 qeth_init_func_level(card);
6505 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
6506 if (rc == -ERESTARTSYS) {
6507 QETH_DBF_TEXT(setup, 2, "break2");
6508 return rc;
6509 } else if (rc) {
6510 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
6511 if (--retries < 0)
6512 goto out;
6513 else
6514 goto retry;
6515 }
6516 rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
6517 if (rc == -ERESTARTSYS) {
6518 QETH_DBF_TEXT(setup, 2, "break3");
6519 return rc;
6520 } else if (rc) {
6521 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
6522 if (--retries < 0)
6523 goto out;
6524 else
6525 goto retry;
6526 }
6527 if ((rc = qeth_mpc_initialize(card))){
6528 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
6529 goto out;
6530 }
6531 /*network device will be recovered*/
6532 if (card->dev) {
6533 card->dev->hard_header = card->orig_hard_header;
6534 return 0;
6535 }
6536	/* on the first set_online, allocate the netdev */
6537 card->dev = qeth_get_netdevice(card->info.type,
6538 card->info.link_type);
6539 if (!card->dev){
6540 qeth_qdio_clear_card(card, card->info.type !=
6541 QETH_CARD_TYPE_IQD);
6542 rc = -ENODEV;
6543 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
6544 goto out;
6545 }
6546 card->dev->priv = card;
6547 card->orig_hard_header = card->dev->hard_header;
6548 card->dev->type = qeth_get_arphdr_type(card->info.type,
6549 card->info.link_type);
6550 card->dev->init = qeth_netdev_init;
6551 return 0;
6552out:
6553 PRINT_ERR("Initialization in hardsetup failed! rc=%d\n", rc);
6554 return rc;
6555}
6556
6557static int
6558qeth_default_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply,
6559 unsigned long data)
6560{
6561 struct qeth_ipa_cmd *cmd;
6562
6563 QETH_DBF_TEXT(trace,4,"defadpcb");
6564
6565 cmd = (struct qeth_ipa_cmd *) data;
6566 if (cmd->hdr.return_code == 0){
6567 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
6568 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
6569 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
6570#ifdef CONFIG_QETH_IPV6
6571 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
6572 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
6573#endif
6574 }
6575 if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
6576 cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
6577 card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
6578 QETH_DBF_TEXT_(trace, 3, "csum:%d", card->info.csum_mask);
6579 }
6580 return 0;
6581}
6582
6583static int
6584qeth_default_setadapterparms_cb(struct qeth_card *card,
6585 struct qeth_reply *reply,
6586 unsigned long data)
6587{
6588 struct qeth_ipa_cmd *cmd;
6589
6590 QETH_DBF_TEXT(trace,4,"defadpcb");
6591
6592 cmd = (struct qeth_ipa_cmd *) data;
6593 if (cmd->hdr.return_code == 0)
6594 cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
6595 return 0;
6596}
6597
6598
6599
6600static int
6601qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply,
6602 unsigned long data)
6603{
6604 struct qeth_ipa_cmd *cmd;
6605
6606 QETH_DBF_TEXT(trace,3,"quyadpcb");
6607
6608 cmd = (struct qeth_ipa_cmd *) data;
6609 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f)
6610 card->info.link_type =
6611 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
6612 card->options.adp.supported_funcs =
6613 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
6614 return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
6615}
6616
6617static int
6618qeth_query_setadapterparms(struct qeth_card *card)
6619{
6620 int rc;
6621 struct qeth_cmd_buffer *iob;
6622
6623 QETH_DBF_TEXT(trace,3,"queryadp");
6624 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
6625 sizeof(struct qeth_ipacmd_setadpparms));
6626 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
6627 return rc;
6628}
6629
6630static int
6631qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
6632 struct qeth_reply *reply,
6633 unsigned long data)
6634{
6635 struct qeth_ipa_cmd *cmd;
6636
6637 QETH_DBF_TEXT(trace,4,"chgmaccb");
6638
6639 cmd = (struct qeth_ipa_cmd *) data;
6640 if (!card->options.layer2 || card->info.guestlan ||
6641 !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
6642 memcpy(card->dev->dev_addr,
6643 &cmd->data.setadapterparms.data.change_addr.addr,
6644 OSA_ADDR_LEN);
6645 card->info.mac_bits |= QETH_LAYER2_MAC_READ;
6646 }
6647 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
6648 return 0;
6649}
6650
6651static int
6652qeth_setadpparms_change_macaddr(struct qeth_card *card)
6653{
6654 int rc;
6655 struct qeth_cmd_buffer *iob;
6656 struct qeth_ipa_cmd *cmd;
6657
6658 QETH_DBF_TEXT(trace,4,"chgmac");
6659
6660 iob = qeth_get_adapter_cmd(card,IPA_SETADP_ALTER_MAC_ADDRESS,
6661 sizeof(struct qeth_ipacmd_setadpparms));
6662 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6663 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
6664 cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
6665 memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
6666 card->dev->dev_addr, OSA_ADDR_LEN);
6667 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
6668 NULL);
6669 return rc;
6670}
6671
6672static int
6673qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode)
6674{
6675 int rc;
6676 struct qeth_cmd_buffer *iob;
6677 struct qeth_ipa_cmd *cmd;
6678
6679 QETH_DBF_TEXT(trace,4,"adpmode");
6680
6681 iob = qeth_get_adapter_cmd(card, command,
6682 sizeof(struct qeth_ipacmd_setadpparms));
6683 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6684 cmd->data.setadapterparms.data.mode = mode;
6685 rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb,
6686 NULL);
6687 return rc;
6688}
6689
6690static inline int
6691qeth_setadapter_hstr(struct qeth_card *card)
6692{
6693 int rc;
6694
6695 QETH_DBF_TEXT(trace,4,"adphstr");
6696
6697 if (qeth_adp_supported(card,IPA_SETADP_SET_BROADCAST_MODE)) {
6698 rc = qeth_send_setadp_mode(card, IPA_SETADP_SET_BROADCAST_MODE,
6699 card->options.broadcast_mode);
6700 if (rc)
6701 PRINT_WARN("couldn't set broadcast mode on "
6702 "device %s: x%x\n",
6703 CARD_BUS_ID(card), rc);
6704 rc = qeth_send_setadp_mode(card, IPA_SETADP_ALTER_MAC_ADDRESS,
6705 card->options.macaddr_mode);
6706 if (rc)
6707 PRINT_WARN("couldn't set macaddr mode on "
6708 "device %s: x%x\n", CARD_BUS_ID(card), rc);
6709 return rc;
6710 }
6711 if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
6712 PRINT_WARN("set adapter parameters not available "
6713 "to set broadcast mode, using ALLRINGS "
6714 "on device %s:\n", CARD_BUS_ID(card));
6715 if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
6716 PRINT_WARN("set adapter parameters not available "
6717 "to set macaddr mode, using NONCANONICAL "
6718 "on device %s:\n", CARD_BUS_ID(card));
6719 return 0;
6720}
6721
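/*
 * Query the adapter parameter assists and, where supported, read the
 * MAC address from the card and set the token ring specific modes.
 */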
6722static int
6723qeth_setadapter_parms(struct qeth_card *card)
6724{
6725 int rc;
6726
6727 QETH_DBF_TEXT(setup, 2, "setadprm");
6728
6729 if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)){
6730 PRINT_WARN("set adapter parameters not supported "
6731 "on device %s.\n",
6732 CARD_BUS_ID(card));
6733 QETH_DBF_TEXT(setup, 2, " notsupp");
6734 return 0;
6735 }
6736 rc = qeth_query_setadapterparms(card);
6737 if (rc) {
6738 PRINT_WARN("couldn't set adapter parameters on device %s: "
6739 "x%x\n", CARD_BUS_ID(card), rc);
6740 return rc;
6741 }
6742 if (qeth_adp_supported(card,IPA_SETADP_ALTER_MAC_ADDRESS)) {
6743 rc = qeth_setadpparms_change_macaddr(card);
6744 if (rc)
6745 PRINT_WARN("couldn't get MAC address on "
6746 "device %s: x%x\n",
6747 CARD_BUS_ID(card), rc);
6748 }
6749
6750 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
6751 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
6752 rc = qeth_setadapter_hstr(card);
6753
6754 return rc;
6755}
6756
6757static int
6758qeth_layer2_initialize(struct qeth_card *card)
6759{
6760 int rc = 0;
6761
6762
6763 QETH_DBF_TEXT(setup, 2, "doL2init");
6764 QETH_DBF_TEXT_(setup, 2, "doL2%s", CARD_BUS_ID(card));
6765
6766 rc = qeth_query_setadapterparms(card);
6767 if (rc) {
6768 PRINT_WARN("could not query adapter parameters on device %s: "
6769 "x%x\n", CARD_BUS_ID(card), rc);
6770 }
6771
6772 rc = qeth_setadpparms_change_macaddr(card);
6773 if (rc) {
6774 PRINT_WARN("couldn't get MAC address on "
6775 "device %s: x%x\n",
6776 CARD_BUS_ID(card), rc);
6777 QETH_DBF_TEXT_(setup, 2,"1err%d",rc);
6778 return rc;
6779 }
6780 QETH_DBF_HEX(setup,2, card->dev->dev_addr, OSA_ADDR_LEN);
6781
6782 rc = qeth_layer2_send_setmac(card, &card->dev->dev_addr[0]);
6783 if (rc)
6784 QETH_DBF_TEXT_(setup, 2,"2err%d",rc);
6785 return 0;
6786}
6787
6788
6789static int
6790qeth_send_startstoplan(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
6791 enum qeth_prot_versions prot)
6792{
6793 int rc;
6794 struct qeth_cmd_buffer *iob;
6795
6796 iob = qeth_get_ipacmd_buffer(card,ipacmd,prot);
6797 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
6798
6799 return rc;
6800}
6801
6802static int
6803qeth_send_startlan(struct qeth_card *card, enum qeth_prot_versions prot)
6804{
6805 int rc;
6806
6807 QETH_DBF_TEXT_(setup, 2, "strtlan%i", prot);
6808
6809 rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, prot);
6810 return rc;
6811}
6812
6813static int
6814qeth_send_stoplan(struct qeth_card *card)
6815{
6816 int rc = 0;
6817
6818 /*
6819 * TODO: according to the IPA format document page 14,
6820	 * TCP/IP (i.e. we) never issues a STOPLAN;
6821	 * is this right?
6822 */
6823 QETH_DBF_TEXT(trace, 2, "stoplan");
6824
6825 rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, QETH_PROT_IPV4);
6826 return rc;
6827}
6828
6829static int
6830qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply,
6831 unsigned long data)
6832{
6833 struct qeth_ipa_cmd *cmd;
6834
6835 QETH_DBF_TEXT(setup, 2, "qipasscb");
6836
6837 cmd = (struct qeth_ipa_cmd *) data;
6838 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
6839 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
6840 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
6841		/* IPv6 support is hard-coded off for HiperSockets */
6842		if (card->info.type == QETH_CARD_TYPE_IQD)
6843 card->options.ipa4.supported_funcs &= ~IPA_IPV6;
6844 } else {
6845#ifdef CONFIG_QETH_IPV6
6846 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
6847 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
6848#endif
6849 }
6850 QETH_DBF_TEXT(setup, 2, "suppenbl");
6851 QETH_DBF_TEXT_(setup, 2, "%x",cmd->hdr.ipa_supported);
6852 QETH_DBF_TEXT_(setup, 2, "%x",cmd->hdr.ipa_enabled);
6853 return 0;
6854}
6855
6856static int
6857qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
6858{
6859 int rc;
6860 struct qeth_cmd_buffer *iob;
6861
6862 QETH_DBF_TEXT_(setup, 2, "qipassi%i", prot);
6863 if (card->options.layer2) {
6864 QETH_DBF_TEXT(setup, 2, "noprmly2");
6865 return -EPERM;
6866 }
6867
6868 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_QIPASSIST,prot);
6869 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
6870 return rc;
6871}
6872
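/*
 * Build a SETASSPARMS command buffer; the setassparms header length
 * is 8 bytes of header plus the assist specific data length.
 */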
6873static struct qeth_cmd_buffer *
6874qeth_get_setassparms_cmd(struct qeth_card *card, enum qeth_ipa_funcs ipa_func,
6875 __u16 cmd_code, __u16 len,
6876 enum qeth_prot_versions prot)
6877{
6878 struct qeth_cmd_buffer *iob;
6879 struct qeth_ipa_cmd *cmd;
6880
6881 QETH_DBF_TEXT(trace,4,"getasscm");
6882 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETASSPARMS,prot);
6883
6884 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6885 cmd->data.setassparms.hdr.assist_no = ipa_func;
6886 cmd->data.setassparms.hdr.length = 8 + len;
6887 cmd->data.setassparms.hdr.command_code = cmd_code;
6888 cmd->data.setassparms.hdr.return_code = 0;
6889 cmd->data.setassparms.hdr.seq_no = 0;
6890
6891 return iob;
6892}
6893
6894static int
6895qeth_send_setassparms(struct qeth_card *card, struct qeth_cmd_buffer *iob,
6896 __u16 len, long data,
6897 int (*reply_cb)
6898 (struct qeth_card *,struct qeth_reply *,unsigned long),
6899 void *reply_param)
6900{
6901 int rc;
6902 struct qeth_ipa_cmd *cmd;
6903
6904 QETH_DBF_TEXT(trace,4,"sendassp");
6905
6906 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6907 if (len <= sizeof(__u32))
6908 cmd->data.setassparms.data.flags_32bit = (__u32) data;
6909 else if (len > sizeof(__u32))
6910 memcpy(&cmd->data.setassparms.data, (void *) data, len);
6911
6912 rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
6913 return rc;
6914}
6915
6916#ifdef CONFIG_QETH_IPV6
6917static int
6918qeth_send_simple_setassparms_ipv6(struct qeth_card *card,
6919 enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
6920
6921{
6922 int rc;
6923 struct qeth_cmd_buffer *iob;
6924
6925 QETH_DBF_TEXT(trace,4,"simassp6");
6926 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
6927 0, QETH_PROT_IPV6);
6928 rc = qeth_send_setassparms(card, iob, 0, 0,
6929 qeth_default_setassparms_cb, NULL);
6930 return rc;
6931}
6932#endif
6933
6934static int
6935qeth_send_simple_setassparms(struct qeth_card *card,
6936 enum qeth_ipa_funcs ipa_func,
6937 __u16 cmd_code, long data)
6938{
6939 int rc;
6940 int length = 0;
6941 struct qeth_cmd_buffer *iob;
6942
6943 QETH_DBF_TEXT(trace,4,"simassp4");
6944 if (data)
6945 length = sizeof(__u32);
6946 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
6947 length, QETH_PROT_IPV4);
6948 rc = qeth_send_setassparms(card, iob, length, data,
6949 qeth_default_setassparms_cb, NULL);
6950 return rc;
6951}
6952
6953static inline int
6954qeth_start_ipa_arp_processing(struct qeth_card *card)
6955{
6956 int rc;
6957
6958 QETH_DBF_TEXT(trace,3,"ipaarp");
6959
6960 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
6961 PRINT_WARN("ARP processing not supported "
6962 "on %s!\n", QETH_CARD_IFNAME(card));
6963 return 0;
6964 }
6965 rc = qeth_send_simple_setassparms(card,IPA_ARP_PROCESSING,
6966 IPA_CMD_ASS_START, 0);
6967 if (rc) {
6968 PRINT_WARN("Could not start ARP processing "
6969 "assist on %s: 0x%x\n",
6970 QETH_CARD_IFNAME(card), rc);
6971 }
6972 return rc;
6973}
6974
6975static int
6976qeth_start_ipa_ip_fragmentation(struct qeth_card *card)
6977{
6978 int rc;
6979
6980 QETH_DBF_TEXT(trace,3,"ipaipfrg");
6981
6982 if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
6983 PRINT_INFO("Hardware IP fragmentation not supported on %s\n",
6984 QETH_CARD_IFNAME(card));
6985 return -EOPNOTSUPP;
6986 }
6987
6988 rc = qeth_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
6989 IPA_CMD_ASS_START, 0);
6990 if (rc) {
6991 PRINT_WARN("Could not start Hardware IP fragmentation "
6992 "assist on %s: 0x%x\n",
6993 QETH_CARD_IFNAME(card), rc);
6994 } else
6995 PRINT_INFO("Hardware IP fragmentation enabled \n");
6996 return rc;
6997}
6998
6999static int
7000qeth_start_ipa_source_mac(struct qeth_card *card)
7001{
7002 int rc;
7003
7004 QETH_DBF_TEXT(trace,3,"stsrcmac");
7005
7006 if (!card->options.fake_ll)
7007 return -EOPNOTSUPP;
7008
7009 if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
7010 PRINT_INFO("Inbound source address not "
7011 "supported on %s\n", QETH_CARD_IFNAME(card));
7012 return -EOPNOTSUPP;
7013 }
7014
7015 rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
7016 IPA_CMD_ASS_START, 0);
7017 if (rc)
7018 PRINT_WARN("Could not start inbound source "
7019 "assist on %s: 0x%x\n",
7020 QETH_CARD_IFNAME(card), rc);
7021 return rc;
7022}
7023
7024static int
7025qeth_start_ipa_vlan(struct qeth_card *card)
7026{
7027 int rc = 0;
7028
7029 QETH_DBF_TEXT(trace,3,"strtvlan");
7030
7031#ifdef CONFIG_QETH_VLAN
7032 if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
7033 PRINT_WARN("VLAN not supported on %s\n", QETH_CARD_IFNAME(card));
7034 return -EOPNOTSUPP;
7035 }
7036
7037 rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
7038 IPA_CMD_ASS_START,0);
7039 if (rc) {
7040 PRINT_WARN("Could not start vlan "
7041 "assist on %s: 0x%x\n",
7042 QETH_CARD_IFNAME(card), rc);
7043 } else {
7044 PRINT_INFO("VLAN enabled \n");
7045 card->dev->features |=
7046 NETIF_F_HW_VLAN_FILTER |
7047 NETIF_F_HW_VLAN_TX |
7048 NETIF_F_HW_VLAN_RX;
7049 }
7050#endif /* QETH_VLAN */
7051 return rc;
7052}
7053
7054static int
7055qeth_start_ipa_multicast(struct qeth_card *card)
7056{
7057 int rc;
7058
7059 QETH_DBF_TEXT(trace,3,"stmcast");
7060
7061 if (!qeth_is_supported(card, IPA_MULTICASTING)) {
7062 PRINT_WARN("Multicast not supported on %s\n",
7063 QETH_CARD_IFNAME(card));
7064 return -EOPNOTSUPP;
7065 }
7066
7067 rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
7068 IPA_CMD_ASS_START,0);
7069 if (rc) {
7070 PRINT_WARN("Could not start multicast "
7071 "assist on %s: rc=%i\n",
7072 QETH_CARD_IFNAME(card), rc);
7073 } else {
7074 PRINT_INFO("Multicast enabled\n");
7075 card->dev->flags |= IFF_MULTICAST;
7076 }
7077 return rc;
7078}
7079
7080#ifdef CONFIG_QETH_IPV6
7081static int
7082qeth_softsetup_ipv6(struct qeth_card *card)
7083{
7084 int rc;
7085
7086 QETH_DBF_TEXT(trace,3,"softipv6");
7087
7088 netif_stop_queue(card->dev);
7089 rc = qeth_send_startlan(card, QETH_PROT_IPV6);
7090 if (rc) {
7091 PRINT_ERR("IPv6 startlan failed on %s\n",
7092 QETH_CARD_IFNAME(card));
7093 return rc;
7094 }
7095 netif_wake_queue(card->dev);
7096 rc = qeth_query_ipassists(card,QETH_PROT_IPV6);
7097 if (rc) {
7098 PRINT_ERR("IPv6 query ipassist failed on %s\n",
7099 QETH_CARD_IFNAME(card));
7100 return rc;
7101 }
7102 rc = qeth_send_simple_setassparms(card, IPA_IPV6,
7103 IPA_CMD_ASS_START, 3);
7104 if (rc) {
7105 PRINT_WARN("IPv6 start assist (version 4) failed "
7106 "on %s: 0x%x\n",
7107 QETH_CARD_IFNAME(card), rc);
7108 return rc;
7109 }
7110 rc = qeth_send_simple_setassparms_ipv6(card, IPA_IPV6,
7111 IPA_CMD_ASS_START);
7112 if (rc) {
7113		PRINT_WARN("IPv6 start assist (version 6) failed "
7114 "on %s: 0x%x\n",
7115 QETH_CARD_IFNAME(card), rc);
7116 return rc;
7117 }
7118 rc = qeth_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
7119 IPA_CMD_ASS_START);
7120 if (rc) {
7121 PRINT_WARN("Could not enable passthrough "
7122 "on %s: 0x%x\n",
7123 QETH_CARD_IFNAME(card), rc);
7124 return rc;
7125 }
7126	PRINT_INFO("IPv6 enabled\n");
7127 return 0;
7128}
7129
7130#endif
7131
7132static int
7133qeth_start_ipa_ipv6(struct qeth_card *card)
7134{
7135 int rc = 0;
7136#ifdef CONFIG_QETH_IPV6
7137 QETH_DBF_TEXT(trace,3,"strtipv6");
7138
7139 if (!qeth_is_supported(card, IPA_IPV6)) {
7140 PRINT_WARN("IPv6 not supported on %s\n",
7141 QETH_CARD_IFNAME(card));
7142 return 0;
7143 }
7144 rc = qeth_softsetup_ipv6(card);
7145#endif
7146 return rc ;
7147}
7148
7149static int
7150qeth_start_ipa_broadcast(struct qeth_card *card)
7151{
7152 int rc;
7153
7154 QETH_DBF_TEXT(trace,3,"stbrdcst");
7155 card->info.broadcast_capable = 0;
7156 if (!qeth_is_supported(card, IPA_FILTERING)) {
7157 PRINT_WARN("Broadcast not supported on %s\n",
7158 QETH_CARD_IFNAME(card));
7159 rc = -EOPNOTSUPP;
7160 goto out;
7161 }
7162 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
7163 IPA_CMD_ASS_START, 0);
7164 if (rc) {
7165		PRINT_WARN("Could not enable broadcast filtering "
7166 "on %s: 0x%x\n",
7167 QETH_CARD_IFNAME(card), rc);
7168 goto out;
7169 }
7170
7171 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
7172 IPA_CMD_ASS_CONFIGURE, 1);
7173 if (rc) {
7174 PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n",
7175 QETH_CARD_IFNAME(card), rc);
7176 goto out;
7177 }
7178 card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
7179 PRINT_INFO("Broadcast enabled \n");
7180 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
7181 IPA_CMD_ASS_ENABLE, 1);
7182 if (rc) {
7183 PRINT_WARN("Could not set up broadcast echo filtering on "
7184 "%s: 0x%x\n", QETH_CARD_IFNAME(card), rc);
7185 goto out;
7186 }
7187 card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
7188out:
7189 if (card->info.broadcast_capable)
7190 card->dev->flags |= IFF_BROADCAST;
7191 else
7192 card->dev->flags &= ~IFF_BROADCAST;
7193 return rc;
7194}
7195
7196static int
7197qeth_send_checksum_command(struct qeth_card *card)
7198{
7199 int rc;
7200
7201 rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
7202 IPA_CMD_ASS_START, 0);
7203 if (rc) {
7204 PRINT_WARN("Starting Inbound HW Checksumming failed on %s: "
7205 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
7206 QETH_CARD_IFNAME(card), rc);
7207 return rc;
7208 }
7209 rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
7210 IPA_CMD_ASS_ENABLE,
7211 card->info.csum_mask);
7212 if (rc) {
7213 PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: "
7214 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
7215 QETH_CARD_IFNAME(card), rc);
7216 return rc;
7217 }
7218 return 0;
7219}
7220
7221static int
7222qeth_start_ipa_checksum(struct qeth_card *card)
7223{
7224 int rc = 0;
7225
7226 QETH_DBF_TEXT(trace,3,"strtcsum");
7227
7228 if (card->options.checksum_type == NO_CHECKSUMMING) {
7229 PRINT_WARN("Using no checksumming on %s.\n",
7230 QETH_CARD_IFNAME(card));
7231 return 0;
7232 }
7233 if (card->options.checksum_type == SW_CHECKSUMMING) {
7234 PRINT_WARN("Using SW checksumming on %s.\n",
7235 QETH_CARD_IFNAME(card));
7236 return 0;
7237 }
7238 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
7239 PRINT_WARN("Inbound HW Checksumming not "
7240 "supported on %s,\ncontinuing "
7241 "using Inbound SW Checksumming\n",
7242 QETH_CARD_IFNAME(card));
7243 card->options.checksum_type = SW_CHECKSUMMING;
7244 return 0;
7245 }
7246 rc = qeth_send_checksum_command(card);
7247 if (!rc) {
7248 PRINT_INFO("HW Checksumming (inbound) enabled \n");
7249 }
7250 return rc;
7251}
7252
7253static int
7254qeth_start_ipa_tso(struct qeth_card *card)
7255{
7256 int rc;
7257
7258 QETH_DBF_TEXT(trace,3,"sttso");
7259
7260 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
7261 PRINT_WARN("Outbound TSO not supported on %s\n",
7262 QETH_CARD_IFNAME(card));
7263 rc = -EOPNOTSUPP;
7264 } else {
7265 rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
7266 IPA_CMD_ASS_START,0);
7267 if (rc)
7268 PRINT_WARN("Could not start outbound TSO "
7269 "assist on %s: rc=%i\n",
7270 QETH_CARD_IFNAME(card), rc);
7271 else
7272 PRINT_INFO("Outbound TSO enabled\n");
7273 }
7274 if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)){
7275 card->options.large_send = QETH_LARGE_SEND_NO;
7276 card->dev->features &= ~ (NETIF_F_TSO | NETIF_F_SG);
7277 }
7278 return rc;
7279}
7280
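/*
 * Start all IP assists in sequence; failures are logged by the
 * individual helpers but do not stop the remaining assists.
 */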
7281static int
7282qeth_start_ipassists(struct qeth_card *card)
7283{
7284 QETH_DBF_TEXT(trace,3,"strtipas");
7285 qeth_start_ipa_arp_processing(card); /* go on*/
7286 qeth_start_ipa_ip_fragmentation(card); /* go on*/
7287 qeth_start_ipa_source_mac(card); /* go on*/
7288 qeth_start_ipa_vlan(card); /* go on*/
7289 qeth_start_ipa_multicast(card); /* go on*/
7290 qeth_start_ipa_ipv6(card); /* go on*/
7291 qeth_start_ipa_broadcast(card); /* go on*/
7292 qeth_start_ipa_checksum(card); /* go on*/
7293 qeth_start_ipa_tso(card); /* go on*/
7294 return 0;
7295}
7296
7297static int
7298qeth_send_setrouting(struct qeth_card *card, enum qeth_routing_types type,
7299 enum qeth_prot_versions prot)
7300{
7301 int rc;
7302 struct qeth_ipa_cmd *cmd;
7303 struct qeth_cmd_buffer *iob;
7304
7305 QETH_DBF_TEXT(trace,4,"setroutg");
7306 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
7307 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
7308 cmd->data.setrtg.type = (type);
7309 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
7310
7311 return rc;
7312
7313}
7314
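/*
 * Validate the configured routing type against the card type:
 * HiperSockets (IQD) uses connector roles, OSA uses router roles.
 * Unsupported combinations fall back to NO_ROUTER with a warning.
 */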
7315static void
7316qeth_correct_routing_type(struct qeth_card *card, enum qeth_routing_types *type,
7317 enum qeth_prot_versions prot)
7318{
7319 if (card->info.type == QETH_CARD_TYPE_IQD) {
7320 switch (*type) {
7321 case NO_ROUTER:
7322 case PRIMARY_CONNECTOR:
7323 case SECONDARY_CONNECTOR:
7324 case MULTICAST_ROUTER:
7325 return;
7326 default:
7327 goto out_inval;
7328 }
7329 } else {
7330 switch (*type) {
7331 case NO_ROUTER:
7332 case PRIMARY_ROUTER:
7333 case SECONDARY_ROUTER:
7334 return;
7335 case MULTICAST_ROUTER:
7336 if (qeth_is_ipafunc_supported(card, prot,
7337 IPA_OSA_MC_ROUTER))
7338 return;
7339 default:
7340 goto out_inval;
7341 }
7342 }
7343out_inval:
7344 PRINT_WARN("Routing type '%s' not supported for interface %s.\n"
7345 "Router status set to 'no router'.\n",
7346 ((*type == PRIMARY_ROUTER)? "primary router" :
7347 (*type == SECONDARY_ROUTER)? "secondary router" :
7348 (*type == PRIMARY_CONNECTOR)? "primary connector" :
7349 (*type == SECONDARY_CONNECTOR)? "secondary connector" :
7350 (*type == MULTICAST_ROUTER)? "multicast router" :
7351 "unknown"),
7352 card->dev->name);
7353 *type = NO_ROUTER;
7354}
7355
7356int
7357qeth_setrouting_v4(struct qeth_card *card)
7358{
7359 int rc;
7360
7361 QETH_DBF_TEXT(trace,3,"setrtg4");
7362
7363 qeth_correct_routing_type(card, &card->options.route4.type,
7364 QETH_PROT_IPV4);
7365
7366 rc = qeth_send_setrouting(card, card->options.route4.type,
7367 QETH_PROT_IPV4);
7368 if (rc) {
7369 card->options.route4.type = NO_ROUTER;
7370 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
7371 "Type set to 'no router'.\n",
7372 rc, QETH_CARD_IFNAME(card));
7373 }
7374 return rc;
7375}
7376
7377int
7378qeth_setrouting_v6(struct qeth_card *card)
7379{
7380 int rc = 0;
7381
7382 QETH_DBF_TEXT(trace,3,"setrtg6");
7383#ifdef CONFIG_QETH_IPV6
7384
7385 qeth_correct_routing_type(card, &card->options.route6.type,
7386 QETH_PROT_IPV6);
7387
7388 if ((card->options.route6.type == NO_ROUTER) ||
7389 ((card->info.type == QETH_CARD_TYPE_OSAE) &&
7390 (card->options.route6.type == MULTICAST_ROUTER) &&
7391 !qeth_is_supported6(card,IPA_OSA_MC_ROUTER)))
7392 return 0;
7393 rc = qeth_send_setrouting(card, card->options.route6.type,
7394 QETH_PROT_IPV6);
7395 if (rc) {
7396 card->options.route6.type = NO_ROUTER;
7397 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
7398 "Type set to 'no router'.\n",
7399 rc, QETH_CARD_IFNAME(card));
7400 }
7401#endif
7402 return rc;
7403}
7404
7405int
7406qeth_set_large_send(struct qeth_card *card, enum qeth_large_send_types type)
7407{
7408 int rc = 0;
7409
7410 if (card->dev == NULL) {
7411 card->options.large_send = type;
7412		return 0;
7413	}
7414	netif_stop_queue(card->dev);
7415	card->options.large_send = type;
7416 switch (card->options.large_send) {
7417 case QETH_LARGE_SEND_EDDP:
7418 card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
7419 break;
7420 case QETH_LARGE_SEND_TSO:
7421 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)){
7422 card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
7423 } else {
7424 PRINT_WARN("TSO not supported on %s. "
7425 "large_send set to 'no'.\n",
7426 card->dev->name);
7427 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
7428 card->options.large_send = QETH_LARGE_SEND_NO;
7429 rc = -EOPNOTSUPP;
7430 }
7431 break;
7432 default: /* includes QETH_LARGE_SEND_NO */
7433 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
7434 break;
7435 }
7436 netif_wake_queue(card->dev);
7437 return rc;
7438}
7439
7440/*
7441 * softsetup card: init IPA stuff
7442 */
7443static int
7444qeth_softsetup_card(struct qeth_card *card)
7445{
7446 int rc;
7447
7448 QETH_DBF_TEXT(setup, 2, "softsetp");
7449
7450 if ((rc = qeth_send_startlan(card, QETH_PROT_IPV4))){
7451 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
7452 if (rc == 0xe080){
7453			PRINT_WARN("LAN on card %s is offline! "
7454 "Continuing softsetup.\n",
7455 CARD_BUS_ID(card));
7456 card->lan_online = 0;
7457 } else
7458 return rc;
7459 } else
7460 card->lan_online = 1;
7461 if (card->info.type==QETH_CARD_TYPE_OSN)
7462 goto out;
7463 if (card->options.layer2) {
7464 card->dev->features |=
7465 NETIF_F_HW_VLAN_FILTER |
7466 NETIF_F_HW_VLAN_TX |
7467 NETIF_F_HW_VLAN_RX;
7468 card->dev->flags|=IFF_MULTICAST|IFF_BROADCAST;
7469 card->info.broadcast_capable=1;
7470 if ((rc = qeth_layer2_initialize(card))) {
7471 QETH_DBF_TEXT_(setup, 2, "L2err%d", rc);
7472 return rc;
7473 }
7474#ifdef CONFIG_QETH_VLAN
7475 qeth_layer2_process_vlans(card, 0);
7476#endif
7477 goto out;
7478 }
7479 if ((card->options.large_send == QETH_LARGE_SEND_EDDP) ||
7480 (card->options.large_send == QETH_LARGE_SEND_TSO))
7481 card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
7482 else
7483 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
7484
7485 if ((rc = qeth_setadapter_parms(card)))
7486 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
7487 if ((rc = qeth_start_ipassists(card)))
7488 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
7489 if ((rc = qeth_setrouting_v4(card)))
7490 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
7491 if ((rc = qeth_setrouting_v6(card)))
7492 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
7493out:
7494 netif_stop_queue(card->dev);
7495 return 0;
7496}
7497
7498#ifdef CONFIG_QETH_IPV6
7499static int
7500qeth_get_unique_id_cb(struct qeth_card *card, struct qeth_reply *reply,
7501 unsigned long data)
7502{
7503 struct qeth_ipa_cmd *cmd;
7504
7505 cmd = (struct qeth_ipa_cmd *) data;
7506 if (cmd->hdr.return_code == 0)
7507 card->info.unique_id = *((__u16 *)
7508 &cmd->data.create_destroy_addr.unique_id[6]);
7509 else {
7510 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7511 UNIQUE_ID_NOT_BY_CARD;
7512 PRINT_WARN("couldn't get a unique id from the card on device "
7513 "%s (result=x%x), using default id. ipv6 "
7514 "autoconfig on other lpars may lead to duplicate "
7515 "ip addresses. please use manually "
7516 "configured ones.\n",
7517 CARD_BUS_ID(card), cmd->hdr.return_code);
7518 }
7519 return 0;
7520}
7521#endif
7522
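/*
 * Hand the IPv6 unique id back to the card via IPA_CMD_DESTROY_ADDR.
 * Nothing is sent when the id was generated locally rather than by the
 * card (UNIQUE_ID_NOT_BY_CARD).
 */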
7523static int
7524qeth_put_unique_id(struct qeth_card *card)
7525{
7526
7527 int rc = 0;
7528#ifdef CONFIG_QETH_IPV6
7529 struct qeth_cmd_buffer *iob;
7530 struct qeth_ipa_cmd *cmd;
7531
7532 QETH_DBF_TEXT(trace,2,"puniqeid");
7533
7534 if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
7535 UNIQUE_ID_NOT_BY_CARD)
7536 return -1;
7537 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
7538 QETH_PROT_IPV6);
7539 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
7540 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
7541 card->info.unique_id;
7542 memcpy(&cmd->data.create_destroy_addr.unique_id[0],
7543 card->dev->dev_addr, OSA_ADDR_LEN);
7544 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
7545#else
7546 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7547 UNIQUE_ID_NOT_BY_CARD;
7548#endif
7549 return rc;
7550}
7551
7552/**
7553 * Clear IP List
7554 */
7555static void
7556qeth_clear_ip_list(struct qeth_card *card, int clean, int recover)
7557{
7558 struct qeth_ipaddr *addr, *tmp;
7559 unsigned long flags;
7560
7561 QETH_DBF_TEXT(trace,4,"clearip");
7562 spin_lock_irqsave(&card->ip_lock, flags);
7563 /* clear todo list */
7564 list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry){
7565 list_del(&addr->entry);
7566 kfree(addr);
7567 }
7568
7569 while (!list_empty(&card->ip_list)) {
7570 addr = list_entry(card->ip_list.next,
7571 struct qeth_ipaddr, entry);
7572 list_del_init(&addr->entry);
7573 if (clean) {
7574 spin_unlock_irqrestore(&card->ip_lock, flags);
7575 qeth_deregister_addr_entry(card, addr);
7576 spin_lock_irqsave(&card->ip_lock, flags);
7577 }
7578 if (!recover || addr->is_multicast) {
7579 kfree(addr);
7580 continue;
7581 }
7582 list_add_tail(&addr->entry, card->ip_tbd_list);
7583 }
7584 spin_unlock_irqrestore(&card->ip_lock, flags);
7585}
7586
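/*
 * Update the mask of driver threads that are allowed to run, optionally
 * clearing pending start bits, and wake up anyone waiting on the card.
 */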
7587static void
7588qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
7589 int clear_start_mask)
7590{
7591 unsigned long flags;
7592
7593 spin_lock_irqsave(&card->thread_mask_lock, flags);
7594 card->thread_allowed_mask = threads;
7595 if (clear_start_mask)
7596 card->thread_start_mask &= threads;
7597 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
7598 wake_up(&card->wait_q);
7599}
7600
7601static inline int
7602qeth_threads_running(struct qeth_card *card, unsigned long threads)
7603{
7604 unsigned long flags;
7605 int rc = 0;
7606
7607 spin_lock_irqsave(&card->thread_mask_lock, flags);
7608 rc = (card->thread_running_mask & threads);
7609 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
7610 return rc;
7611}
7612
7613static int
7614qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
7615{
7616 return wait_event_interruptible(card->wait_q,
7617 qeth_threads_running(card, threads) == 0);
7618}
7619
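/*
 * Take a card down in stages: close (or stop) the net_device, send
 * DELMAC/STOPLAN unless a hard stop was requested, flush the IP and
 * IPA command lists, clear the QDIO queues and finally release the
 * command buffers, walking the state from UP back to DOWN.
 */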
7620static int
05e08a2a 7621qeth_stop_card(struct qeth_card *card, int recovery_mode)
7622{
7623 int rc = 0;
7624
7625 QETH_DBF_TEXT(setup ,2,"stopcard");
7626 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
7627
7628 qeth_set_allowed_threads(card, 0, 1);
7629 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD))
7630 return -ERESTARTSYS;
7631 if (card->read.state == CH_STATE_UP &&
7632 card->write.state == CH_STATE_UP &&
7633 (card->state == CARD_STATE_UP)) {
7634 if (recovery_mode &&
7635 card->info.type != QETH_CARD_TYPE_OSN) {
7636 qeth_stop(card->dev);
7637 } else {
7638 rtnl_lock();
7639 dev_close(card->dev);
7640 rtnl_unlock();
7641 }
7642 if (!card->use_hard_stop) {
7643 __u8 *mac = &card->dev->dev_addr[0];
7644 rc = qeth_layer2_send_delmac(card, mac);
7645 QETH_DBF_TEXT_(setup, 2, "Lerr%d", rc);
7646 if ((rc = qeth_send_stoplan(card)))
7647 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
7648 }
7649 card->state = CARD_STATE_SOFTSETUP;
7650 }
7651 if (card->state == CARD_STATE_SOFTSETUP) {
7652#ifdef CONFIG_QETH_VLAN
7653 if (card->options.layer2)
7654 qeth_layer2_process_vlans(card, 1);
7655#endif
7656 qeth_clear_ip_list(card, !card->use_hard_stop, 1);
7657 qeth_clear_ipacmd_list(card);
7658 card->state = CARD_STATE_HARDSETUP;
7659 }
7660 if (card->state == CARD_STATE_HARDSETUP) {
7661 if ((!card->use_hard_stop) &&
7662 (!card->options.layer2))
7663 if ((rc = qeth_put_unique_id(card)))
7664 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
7665 qeth_qdio_clear_card(card, 0);
7666 qeth_clear_qdio_buffers(card);
7667 qeth_clear_working_pool_list(card);
7668 card->state = CARD_STATE_DOWN;
7669 }
7670 if (card->state == CARD_STATE_DOWN) {
7671 qeth_clear_cmd_buffers(&card->read);
7672 qeth_clear_cmd_buffers(&card->write);
7673 }
7674 card->use_hard_stop = 0;
7675 return rc;
7676}
7677
7678
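/*
 * Obtain an id for IPv6 interface identifiers via IPA_CMD_CREATE_ADDR.
 * If the card does not support IPv6 (or IPv6 is not configured) a
 * locally generated default id is used instead.
 */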
7679static int
7680qeth_get_unique_id(struct qeth_card *card)
7681{
7682 int rc = 0;
7683#ifdef CONFIG_QETH_IPV6
7684 struct qeth_cmd_buffer *iob;
7685 struct qeth_ipa_cmd *cmd;
7686
7687 QETH_DBF_TEXT(setup, 2, "guniqeid");
7688
7689 if (!qeth_is_supported(card,IPA_IPV6)) {
7690 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7691 UNIQUE_ID_NOT_BY_CARD;
7692 return 0;
7693 }
7694
7695 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
7696 QETH_PROT_IPV6);
7697 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
7698 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
7699 card->info.unique_id;
7700
7701 rc = qeth_send_ipa_cmd(card, iob, qeth_get_unique_id_cb, NULL);
7702#else
7703 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7704 UNIQUE_ID_NOT_BY_CARD;
7705#endif
7706 return rc;
7707}
7708static void
7709qeth_print_status_with_portname(struct qeth_card *card)
7710{
7711 char dbf_text[15];
7712 int i;
7713
7714 sprintf(dbf_text, "%s", card->info.portname + 1);
7715 for (i = 0; i < 8; i++)
7716 dbf_text[i] =
7717 (char) _ebcasc[(__u8) dbf_text[i]];
7718 dbf_text[8] = 0;
7719 printk("qeth: Device %s/%s/%s is a%s card%s%s%s\n"
7720 "with link type %s (portname: %s)\n",
7721 CARD_RDEV_ID(card),
7722 CARD_WDEV_ID(card),
7723 CARD_DDEV_ID(card),
7724 qeth_get_cardname(card),
7725 (card->info.mcl_level[0]) ? " (level: " : "",
7726 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
7727 (card->info.mcl_level[0]) ? ")" : "",
7728 qeth_get_cardname_short(card),
7729 dbf_text);
7730
7731}
7732
7733static void
7734qeth_print_status_no_portname(struct qeth_card *card)
7735{
7736 if (card->info.portname[0])
7737 printk("qeth: Device %s/%s/%s is a%s "
7738 "card%s%s%s\nwith link type %s "
7739 "(no portname needed by interface).\n",
7740 CARD_RDEV_ID(card),
7741 CARD_WDEV_ID(card),
7742 CARD_DDEV_ID(card),
7743 qeth_get_cardname(card),
7744 (card->info.mcl_level[0]) ? " (level: " : "",
7745 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
7746 (card->info.mcl_level[0]) ? ")" : "",
7747 qeth_get_cardname_short(card));
7748 else
7749 printk("qeth: Device %s/%s/%s is a%s "
7750 "card%s%s%s\nwith link type %s.\n",
7751 CARD_RDEV_ID(card),
7752 CARD_WDEV_ID(card),
7753 CARD_DDEV_ID(card),
7754 qeth_get_cardname(card),
7755 (card->info.mcl_level[0]) ? " (level: " : "",
7756 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
7757 (card->info.mcl_level[0]) ? ")" : "",
7758 qeth_get_cardname_short(card));
7759}
7760
7761static void
7762qeth_print_status_message(struct qeth_card *card)
7763{
7764 switch (card->info.type) {
7765 case QETH_CARD_TYPE_OSAE:
 7766 /* VM will use a non-zero first character
 7767 * to indicate a HiperSockets-like reporting
 7768 * of the level; OSA sets the first character to zero.
 7769 */
7770 if (!card->info.mcl_level[0]) {
7771 sprintf(card->info.mcl_level,"%02x%02x",
7772 card->info.mcl_level[2],
7773 card->info.mcl_level[3]);
7774
7775 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
7776 break;
7777 }
7778 /* fallthrough */
7779 case QETH_CARD_TYPE_IQD:
7780 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
7781 card->info.mcl_level[0]];
7782 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
7783 card->info.mcl_level[1]];
7784 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
7785 card->info.mcl_level[2]];
7786 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
7787 card->info.mcl_level[3]];
7788 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
7789 break;
7790 default:
7791 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
7792 }
7793 if (card->info.portname_required)
7794 qeth_print_status_with_portname(card);
7795 else
7796 qeth_print_status_no_portname(card);
7797}
7798
7799static int
7800qeth_register_netdev(struct qeth_card *card)
7801{
7802 QETH_DBF_TEXT(setup, 3, "regnetd");
7803 if (card->dev->reg_state != NETREG_UNINITIALIZED) {
7804 qeth_netdev_init(card->dev);
7805 return 0;
7806 }
7807 /* sysfs magic */
7808 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
7809 return register_netdev(card->dev);
7810}
7811
7812static void
05e08a2a 7813qeth_start_again(struct qeth_card *card, int recovery_mode)
7814{
7815 QETH_DBF_TEXT(setup ,2, "startag");
7816
7817 if (recovery_mode &&
7818 card->info.type != QETH_CARD_TYPE_OSN) {
7819 qeth_open(card->dev);
7820 } else {
7821 rtnl_lock();
7822 dev_open(card->dev);
7823 rtnl_unlock();
7824 }
7825 /* this also sets saved unicast addresses */
7826 qeth_set_multicast_list(card->dev);
7827}
7828
7829
7830/* Layer 2 specific stuff */
7831#define IGNORE_PARAM_EQ(option,value,reset_value,msg) \
7832 if (card->options.option == value) { \
7833 PRINT_ERR("%s not supported with layer 2 " \
7834 "functionality, ignoring option on read" \
7835 "channel device %s .\n",msg,CARD_RDEV_ID(card)); \
7836 card->options.option = reset_value; \
7837 }
7838#define IGNORE_PARAM_NEQ(option,value,reset_value,msg) \
7839 if (card->options.option != value) { \
7840 PRINT_ERR("%s not supported with layer 2 " \
7841 "functionality, ignoring option on read" \
7842 "channel device %s .\n",msg,CARD_RDEV_ID(card)); \
7843 card->options.option = reset_value; \
7844 }
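/*
 * Example (illustration only): with layer2 enabled,
 *	IGNORE_PARAM_NEQ(fake_ll, 0, 0, "Option fake_ll is");
 * warns that "Option fake_ll is not supported with layer 2
 * functionality ..." and resets card->options.fake_ll to 0.
 */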
7845
7846
7847static void qeth_make_parameters_consistent(struct qeth_card *card)
7848{
7849
7850 if (card->options.layer2 == 0)
7851 return;
7852 if (card->info.type == QETH_CARD_TYPE_OSN)
7853 return;
7854 if (card->info.type == QETH_CARD_TYPE_IQD) {
7855 PRINT_ERR("Device %s does not support layer 2 functionality." \
7856 " Ignoring layer2 option.\n",CARD_BUS_ID(card));
7857 card->options.layer2 = 0;
7858 return;
7859 }
7860 IGNORE_PARAM_NEQ(route4.type, NO_ROUTER, NO_ROUTER,
7861 "Routing options are");
1da177e4 7862#ifdef CONFIG_QETH_IPV6
7863 IGNORE_PARAM_NEQ(route6.type, NO_ROUTER, NO_ROUTER,
7864 "Routing options are");
1da177e4 7865#endif
7866 IGNORE_PARAM_EQ(checksum_type, HW_CHECKSUMMING,
7867 QETH_CHECKSUM_DEFAULT,
7868 "Checksumming options are");
7869 IGNORE_PARAM_NEQ(broadcast_mode, QETH_TR_BROADCAST_ALLRINGS,
7870 QETH_TR_BROADCAST_ALLRINGS,
7871 "Broadcast mode options are");
7872 IGNORE_PARAM_NEQ(macaddr_mode, QETH_TR_MACADDR_NONCANONICAL,
7873 QETH_TR_MACADDR_NONCANONICAL,
7874 "Canonical MAC addr options are");
7875 IGNORE_PARAM_NEQ(fake_broadcast, 0, 0,
7876 "Broadcast faking options are");
7877 IGNORE_PARAM_NEQ(add_hhlen, DEFAULT_ADD_HHLEN,
7878 DEFAULT_ADD_HHLEN,"Option add_hhlen is");
7879 IGNORE_PARAM_NEQ(fake_ll, 0, 0,"Option fake_ll is");
7880}
7881
7882
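/*
 * Bring a card online: set the read/write/data subchannels online, run
 * hardsetup and softsetup, register the net_device and initialize the
 * QDIO queues.  On failure the card is hard-stopped and the subchannels
 * are set offline again.
 */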
7883static int
05e08a2a 7884__qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode)
7885{
7886 struct qeth_card *card = gdev->dev.driver_data;
7887 int rc = 0;
7888 enum qeth_card_states recover_flag;
7889
7890 BUG_ON(!card);
7891 QETH_DBF_TEXT(setup ,2, "setonlin");
7892 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
7893
7894 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
7895 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)){
7896 PRINT_WARN("set_online of card %s interrupted by user!\n",
7897 CARD_BUS_ID(card));
7898 return -ERESTARTSYS;
7899 }
7900
7901 recover_flag = card->state;
7902 if ((rc = ccw_device_set_online(CARD_RDEV(card))) ||
7903 (rc = ccw_device_set_online(CARD_WDEV(card))) ||
7904 (rc = ccw_device_set_online(CARD_DDEV(card)))){
7905 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
7906 return -EIO;
7907 }
7908
500f83ab 7909 qeth_make_parameters_consistent(card);
7910
7911 if ((rc = qeth_hardsetup_card(card))){
7912 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
7913 goto out_remove;
7914 }
7915 card->state = CARD_STATE_HARDSETUP;
7916
7917 if (!(rc = qeth_query_ipassists(card,QETH_PROT_IPV4)))
7918 rc = qeth_get_unique_id(card);
7919
7920 if (rc && card->options.layer2 == 0) {
7921 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
7922 goto out_remove;
7923 }
7924 qeth_print_status_message(card);
7925 if ((rc = qeth_register_netdev(card))){
7926 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
7927 goto out_remove;
7928 }
7929 if ((rc = qeth_softsetup_card(card))){
7930 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
7931 goto out_remove;
7932 }
7933 card->state = CARD_STATE_SOFTSETUP;
7934
7935 if ((rc = qeth_init_qdio_queues(card))){
7936 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
7937 goto out_remove;
7938 }
7939/* maybe it was set offline without ifconfig down;
 7940 * we can also use this state for recovery purposes */
7941 qeth_set_allowed_threads(card, 0xffffffff, 0);
7942 if (recover_flag == CARD_STATE_RECOVER)
05e08a2a 7943 qeth_start_again(card, recovery_mode);
7944 qeth_notify_processes();
7945 return 0;
7946out_remove:
7947 card->use_hard_stop = 1;
05e08a2a 7948 qeth_stop_card(card, 0);
7949 ccw_device_set_offline(CARD_DDEV(card));
7950 ccw_device_set_offline(CARD_WDEV(card));
7951 ccw_device_set_offline(CARD_RDEV(card));
7952 if (recover_flag == CARD_STATE_RECOVER)
7953 card->state = CARD_STATE_RECOVER;
7954 else
7955 card->state = CARD_STATE_DOWN;
7956 return -ENODEV;
7957}
7958
7959static int
7960qeth_set_online(struct ccwgroup_device *gdev)
7961{
7962 return __qeth_set_online(gdev, 0);
7963}
7964
7965static struct ccw_device_id qeth_ids[] = {
 7966 {CCW_DEVICE(0x1731, 0x01), .driver_info = QETH_CARD_TYPE_OSAE},
 7967 {CCW_DEVICE(0x1731, 0x05), .driver_info = QETH_CARD_TYPE_IQD},
 500f83ab 7968 {CCW_DEVICE(0x1731, 0x06), .driver_info = QETH_CARD_TYPE_OSN},
7969 {},
7970};
7971MODULE_DEVICE_TABLE(ccw, qeth_ids);
7972
7973struct device *qeth_root_dev = NULL;
7974
7975struct ccwgroup_driver qeth_ccwgroup_driver = {
7976 .owner = THIS_MODULE,
7977 .name = "qeth",
7978 .driver_id = 0xD8C5E3C8,
7979 .probe = qeth_probe_device,
7980 .remove = qeth_remove_device,
7981 .set_online = qeth_set_online,
7982 .set_offline = qeth_set_offline,
7983};
7984
7985struct ccw_driver qeth_ccw_driver = {
7986 .name = "qeth",
7987 .ids = qeth_ids,
7988 .probe = ccwgroup_probe_ccwdev,
7989 .remove = ccwgroup_remove_ccwdev,
7990};
7991
7992
7993static void
7994qeth_unregister_dbf_views(void)
7995{
7996 if (qeth_dbf_setup)
7997 debug_unregister(qeth_dbf_setup);
7998 if (qeth_dbf_qerr)
7999 debug_unregister(qeth_dbf_qerr);
8000 if (qeth_dbf_sense)
8001 debug_unregister(qeth_dbf_sense);
8002 if (qeth_dbf_misc)
8003 debug_unregister(qeth_dbf_misc);
8004 if (qeth_dbf_data)
8005 debug_unregister(qeth_dbf_data);
8006 if (qeth_dbf_control)
8007 debug_unregister(qeth_dbf_control);
8008 if (qeth_dbf_trace)
8009 debug_unregister(qeth_dbf_trace);
8010}
8011static int
8012qeth_register_dbf_views(void)
8013{
8014 qeth_dbf_setup = debug_register(QETH_DBF_SETUP_NAME,
66a464db 8015 QETH_DBF_SETUP_PAGES,
8016 QETH_DBF_SETUP_NR_AREAS,
8017 QETH_DBF_SETUP_LEN);
8018 qeth_dbf_misc = debug_register(QETH_DBF_MISC_NAME,
66a464db 8019 QETH_DBF_MISC_PAGES,
8020 QETH_DBF_MISC_NR_AREAS,
8021 QETH_DBF_MISC_LEN);
8022 qeth_dbf_data = debug_register(QETH_DBF_DATA_NAME,
66a464db 8023 QETH_DBF_DATA_PAGES,
8024 QETH_DBF_DATA_NR_AREAS,
8025 QETH_DBF_DATA_LEN);
8026 qeth_dbf_control = debug_register(QETH_DBF_CONTROL_NAME,
66a464db 8027 QETH_DBF_CONTROL_PAGES,
8028 QETH_DBF_CONTROL_NR_AREAS,
8029 QETH_DBF_CONTROL_LEN);
8030 qeth_dbf_sense = debug_register(QETH_DBF_SENSE_NAME,
66a464db 8031 QETH_DBF_SENSE_PAGES,
8032 QETH_DBF_SENSE_NR_AREAS,
8033 QETH_DBF_SENSE_LEN);
8034 qeth_dbf_qerr = debug_register(QETH_DBF_QERR_NAME,
66a464db 8035 QETH_DBF_QERR_PAGES,
8036 QETH_DBF_QERR_NR_AREAS,
8037 QETH_DBF_QERR_LEN);
8038 qeth_dbf_trace = debug_register(QETH_DBF_TRACE_NAME,
66a464db 8039 QETH_DBF_TRACE_PAGES,
8040 QETH_DBF_TRACE_NR_AREAS,
8041 QETH_DBF_TRACE_LEN);
8042
8043 if ((qeth_dbf_setup == NULL) || (qeth_dbf_misc == NULL) ||
8044 (qeth_dbf_data == NULL) || (qeth_dbf_control == NULL) ||
8045 (qeth_dbf_sense == NULL) || (qeth_dbf_qerr == NULL) ||
8046 (qeth_dbf_trace == NULL)) {
8047 qeth_unregister_dbf_views();
8048 return -ENOMEM;
8049 }
8050 debug_register_view(qeth_dbf_setup, &debug_hex_ascii_view);
8051 debug_set_level(qeth_dbf_setup, QETH_DBF_SETUP_LEVEL);
8052
8053 debug_register_view(qeth_dbf_misc, &debug_hex_ascii_view);
8054 debug_set_level(qeth_dbf_misc, QETH_DBF_MISC_LEVEL);
8055
8056 debug_register_view(qeth_dbf_data, &debug_hex_ascii_view);
8057 debug_set_level(qeth_dbf_data, QETH_DBF_DATA_LEVEL);
8058
8059 debug_register_view(qeth_dbf_control, &debug_hex_ascii_view);
8060 debug_set_level(qeth_dbf_control, QETH_DBF_CONTROL_LEVEL);
8061
8062 debug_register_view(qeth_dbf_sense, &debug_hex_ascii_view);
8063 debug_set_level(qeth_dbf_sense, QETH_DBF_SENSE_LEVEL);
8064
8065 debug_register_view(qeth_dbf_qerr, &debug_hex_ascii_view);
8066 debug_set_level(qeth_dbf_qerr, QETH_DBF_QERR_LEVEL);
8067
8068 debug_register_view(qeth_dbf_trace, &debug_hex_ascii_view);
8069 debug_set_level(qeth_dbf_trace, QETH_DBF_TRACE_LEVEL);
8070
8071 return 0;
8072}
8073
8074#ifdef CONFIG_QETH_IPV6
8075extern struct neigh_table arp_tbl;
8076static struct neigh_ops *arp_direct_ops;
8077static int (*qeth_old_arp_constructor) (struct neighbour *);
8078
8079static struct neigh_ops arp_direct_ops_template = {
8080 .family = AF_INET,
8081 .destructor = NULL,
8082 .solicit = NULL,
8083 .error_report = NULL,
8084 .output = dev_queue_xmit,
8085 .connected_output = dev_queue_xmit,
8086 .hh_output = dev_queue_xmit,
8087 .queue_xmit = dev_queue_xmit
8088};
8089
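/*
 * Neighbour constructor used while qeth is loaded: for layer 3 qeth
 * devices (without faked LL headers) the neighbour is marked NUD_NOARP
 * and wired to the direct dev_queue_xmit ops, bypassing kernel ARP
 * resolution; everything else falls through to the original constructor.
 */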
8090static int
8091qeth_arp_constructor(struct neighbour *neigh)
8092{
8093 struct net_device *dev = neigh->dev;
8094 struct in_device *in_dev;
8095 struct neigh_parms *parms;
8096 struct qeth_card *card;
8097
8098 card = qeth_get_card_from_dev(dev);
8099 if (card == NULL)
8100 goto out;
8101 if((card->options.layer2) ||
8102 (card->dev->hard_header == qeth_fake_header))
8103 goto out;
8104
8105 rcu_read_lock();
e5ed6399 8106 in_dev = __in_dev_get_rcu(dev);
8107 if (in_dev == NULL) {
8108 rcu_read_unlock();
8109 return -EINVAL;
8110 }
8111
8112 parms = in_dev->arp_parms;
8113 __neigh_parms_put(neigh->parms);
8114 neigh->parms = neigh_parms_clone(parms);
8115 rcu_read_unlock();
8116
8117 neigh->type = inet_addr_type(*(u32 *) neigh->primary_key);
8118 neigh->nud_state = NUD_NOARP;
8119 neigh->ops = arp_direct_ops;
8120 neigh->output = neigh->ops->queue_xmit;
8121 return 0;
8122out:
8123 return qeth_old_arp_constructor(neigh);
8124}
8125#endif /*CONFIG_QETH_IPV6*/
8126
8127/*
8128 * IP address takeover related functions
8129 */
8130static void
8131qeth_clear_ipato_list(struct qeth_card *card)
8132{
8133 struct qeth_ipato_entry *ipatoe, *tmp;
8134 unsigned long flags;
8135
8136 spin_lock_irqsave(&card->ip_lock, flags);
8137 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
8138 list_del(&ipatoe->entry);
8139 kfree(ipatoe);
8140 }
8141 spin_unlock_irqrestore(&card->ip_lock, flags);
8142}
8143
8144int
8145qeth_add_ipato_entry(struct qeth_card *card, struct qeth_ipato_entry *new)
8146{
8147 struct qeth_ipato_entry *ipatoe;
8148 unsigned long flags;
8149 int rc = 0;
8150
8151 QETH_DBF_TEXT(trace, 2, "addipato");
8152 spin_lock_irqsave(&card->ip_lock, flags);
8153 list_for_each_entry(ipatoe, &card->ipato.entries, entry){
8154 if (ipatoe->proto != new->proto)
8155 continue;
8156 if (!memcmp(ipatoe->addr, new->addr,
8157 (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
8158 (ipatoe->mask_bits == new->mask_bits)){
8159 PRINT_WARN("ipato entry already exists!\n");
8160 rc = -EEXIST;
8161 break;
8162 }
8163 }
8164 if (!rc) {
8165 list_add_tail(&new->entry, &card->ipato.entries);
8166 }
8167 spin_unlock_irqrestore(&card->ip_lock, flags);
8168 return rc;
8169}
8170
8171void
8172qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto,
8173 u8 *addr, int mask_bits)
8174{
8175 struct qeth_ipato_entry *ipatoe, *tmp;
8176 unsigned long flags;
8177
8178 QETH_DBF_TEXT(trace, 2, "delipato");
8179 spin_lock_irqsave(&card->ip_lock, flags);
8180 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry){
8181 if (ipatoe->proto != proto)
8182 continue;
8183 if (!memcmp(ipatoe->addr, addr,
8184 (proto == QETH_PROT_IPV4)? 4:16) &&
8185 (ipatoe->mask_bits == mask_bits)){
8186 list_del(&ipatoe->entry);
8187 kfree(ipatoe);
8188 }
8189 }
8190 spin_unlock_irqrestore(&card->ip_lock, flags);
8191}
8192
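/*
 * Expand each address octet into eight single-byte "bits" (most
 * significant bit first) so that prefixes can be compared with memcmp().
 */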
8193static inline void
8194qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
8195{
8196 int i, j;
8197 u8 octet;
8198
8199 for (i = 0; i < len; ++i){
8200 octet = addr[i];
8201 for (j = 7; j >= 0; --j){
8202 bits[i*8 + j] = octet & 1;
8203 octet >>= 1;
8204 }
8205 }
8206}
8207
8208static int
8209qeth_is_addr_covered_by_ipato(struct qeth_card *card, struct qeth_ipaddr *addr)
8210{
8211 struct qeth_ipato_entry *ipatoe;
8212 u8 addr_bits[128] = {0, };
8213 u8 ipatoe_bits[128] = {0, };
8214 int rc = 0;
8215
8216 if (!card->ipato.enabled)
8217 return 0;
8218
8219 qeth_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
8220 (addr->proto == QETH_PROT_IPV4)? 4:16);
8221 list_for_each_entry(ipatoe, &card->ipato.entries, entry){
8222 if (addr->proto != ipatoe->proto)
8223 continue;
8224 qeth_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
8225 (ipatoe->proto==QETH_PROT_IPV4) ?
8226 4:16);
8227 if (addr->proto == QETH_PROT_IPV4)
8228 rc = !memcmp(addr_bits, ipatoe_bits,
8229 min(32, ipatoe->mask_bits));
8230 else
8231 rc = !memcmp(addr_bits, ipatoe_bits,
8232 min(128, ipatoe->mask_bits));
8233 if (rc)
8234 break;
8235 }
8236 /* invert? */
8237 if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
8238 rc = !rc;
8239 else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
8240 rc = !rc;
8241
8242 return rc;
8243}
8244
8245/*
8246 * VIPA related functions
8247 */
8248int
8249qeth_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
8250 const u8 *addr)
8251{
8252 struct qeth_ipaddr *ipaddr;
8253 unsigned long flags;
8254 int rc = 0;
8255
8256 ipaddr = qeth_get_addr_buffer(proto);
8257 if (ipaddr){
8258 if (proto == QETH_PROT_IPV4){
8259 QETH_DBF_TEXT(trace, 2, "addvipa4");
8260 memcpy(&ipaddr->u.a4.addr, addr, 4);
8261 ipaddr->u.a4.mask = 0;
8262#ifdef CONFIG_QETH_IPV6
8263 } else if (proto == QETH_PROT_IPV6){
8264 QETH_DBF_TEXT(trace, 2, "addvipa6");
8265 memcpy(&ipaddr->u.a6.addr, addr, 16);
8266 ipaddr->u.a6.pfxlen = 0;
8267#endif
8268 }
8269 ipaddr->type = QETH_IP_TYPE_VIPA;
8270 ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG;
8271 ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
8272 } else
8273 return -ENOMEM;
8274 spin_lock_irqsave(&card->ip_lock, flags);
8275 if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
8276 __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
8277 rc = -EEXIST;
8278 spin_unlock_irqrestore(&card->ip_lock, flags);
8279 if (rc){
8280 PRINT_WARN("Cannot add VIPA. Address already exists!\n");
8281 return rc;
8282 }
8283 if (!qeth_add_ip(card, ipaddr))
8284 kfree(ipaddr);
8285 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
8286 schedule_work(&card->kernel_thread_starter);
8287 return rc;
8288}
8289
8290void
8291qeth_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
8292 const u8 *addr)
8293{
8294 struct qeth_ipaddr *ipaddr;
8295
8296 ipaddr = qeth_get_addr_buffer(proto);
8297 if (ipaddr){
8298 if (proto == QETH_PROT_IPV4){
8299 QETH_DBF_TEXT(trace, 2, "delvipa4");
8300 memcpy(&ipaddr->u.a4.addr, addr, 4);
8301 ipaddr->u.a4.mask = 0;
8302#ifdef CONFIG_QETH_IPV6
8303 } else if (proto == QETH_PROT_IPV6){
8304 QETH_DBF_TEXT(trace, 2, "delvipa6");
8305 memcpy(&ipaddr->u.a6.addr, addr, 16);
8306 ipaddr->u.a6.pfxlen = 0;
8307#endif
8308 }
8309 ipaddr->type = QETH_IP_TYPE_VIPA;
8310 } else
8311 return;
8312 if (!qeth_delete_ip(card, ipaddr))
8313 kfree(ipaddr);
8314 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
8315 schedule_work(&card->kernel_thread_starter);
8316}
8317
8318/*
8319 * proxy ARP related functions
8320 */
8321int
8322qeth_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
8323 const u8 *addr)
8324{
8325 struct qeth_ipaddr *ipaddr;
8326 unsigned long flags;
8327 int rc = 0;
8328
8329 ipaddr = qeth_get_addr_buffer(proto);
8330 if (ipaddr){
8331 if (proto == QETH_PROT_IPV4){
8332 QETH_DBF_TEXT(trace, 2, "addrxip4");
8333 memcpy(&ipaddr->u.a4.addr, addr, 4);
8334 ipaddr->u.a4.mask = 0;
8335#ifdef CONFIG_QETH_IPV6
8336 } else if (proto == QETH_PROT_IPV6){
8337 QETH_DBF_TEXT(trace, 2, "addrxip6");
8338 memcpy(&ipaddr->u.a6.addr, addr, 16);
8339 ipaddr->u.a6.pfxlen = 0;
8340#endif
8341 }
8342 ipaddr->type = QETH_IP_TYPE_RXIP;
8343 ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
8344 ipaddr->del_flags = 0;
8345 } else
8346 return -ENOMEM;
8347 spin_lock_irqsave(&card->ip_lock, flags);
8348 if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
8349 __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
8350 rc = -EEXIST;
8351 spin_unlock_irqrestore(&card->ip_lock, flags);
8352 if (rc){
8353 PRINT_WARN("Cannot add RXIP. Address already exists!\n");
8354 return rc;
8355 }
8356 if (!qeth_add_ip(card, ipaddr))
8357 kfree(ipaddr);
8358 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
8359 schedule_work(&card->kernel_thread_starter);
8360 return 0;
8361}
8362
8363void
8364qeth_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
8365 const u8 *addr)
8366{
8367 struct qeth_ipaddr *ipaddr;
8368
8369 ipaddr = qeth_get_addr_buffer(proto);
8370 if (ipaddr){
8371 if (proto == QETH_PROT_IPV4){
8372 QETH_DBF_TEXT(trace, 2, "addrxip4");
8373 memcpy(&ipaddr->u.a4.addr, addr, 4);
8374 ipaddr->u.a4.mask = 0;
8375#ifdef CONFIG_QETH_IPV6
8376 } else if (proto == QETH_PROT_IPV6){
8377 QETH_DBF_TEXT(trace, 2, "addrxip6");
8378 memcpy(&ipaddr->u.a6.addr, addr, 16);
8379 ipaddr->u.a6.pfxlen = 0;
8380#endif
8381 }
8382 ipaddr->type = QETH_IP_TYPE_RXIP;
8383 } else
8384 return;
8385 if (!qeth_delete_ip(card, ipaddr))
8386 kfree(ipaddr);
8387 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
8388 schedule_work(&card->kernel_thread_starter);
8389}
8390
8391/**
8392 * IP event handler
8393 */
8394static int
8395qeth_ip_event(struct notifier_block *this,
8396 unsigned long event,void *ptr)
8397{
8398 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
8399 struct net_device *dev =(struct net_device *) ifa->ifa_dev->dev;
8400 struct qeth_ipaddr *addr;
8401 struct qeth_card *card;
8402
8403 QETH_DBF_TEXT(trace,3,"ipevent");
8404 card = qeth_get_card_from_dev(dev);
8405 if (!card)
8406 return NOTIFY_DONE;
8407 if (card->options.layer2)
8408 return NOTIFY_DONE;
8409
8410 addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
8411 if (addr != NULL) {
8412 addr->u.a4.addr = ifa->ifa_address;
8413 addr->u.a4.mask = ifa->ifa_mask;
8414 addr->type = QETH_IP_TYPE_NORMAL;
8415 } else
8416 goto out;
8417
8418 switch(event) {
8419 case NETDEV_UP:
8420 if (!qeth_add_ip(card, addr))
8421 kfree(addr);
8422 break;
8423 case NETDEV_DOWN:
8424 if (!qeth_delete_ip(card, addr))
8425 kfree(addr);
8426 break;
8427 default:
8428 break;
8429 }
8430 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
8431 schedule_work(&card->kernel_thread_starter);
8432out:
8433 return NOTIFY_DONE;
8434}
8435
8436static struct notifier_block qeth_ip_notifier = {
8437 qeth_ip_event,
8438 0
8439};
8440
8441#ifdef CONFIG_QETH_IPV6
8442/**
8443 * IPv6 event handler
8444 */
8445static int
8446qeth_ip6_event(struct notifier_block *this,
8447 unsigned long event,void *ptr)
8448{
8449
8450 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
8451 struct net_device *dev = (struct net_device *)ifa->idev->dev;
8452 struct qeth_ipaddr *addr;
8453 struct qeth_card *card;
8454
8455 QETH_DBF_TEXT(trace,3,"ip6event");
8456
8457 card = qeth_get_card_from_dev(dev);
8458 if (!card)
8459 return NOTIFY_DONE;
8460 if (!qeth_is_supported(card, IPA_IPV6))
8461 return NOTIFY_DONE;
8462
8463 addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
8464 if (addr != NULL) {
8465 memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
8466 addr->u.a6.pfxlen = ifa->prefix_len;
8467 addr->type = QETH_IP_TYPE_NORMAL;
8468 } else
8469 goto out;
8470
8471 switch(event) {
8472 case NETDEV_UP:
8473 if (!qeth_add_ip(card, addr))
8474 kfree(addr);
8475 break;
8476 case NETDEV_DOWN:
8477 if (!qeth_delete_ip(card, addr))
8478 kfree(addr);
8479 break;
8480 default:
8481 break;
8482 }
8483 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
8484 schedule_work(&card->kernel_thread_starter);
8485out:
8486 return NOTIFY_DONE;
8487}
8488
8489static struct notifier_block qeth_ip6_notifier = {
8490 qeth_ip6_event,
8491 0
8492};
8493#endif
8494
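/*
 * Reboot notifier: walk all qeth devices and clear their IP lists and
 * QDIO queues so the cards are left in a clean state for the next boot.
 */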
8495static int
66aea23f 8496__qeth_reboot_event_card(struct device *dev, void *data)
1da177e4 8497{
8498 struct qeth_card *card;
8499
8500 card = (struct qeth_card *) dev->driver_data;
8501 qeth_clear_ip_list(card, 0, 0);
8502 qeth_qdio_clear_card(card, 0);
8503 return 0;
8504}
8505
8506static int
8507qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
8508{
8509
8510 driver_for_each_device(&qeth_ccwgroup_driver.driver, NULL, NULL,
8511 __qeth_reboot_event_card);
8512 return NOTIFY_DONE;
8513}
8514
8515
8516static struct notifier_block qeth_reboot_notifier = {
8517 qeth_reboot_event,
8518 0
8519};
8520
8521static int
8522qeth_register_notifiers(void)
8523{
8524 int r;
8525
8526 QETH_DBF_TEXT(trace,5,"regnotif");
8527 if ((r = register_reboot_notifier(&qeth_reboot_notifier)))
8528 return r;
8529 if ((r = register_inetaddr_notifier(&qeth_ip_notifier)))
8530 goto out_reboot;
8531#ifdef CONFIG_QETH_IPV6
8532 if ((r = register_inet6addr_notifier(&qeth_ip6_notifier)))
8533 goto out_ipv4;
8534#endif
8535 return 0;
8536
8537#ifdef CONFIG_QETH_IPV6
8538out_ipv4:
8539 unregister_inetaddr_notifier(&qeth_ip_notifier);
8540#endif
8541out_reboot:
8542 unregister_reboot_notifier(&qeth_reboot_notifier);
8543 return r;
8544}
8545
8546/**
8547 * unregister all event notifiers
8548 */
8549static void
8550qeth_unregister_notifiers(void)
8551{
8552
8553 QETH_DBF_TEXT(trace,5,"unregnot");
8554 BUG_ON(unregister_reboot_notifier(&qeth_reboot_notifier));
8555 BUG_ON(unregister_inetaddr_notifier(&qeth_ip_notifier));
8556#ifdef CONFIG_QETH_IPV6
8557 BUG_ON(unregister_inet6addr_notifier(&qeth_ip6_notifier));
8558#endif /* QETH_IPV6 */
8559
8560}
8561
8562#ifdef CONFIG_QETH_IPV6
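/*
 * Hook the qeth ARP constructor into arp_tbl and allocate the
 * direct-output neigh_ops used by qeth_arp_constructor().
 */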
8563static int
8564qeth_ipv6_init(void)
8565{
8566 qeth_old_arp_constructor = arp_tbl.constructor;
8567 write_lock(&arp_tbl.lock);
8568 arp_tbl.constructor = qeth_arp_constructor;
8569 write_unlock(&arp_tbl.lock);
8570
8571 arp_direct_ops = (struct neigh_ops*)
8572 kmalloc(sizeof(struct neigh_ops), GFP_KERNEL);
8573 if (!arp_direct_ops)
8574 return -ENOMEM;
8575
8576 memcpy(arp_direct_ops, &arp_direct_ops_template,
8577 sizeof(struct neigh_ops));
8578
8579 return 0;
8580}
8581
8582static void
8583qeth_ipv6_uninit(void)
8584{
8585 write_lock(&arp_tbl.lock);
8586 arp_tbl.constructor = qeth_old_arp_constructor;
8587 write_unlock(&arp_tbl.lock);
8588 kfree(arp_direct_ops);
8589}
8590#endif /* CONFIG_QETH_IPV6 */
8591
8592static void
8593qeth_sysfs_unregister(void)
8594{
8595 qeth_remove_driver_attributes();
8596 ccw_driver_unregister(&qeth_ccw_driver);
8597 ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
8598 s390_root_dev_unregister(qeth_root_dev);
8599}
8600/**
8601 * register qeth at sysfs
8602 */
8603static int
8604qeth_sysfs_register(void)
8605{
8606 int rc=0;
8607
8608 rc = ccwgroup_driver_register(&qeth_ccwgroup_driver);
8609 if (rc)
8610 return rc;
8611 rc = ccw_driver_register(&qeth_ccw_driver);
8612 if (rc)
8613 return rc;
8614 rc = qeth_create_driver_attributes();
8615 if (rc)
8616 return rc;
8617 qeth_root_dev = s390_root_dev_register("qeth");
8618 if (IS_ERR(qeth_root_dev)) {
8619 rc = PTR_ERR(qeth_root_dev);
8620 return rc;
8621 }
8622 return 0;
8623}
8624
8625/***
8626 * init function
8627 */
8628static int __init
8629qeth_init(void)
8630{
8631 int rc=0;
8632
8633 PRINT_INFO("loading %s (%s/%s/%s/%s/%s/%s/%s %s %s)\n",
8634 version, VERSION_QETH_C, VERSION_QETH_H,
8635 VERSION_QETH_MPC_H, VERSION_QETH_MPC_C,
8636 VERSION_QETH_FS_H, VERSION_QETH_PROC_C,
8637 VERSION_QETH_SYS_C, QETH_VERSION_IPV6,
8638 QETH_VERSION_VLAN);
8639
8640 INIT_LIST_HEAD(&qeth_card_list.list);
8641 INIT_LIST_HEAD(&qeth_notify_list);
8642 spin_lock_init(&qeth_notify_lock);
8643 rwlock_init(&qeth_card_list.rwlock);
8644
8645 if (qeth_register_dbf_views())
8646 goto out_err;
8647 if (qeth_sysfs_register())
8648 goto out_sysfs;
8649
8650#ifdef CONFIG_QETH_IPV6
8651 if (qeth_ipv6_init()) {
8652 PRINT_ERR("Out of memory during ipv6 init.\n");
8653 goto out_sysfs;
8654 }
8655#endif /* QETH_IPV6 */
8656 if (qeth_register_notifiers())
8657 goto out_ipv6;
8658 if (qeth_create_procfs_entries())
8659 goto out_notifiers;
8660
8661 return rc;
8662
8663out_notifiers:
8664 qeth_unregister_notifiers();
8665out_ipv6:
8666#ifdef CONFIG_QETH_IPV6
8667 qeth_ipv6_uninit();
8668#endif /* QETH_IPV6 */
8669out_sysfs:
8670 qeth_sysfs_unregister();
8671 qeth_unregister_dbf_views();
8672out_err:
8673 PRINT_ERR("Initialization failed");
8674 return rc;
8675}
8676
8677static void
8678__exit qeth_exit(void)
8679{
8680 struct qeth_card *card, *tmp;
8681 unsigned long flags;
8682
8683 QETH_DBF_TEXT(trace,1, "cleanup.");
8684
8685 /*
 8686 * We would not need to clean up our devices here, because the
8687 * common device layer calls qeth_remove_device for each device
8688 * as soon as we unregister our driver (done in qeth_sysfs_unregister).
8689 * But we do cleanup here so we can do a "soft" shutdown of our cards.
8690 * qeth_remove_device called by the common device layer would otherwise
8691 * do a "hard" shutdown (card->use_hard_stop is set to one in
8692 * qeth_remove_device).
8693 */
8694again:
8695 read_lock_irqsave(&qeth_card_list.rwlock, flags);
8696 list_for_each_entry_safe(card, tmp, &qeth_card_list.list, list){
8697 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
8698 qeth_set_offline(card->gdev);
8699 qeth_remove_device(card->gdev);
8700 goto again;
8701 }
8702 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
8703#ifdef CONFIG_QETH_IPV6
8704 qeth_ipv6_uninit();
8705#endif
8706 qeth_unregister_notifiers();
8707 qeth_remove_procfs_entries();
8708 qeth_sysfs_unregister();
8709 qeth_unregister_dbf_views();
8710 printk("qeth: removed\n");
8711}
8712
8713EXPORT_SYMBOL(qeth_osn_register);
8714EXPORT_SYMBOL(qeth_osn_deregister);
8715EXPORT_SYMBOL(qeth_osn_assist);
8716module_init(qeth_init);
8717module_exit(qeth_exit);
1387780f 8718MODULE_AUTHOR("Frank Pavlic <fpavlic@de.ibm.com>");
8719MODULE_DESCRIPTION("Linux on zSeries OSA Express and HiperSockets support\n" \
8720 "Copyright 2000,2003 IBM Corporation\n");
8721
8722MODULE_LICENSE("GPL");