1 /*
2 * drivers/s390/net/qeth_l3_main.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11 #define KMSG_COMPONENT "qeth"
12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/kernel.h>
19 #include <linux/etherdevice.h>
20 #include <linux/mii.h>
21 #include <linux/ip.h>
22 #include <linux/ipv6.h>
23 #include <linux/inetdevice.h>
24 #include <linux/igmp.h>
25
26 #include <net/ip.h>
27 #include <net/arp.h>
28 #include <net/ip6_checksum.h>
29
30 #include "qeth_l3.h"
31
32 static int qeth_l3_set_offline(struct ccwgroup_device *);
33 static int qeth_l3_recover(void *);
34 static int qeth_l3_stop(struct net_device *);
35 static void qeth_l3_set_multicast_list(struct net_device *);
36 static int qeth_l3_neigh_setup(struct net_device *, struct neigh_parms *);
37 static int qeth_l3_register_addr_entry(struct qeth_card *,
38 struct qeth_ipaddr *);
39 static int qeth_l3_deregister_addr_entry(struct qeth_card *,
40 struct qeth_ipaddr *);
41 static int __qeth_l3_set_online(struct ccwgroup_device *, int);
42 static int __qeth_l3_set_offline(struct ccwgroup_device *, int);
43
44
45 static int qeth_l3_isxdigit(char *buf)
46 {
47 while (*buf) {
48 if (!isxdigit(*buf++))
49 return 0;
50 }
51 return 1;
52 }
53
54 void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
55 {
56 sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]);
57 }
58
59 int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
60 {
61 int count = 0, rc = 0;
62 int in[4];
63 char c;
64
65 rc = sscanf(buf, "%u.%u.%u.%u%c",
66 &in[0], &in[1], &in[2], &in[3], &c);
67 if (rc != 4 && (rc != 5 || c != '\n'))
68 return -EINVAL;
69 for (count = 0; count < 4; count++) {
70 if (in[count] > 255)
71 return -EINVAL;
72 addr[count] = in[count];
73 }
74 return 0;
75 }
76
77 void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
78 {
79 sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
80 ":%02x%02x:%02x%02x:%02x%02x:%02x%02x",
81 addr[0], addr[1], addr[2], addr[3],
82 addr[4], addr[5], addr[6], addr[7],
83 addr[8], addr[9], addr[10], addr[11],
84 addr[12], addr[13], addr[14], addr[15]);
85 }
86
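/*
* Parse a textual IPv6 address into its 16-byte binary form. Groups are
* split at ':'; any groups following a "::" compression are collected in
* in_tmp[] and written back from the end of the address, so the zero run
* in the middle is left to the initial memset.
*/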
87 int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
88 {
89 const char *end, *end_tmp, *start;
90 __u16 *in;
91 char num[5];
92 int num2, cnt, out, found, save_cnt;
93 unsigned short in_tmp[8] = {0, };
94
95 cnt = out = found = save_cnt = num2 = 0;
96 end = start = buf;
97 in = (__u16 *) addr;
98 memset(in, 0, 16);
99 while (*end) {
100 end = strchr(start, ':');
101 if (end == NULL) {
102 end = buf + strlen(buf);
103 end_tmp = strchr(start, '\n');
104 if (end_tmp != NULL)
105 end = end_tmp;
106 out = 1;
107 }
108 if ((end - start)) {
109 memset(num, 0, 5);
110 if ((end - start) > 4)
111 return -EINVAL;
112 memcpy(num, start, end - start);
113 if (!qeth_l3_isxdigit(num))
114 return -EINVAL;
115 sscanf(start, "%x", &num2);
116 if (found)
117 in_tmp[save_cnt++] = num2;
118 else
119 in[cnt++] = num2;
120 if (out)
121 break;
122 } else {
123 if (found)
124 return -EINVAL;
125 found = 1;
126 }
127 start = ++end;
128 }
129 if (cnt + save_cnt > 8)
130 return -EINVAL;
131 cnt = 7;
132 while (save_cnt)
133 in[cnt--] = in_tmp[--save_cnt];
134 return 0;
135 }
136
137 void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
138 char *buf)
139 {
140 if (proto == QETH_PROT_IPV4)
141 qeth_l3_ipaddr4_to_string(addr, buf);
142 else if (proto == QETH_PROT_IPV6)
143 qeth_l3_ipaddr6_to_string(addr, buf);
144 }
145
146 int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
147 __u8 *addr)
148 {
149 if (proto == QETH_PROT_IPV4)
150 return qeth_l3_string_to_ipaddr4(buf, addr);
151 else if (proto == QETH_PROT_IPV6)
152 return qeth_l3_string_to_ipaddr6(buf, addr);
153 else
154 return -EINVAL;
155 }
156
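/*
* Expand each address byte into eight single-bit array entries (most
* significant bit first) so that prefixes can be compared bit by bit
* with a plain memcmp over mask_bits entries.
*/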
157 static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
158 {
159 int i, j;
160 u8 octet;
161
162 for (i = 0; i < len; ++i) {
163 octet = addr[i];
164 for (j = 7; j >= 0; --j) {
165 bits[i*8 + j] = octet & 1;
166 octet >>= 1;
167 }
168 }
169 }
170
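/*
* Check whether an address falls into one of the configured IP address
* takeover (IPATO) ranges. The per-protocol invert4/invert6 flags turn
* the match into an exclusion list.
*/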
171 static int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
172 struct qeth_ipaddr *addr)
173 {
174 struct qeth_ipato_entry *ipatoe;
175 u8 addr_bits[128] = {0, };
176 u8 ipatoe_bits[128] = {0, };
177 int rc = 0;
178
179 if (!card->ipato.enabled)
180 return 0;
181
182 qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
183 (addr->proto == QETH_PROT_IPV4)? 4:16);
184 list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
185 if (addr->proto != ipatoe->proto)
186 continue;
187 qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
188 (ipatoe->proto == QETH_PROT_IPV4) ?
189 4 : 16);
190 if (addr->proto == QETH_PROT_IPV4)
191 rc = !memcmp(addr_bits, ipatoe_bits,
192 min(32, ipatoe->mask_bits));
193 else
194 rc = !memcmp(addr_bits, ipatoe_bits,
195 min(128, ipatoe->mask_bits));
196 if (rc)
197 break;
198 }
199 /* invert? */
200 if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
201 rc = !rc;
202 else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
203 rc = !rc;
204
205 return rc;
206 }
207
208 /*
209  * Add an IP address to the todo list. If there is already an "add todo"
210  * entry in this list, we just increment the reference count.
211  * Returns 0 if we just incremented the reference count.
212 */
213 static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
214 struct qeth_ipaddr *addr, int add)
215 {
216 struct qeth_ipaddr *tmp, *t;
217 int found = 0;
218
219 list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
220 if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
221 (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
222 return 0;
223 if ((tmp->proto == QETH_PROT_IPV4) &&
224 (addr->proto == QETH_PROT_IPV4) &&
225 (tmp->type == addr->type) &&
226 (tmp->is_multicast == addr->is_multicast) &&
227 (tmp->u.a4.addr == addr->u.a4.addr) &&
228 (tmp->u.a4.mask == addr->u.a4.mask)) {
229 found = 1;
230 break;
231 }
232 if ((tmp->proto == QETH_PROT_IPV6) &&
233 (addr->proto == QETH_PROT_IPV6) &&
234 (tmp->type == addr->type) &&
235 (tmp->is_multicast == addr->is_multicast) &&
236 (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
237 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
238 sizeof(struct in6_addr)) == 0)) {
239 found = 1;
240 break;
241 }
242 }
243 if (found) {
244 if (addr->users != 0)
245 tmp->users += addr->users;
246 else
247 tmp->users += add ? 1 : -1;
248 if (tmp->users == 0) {
249 list_del(&tmp->entry);
250 kfree(tmp);
251 }
252 return 0;
253 } else {
254 if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
255 list_add(&addr->entry, card->ip_tbd_list);
256 else {
257 if (addr->users == 0)
258 addr->users += add ? 1 : -1;
259 if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
260 qeth_l3_is_addr_covered_by_ipato(card, addr)) {
261 QETH_DBF_TEXT(TRACE, 2, "tkovaddr");
262 addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
263 }
264 list_add_tail(&addr->entry, card->ip_tbd_list);
265 }
266 return 1;
267 }
268 }
269
270 static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
271 {
272 unsigned long flags;
273 int rc = 0;
274
275 QETH_DBF_TEXT(TRACE, 4, "delip");
276
277 if (addr->proto == QETH_PROT_IPV4)
278 QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4);
279 else {
280 QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8);
281 QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8);
282 }
283 spin_lock_irqsave(&card->ip_lock, flags);
284 rc = __qeth_l3_insert_ip_todo(card, addr, 0);
285 spin_unlock_irqrestore(&card->ip_lock, flags);
286 return rc;
287 }
288
289 static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
290 {
291 unsigned long flags;
292 int rc = 0;
293
294 QETH_DBF_TEXT(TRACE, 4, "addip");
295 if (addr->proto == QETH_PROT_IPV4)
296 QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4);
297 else {
298 QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8);
299 QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8);
300 }
301 spin_lock_irqsave(&card->ip_lock, flags);
302 rc = __qeth_l3_insert_ip_todo(card, addr, 1);
303 spin_unlock_irqrestore(&card->ip_lock, flags);
304 return rc;
305 }
306
307
308 static struct qeth_ipaddr *qeth_l3_get_addr_buffer(
309 enum qeth_prot_versions prot)
310 {
311 struct qeth_ipaddr *addr;
312
313 addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
314 if (addr == NULL) {
315 return NULL;
316 }
317 addr->type = QETH_IP_TYPE_NORMAL;
318 addr->proto = prot;
319 return addr;
320 }
321
322 static void qeth_l3_delete_mc_addresses(struct qeth_card *card)
323 {
324 struct qeth_ipaddr *iptodo;
325 unsigned long flags;
326
327 QETH_DBF_TEXT(TRACE, 4, "delmc");
328 iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
329 if (!iptodo) {
330 QETH_DBF_TEXT(TRACE, 2, "dmcnomem");
331 return;
332 }
333 iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
334 spin_lock_irqsave(&card->ip_lock, flags);
335 if (!__qeth_l3_insert_ip_todo(card, iptodo, 0))
336 kfree(iptodo);
337 spin_unlock_irqrestore(&card->ip_lock, flags);
338 }
339
340 /*
341 * Add/remove address to/from card's ip list, i.e. try to add or remove
342 * reference to/from an IP address that is already registered on the card.
343 * Returns:
344 * 0 address was on card and its reference count has been adjusted,
345 * but is still > 0, so nothing has to be done
346  * also returns 0 if the address was not on card and the todo was to delete
347 * the address -> there is also nothing to be done
348 * 1 address was not on card and the todo is to add it to the card's ip
349 * list
350 * -1 address was on card and its reference count has been decremented
351 * to <= 0 by the todo -> address must be removed from card
352 */
353 static int __qeth_l3_ref_ip_on_card(struct qeth_card *card,
354 struct qeth_ipaddr *todo, struct qeth_ipaddr **__addr)
355 {
356 struct qeth_ipaddr *addr;
357 int found = 0;
358
359 list_for_each_entry(addr, &card->ip_list, entry) {
360 if ((addr->proto == QETH_PROT_IPV4) &&
361 (todo->proto == QETH_PROT_IPV4) &&
362 (addr->type == todo->type) &&
363 (addr->u.a4.addr == todo->u.a4.addr) &&
364 (addr->u.a4.mask == todo->u.a4.mask)) {
365 found = 1;
366 break;
367 }
368 if ((addr->proto == QETH_PROT_IPV6) &&
369 (todo->proto == QETH_PROT_IPV6) &&
370 (addr->type == todo->type) &&
371 (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
372 (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
373 sizeof(struct in6_addr)) == 0)) {
374 found = 1;
375 break;
376 }
377 }
378 if (found) {
379 addr->users += todo->users;
380 if (addr->users <= 0) {
381 *__addr = addr;
382 return -1;
383 } else {
384 /* for VIPA and RXIP limit refcount to 1 */
385 if (addr->type != QETH_IP_TYPE_NORMAL)
386 addr->users = 1;
387 return 0;
388 }
389 }
390 if (todo->users > 0) {
391 /* for VIPA and RXIP limit refcount to 1 */
392 if (todo->type != QETH_IP_TYPE_NORMAL)
393 todo->users = 1;
394 return 1;
395 } else
396 return 0;
397 }
398
399 static void __qeth_l3_delete_all_mc(struct qeth_card *card,
400 unsigned long *flags)
401 {
402 struct list_head fail_list;
403 struct qeth_ipaddr *addr, *tmp;
404 int rc;
405
406 INIT_LIST_HEAD(&fail_list);
407 again:
408 list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
409 if (addr->is_multicast) {
410 list_del(&addr->entry);
411 spin_unlock_irqrestore(&card->ip_lock, *flags);
412 rc = qeth_l3_deregister_addr_entry(card, addr);
413 spin_lock_irqsave(&card->ip_lock, *flags);
414 if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND))
415 kfree(addr);
416 else
417 list_add_tail(&addr->entry, &fail_list);
418 goto again;
419 }
420 }
421 list_splice(&fail_list, &card->ip_list);
422 }
423
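/*
* Work off the todo list: swap in a fresh, empty todo list, then register
* or deregister each queued address on the card. The ip_lock is dropped
* while the IPA commands are sent.
*/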
424 static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
425 {
426 struct list_head *tbd_list;
427 struct qeth_ipaddr *todo, *addr;
428 unsigned long flags;
429 int rc;
430
431 QETH_DBF_TEXT(TRACE, 2, "sdiplist");
432 QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
433
434 spin_lock_irqsave(&card->ip_lock, flags);
435 tbd_list = card->ip_tbd_list;
436 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
437 if (!card->ip_tbd_list) {
438 QETH_DBF_TEXT(TRACE, 0, "silnomem");
439 card->ip_tbd_list = tbd_list;
440 spin_unlock_irqrestore(&card->ip_lock, flags);
441 return;
442 } else
443 INIT_LIST_HEAD(card->ip_tbd_list);
444
445 while (!list_empty(tbd_list)) {
446 todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
447 list_del(&todo->entry);
448 if (todo->type == QETH_IP_TYPE_DEL_ALL_MC) {
449 __qeth_l3_delete_all_mc(card, &flags);
450 kfree(todo);
451 continue;
452 }
453 rc = __qeth_l3_ref_ip_on_card(card, todo, &addr);
454 if (rc == 0) {
455 /* nothing to be done; only adjusted refcount */
456 kfree(todo);
457 } else if (rc == 1) {
458 /* new entry to be added to on-card list */
459 spin_unlock_irqrestore(&card->ip_lock, flags);
460 rc = qeth_l3_register_addr_entry(card, todo);
461 spin_lock_irqsave(&card->ip_lock, flags);
462 if (!rc || (rc == IPA_RC_LAN_OFFLINE))
463 list_add_tail(&todo->entry, &card->ip_list);
464 else
465 kfree(todo);
466 } else if (rc == -1) {
467 /* on-card entry to be removed */
468 list_del_init(&addr->entry);
469 spin_unlock_irqrestore(&card->ip_lock, flags);
470 rc = qeth_l3_deregister_addr_entry(card, addr);
471 spin_lock_irqsave(&card->ip_lock, flags);
472 if (!rc || (rc == IPA_RC_PRIMARY_ALREADY_DEFINED))
473 kfree(addr);
474 else
475 list_add_tail(&addr->entry, &card->ip_list);
476 kfree(todo);
477 }
478 }
479 spin_unlock_irqrestore(&card->ip_lock, flags);
480 kfree(tbd_list);
481 }
482
483 static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
484 int recover)
485 {
486 struct qeth_ipaddr *addr, *tmp;
487 unsigned long flags;
488
489 QETH_DBF_TEXT(TRACE, 4, "clearip");
490 spin_lock_irqsave(&card->ip_lock, flags);
491 /* clear todo list */
492 list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) {
493 list_del(&addr->entry);
494 kfree(addr);
495 }
496
497 while (!list_empty(&card->ip_list)) {
498 addr = list_entry(card->ip_list.next,
499 struct qeth_ipaddr, entry);
500 list_del_init(&addr->entry);
501 if (clean) {
502 spin_unlock_irqrestore(&card->ip_lock, flags);
503 qeth_l3_deregister_addr_entry(card, addr);
504 spin_lock_irqsave(&card->ip_lock, flags);
505 }
506 if (!recover || addr->is_multicast) {
507 kfree(addr);
508 continue;
509 }
510 list_add_tail(&addr->entry, card->ip_tbd_list);
511 }
512 spin_unlock_irqrestore(&card->ip_lock, flags);
513 }
514
515 static int qeth_l3_address_exists_in_list(struct list_head *list,
516 struct qeth_ipaddr *addr, int same_type)
517 {
518 struct qeth_ipaddr *tmp;
519
520 list_for_each_entry(tmp, list, entry) {
521 if ((tmp->proto == QETH_PROT_IPV4) &&
522 (addr->proto == QETH_PROT_IPV4) &&
523 ((same_type && (tmp->type == addr->type)) ||
524 (!same_type && (tmp->type != addr->type))) &&
525 (tmp->u.a4.addr == addr->u.a4.addr))
526 return 1;
527
528 if ((tmp->proto == QETH_PROT_IPV6) &&
529 (addr->proto == QETH_PROT_IPV6) &&
530 ((same_type && (tmp->type == addr->type)) ||
531 (!same_type && (tmp->type != addr->type))) &&
532 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
533 sizeof(struct in6_addr)) == 0))
534 return 1;
535
536 }
537 return 0;
538 }
539
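/*
* Send a SETIPM/DELIPM command to register or deregister a multicast
* group (MAC plus IPv4/IPv6 group address) on the card.
*/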
540 static int qeth_l3_send_setdelmc(struct qeth_card *card,
541 struct qeth_ipaddr *addr, int ipacmd)
542 {
543 int rc;
544 struct qeth_cmd_buffer *iob;
545 struct qeth_ipa_cmd *cmd;
546
547 QETH_DBF_TEXT(TRACE, 4, "setdelmc");
548
549 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
550 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
551 memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
552 if (addr->proto == QETH_PROT_IPV6)
553 memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
554 sizeof(struct in6_addr));
555 else
556 memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4);
557
558 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
559
560 return rc;
561 }
562
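/* Convert a prefix length into a 16-byte netmask, e.g. 20 -> ff:ff:f0:00:... */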
563 static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len)
564 {
565 int i, j;
566 for (i = 0; i < 16; i++) {
567 j = (len) - (i * 8);
568 if (j >= 8)
569 netmask[i] = 0xff;
570 else if (j > 0)
571 netmask[i] = (u8)(0xFF00 >> j);
572 else
573 netmask[i] = 0;
574 }
575 }
576
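/*
* Send a SETIP/DELIP command for a unicast address; for IPv6 the netmask
* is derived from the prefix length via qeth_l3_fill_netmask().
*/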
577 static int qeth_l3_send_setdelip(struct qeth_card *card,
578 struct qeth_ipaddr *addr, int ipacmd, unsigned int flags)
579 {
580 int rc;
581 struct qeth_cmd_buffer *iob;
582 struct qeth_ipa_cmd *cmd;
583 __u8 netmask[16];
584
585 QETH_DBF_TEXT(TRACE, 4, "setdelip");
586 QETH_DBF_TEXT_(TRACE, 4, "flags%02X", flags);
587
588 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
589 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
590 if (addr->proto == QETH_PROT_IPV6) {
591 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
592 sizeof(struct in6_addr));
593 qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen);
594 memcpy(cmd->data.setdelip6.mask, netmask,
595 sizeof(struct in6_addr));
596 cmd->data.setdelip6.flags = flags;
597 } else {
598 memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
599 memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
600 cmd->data.setdelip4.flags = flags;
601 }
602
603 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
604
605 return rc;
606 }
607
608 static int qeth_l3_send_setrouting(struct qeth_card *card,
609 enum qeth_routing_types type, enum qeth_prot_versions prot)
610 {
611 int rc;
612 struct qeth_ipa_cmd *cmd;
613 struct qeth_cmd_buffer *iob;
614
615 QETH_DBF_TEXT(TRACE, 4, "setroutg");
616 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
617 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
618 cmd->data.setrtg.type = (type);
619 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
620
621 return rc;
622 }
623
624 static void qeth_l3_correct_routing_type(struct qeth_card *card,
625 enum qeth_routing_types *type, enum qeth_prot_versions prot)
626 {
627 if (card->info.type == QETH_CARD_TYPE_IQD) {
628 switch (*type) {
629 case NO_ROUTER:
630 case PRIMARY_CONNECTOR:
631 case SECONDARY_CONNECTOR:
632 case MULTICAST_ROUTER:
633 return;
634 default:
635 goto out_inval;
636 }
637 } else {
638 switch (*type) {
639 case NO_ROUTER:
640 case PRIMARY_ROUTER:
641 case SECONDARY_ROUTER:
642 return;
643 case MULTICAST_ROUTER:
644 if (qeth_is_ipafunc_supported(card, prot,
645 IPA_OSA_MC_ROUTER))
646 return;
647 default:
648 goto out_inval;
649 }
650 }
651 out_inval:
652 *type = NO_ROUTER;
653 }
654
655 int qeth_l3_setrouting_v4(struct qeth_card *card)
656 {
657 int rc;
658
659 QETH_DBF_TEXT(TRACE, 3, "setrtg4");
660
661 qeth_l3_correct_routing_type(card, &card->options.route4.type,
662 QETH_PROT_IPV4);
663
664 rc = qeth_l3_send_setrouting(card, card->options.route4.type,
665 QETH_PROT_IPV4);
666 if (rc) {
667 card->options.route4.type = NO_ROUTER;
668 QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
669 " on %s. Type set to 'no router'.\n", rc,
670 QETH_CARD_IFNAME(card));
671 }
672 return rc;
673 }
674
675 int qeth_l3_setrouting_v6(struct qeth_card *card)
676 {
677 int rc = 0;
678
679 QETH_DBF_TEXT(TRACE, 3, "setrtg6");
680 #ifdef CONFIG_QETH_IPV6
681
682 if (!qeth_is_supported(card, IPA_IPV6))
683 return 0;
684 qeth_l3_correct_routing_type(card, &card->options.route6.type,
685 QETH_PROT_IPV6);
686
687 rc = qeth_l3_send_setrouting(card, card->options.route6.type,
688 QETH_PROT_IPV6);
689 if (rc) {
690 card->options.route6.type = NO_ROUTER;
691 QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
692 " on %s. Type set to 'no router'.\n", rc,
693 QETH_CARD_IFNAME(card));
694 }
695 #endif
696 return rc;
697 }
698
699 /*
700 * IP address takeover related functions
701 */
702 static void qeth_l3_clear_ipato_list(struct qeth_card *card)
703 {
704
705 struct qeth_ipato_entry *ipatoe, *tmp;
706 unsigned long flags;
707
708 spin_lock_irqsave(&card->ip_lock, flags);
709 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
710 list_del(&ipatoe->entry);
711 kfree(ipatoe);
712 }
713 spin_unlock_irqrestore(&card->ip_lock, flags);
714 }
715
716 int qeth_l3_add_ipato_entry(struct qeth_card *card,
717 struct qeth_ipato_entry *new)
718 {
719 struct qeth_ipato_entry *ipatoe;
720 unsigned long flags;
721 int rc = 0;
722
723 QETH_DBF_TEXT(TRACE, 2, "addipato");
724 spin_lock_irqsave(&card->ip_lock, flags);
725 list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
726 if (ipatoe->proto != new->proto)
727 continue;
728 if (!memcmp(ipatoe->addr, new->addr,
729 (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
730 (ipatoe->mask_bits == new->mask_bits)) {
731 rc = -EEXIST;
732 break;
733 }
734 }
735 if (!rc)
736 list_add_tail(&new->entry, &card->ipato.entries);
737
738 spin_unlock_irqrestore(&card->ip_lock, flags);
739 return rc;
740 }
741
742 void qeth_l3_del_ipato_entry(struct qeth_card *card,
743 enum qeth_prot_versions proto, u8 *addr, int mask_bits)
744 {
745 struct qeth_ipato_entry *ipatoe, *tmp;
746 unsigned long flags;
747
748 QETH_DBF_TEXT(TRACE, 2, "delipato");
749 spin_lock_irqsave(&card->ip_lock, flags);
750 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
751 if (ipatoe->proto != proto)
752 continue;
753 if (!memcmp(ipatoe->addr, addr,
754 (proto == QETH_PROT_IPV4)? 4:16) &&
755 (ipatoe->mask_bits == mask_bits)) {
756 list_del(&ipatoe->entry);
757 kfree(ipatoe);
758 }
759 }
760 spin_unlock_irqrestore(&card->ip_lock, flags);
761 }
762
763 /*
764 * VIPA related functions
765 */
766 int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
767 const u8 *addr)
768 {
769 struct qeth_ipaddr *ipaddr;
770 unsigned long flags;
771 int rc = 0;
772
773 ipaddr = qeth_l3_get_addr_buffer(proto);
774 if (ipaddr) {
775 if (proto == QETH_PROT_IPV4) {
776 QETH_DBF_TEXT(TRACE, 2, "addvipa4");
777 memcpy(&ipaddr->u.a4.addr, addr, 4);
778 ipaddr->u.a4.mask = 0;
779 } else if (proto == QETH_PROT_IPV6) {
780 QETH_DBF_TEXT(TRACE, 2, "addvipa6");
781 memcpy(&ipaddr->u.a6.addr, addr, 16);
782 ipaddr->u.a6.pfxlen = 0;
783 }
784 ipaddr->type = QETH_IP_TYPE_VIPA;
785 ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG;
786 ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
787 } else
788 return -ENOMEM;
789 spin_lock_irqsave(&card->ip_lock, flags);
790 if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
791 qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
792 rc = -EEXIST;
793 spin_unlock_irqrestore(&card->ip_lock, flags);
794 if (rc) {
795 return rc;
796 }
797 if (!qeth_l3_add_ip(card, ipaddr))
798 kfree(ipaddr);
799 qeth_l3_set_ip_addr_list(card);
800 return rc;
801 }
802
803 void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
804 const u8 *addr)
805 {
806 struct qeth_ipaddr *ipaddr;
807
808 ipaddr = qeth_l3_get_addr_buffer(proto);
809 if (ipaddr) {
810 if (proto == QETH_PROT_IPV4) {
811 QETH_DBF_TEXT(TRACE, 2, "delvipa4");
812 memcpy(&ipaddr->u.a4.addr, addr, 4);
813 ipaddr->u.a4.mask = 0;
814 } else if (proto == QETH_PROT_IPV6) {
815 QETH_DBF_TEXT(TRACE, 2, "delvipa6");
816 memcpy(&ipaddr->u.a6.addr, addr, 16);
817 ipaddr->u.a6.pfxlen = 0;
818 }
819 ipaddr->type = QETH_IP_TYPE_VIPA;
820 } else
821 return;
822 if (!qeth_l3_delete_ip(card, ipaddr))
823 kfree(ipaddr);
824 qeth_l3_set_ip_addr_list(card);
825 }
826
827 /*
828 * proxy ARP related functions
829 */
830 int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
831 const u8 *addr)
832 {
833 struct qeth_ipaddr *ipaddr;
834 unsigned long flags;
835 int rc = 0;
836
837 ipaddr = qeth_l3_get_addr_buffer(proto);
838 if (ipaddr) {
839 if (proto == QETH_PROT_IPV4) {
840 QETH_DBF_TEXT(TRACE, 2, "addrxip4");
841 memcpy(&ipaddr->u.a4.addr, addr, 4);
842 ipaddr->u.a4.mask = 0;
843 } else if (proto == QETH_PROT_IPV6) {
844 QETH_DBF_TEXT(TRACE, 2, "addrxip6");
845 memcpy(&ipaddr->u.a6.addr, addr, 16);
846 ipaddr->u.a6.pfxlen = 0;
847 }
848 ipaddr->type = QETH_IP_TYPE_RXIP;
849 ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
850 ipaddr->del_flags = 0;
851 } else
852 return -ENOMEM;
853 spin_lock_irqsave(&card->ip_lock, flags);
854 if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
855 qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
856 rc = -EEXIST;
857 spin_unlock_irqrestore(&card->ip_lock, flags);
858 if (rc) {
859 return rc;
860 }
861 if (!qeth_l3_add_ip(card, ipaddr))
862 kfree(ipaddr);
863 qeth_l3_set_ip_addr_list(card);
864 return 0;
865 }
866
867 void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
868 const u8 *addr)
869 {
870 struct qeth_ipaddr *ipaddr;
871
872 ipaddr = qeth_l3_get_addr_buffer(proto);
873 if (ipaddr) {
874 if (proto == QETH_PROT_IPV4) {
875 QETH_DBF_TEXT(TRACE, 2, "delrxip4");
876 memcpy(&ipaddr->u.a4.addr, addr, 4);
877 ipaddr->u.a4.mask = 0;
878 } else if (proto == QETH_PROT_IPV6) {
879 QETH_DBF_TEXT(TRACE, 2, "delrxip6");
880 memcpy(&ipaddr->u.a6.addr, addr, 16);
881 ipaddr->u.a6.pfxlen = 0;
882 }
883 ipaddr->type = QETH_IP_TYPE_RXIP;
884 } else
885 return;
886 if (!qeth_l3_delete_ip(card, ipaddr))
887 kfree(ipaddr);
888 qeth_l3_set_ip_addr_list(card);
889 }
890
891 static int qeth_l3_register_addr_entry(struct qeth_card *card,
892 struct qeth_ipaddr *addr)
893 {
894 char buf[50];
895 int rc = 0;
896 int cnt = 3;
897
898 if (addr->proto == QETH_PROT_IPV4) {
899 QETH_DBF_TEXT(TRACE, 2, "setaddr4");
900 QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int));
901 } else if (addr->proto == QETH_PROT_IPV6) {
902 QETH_DBF_TEXT(TRACE, 2, "setaddr6");
903 QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8);
904 QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8);
905 } else {
906 QETH_DBF_TEXT(TRACE, 2, "setaddr?");
907 QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr));
908 }
909 do {
910 if (addr->is_multicast)
911 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_SETIPM);
912 else
913 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP,
914 addr->set_flags);
915 if (rc)
916 QETH_DBF_TEXT(TRACE, 2, "failed");
917 } while ((--cnt > 0) && rc);
918 if (rc) {
919 QETH_DBF_TEXT(TRACE, 2, "FAILED");
920 qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
921 dev_warn(&card->gdev->dev,
922 "Registering IP address %s failed\n", buf);
923 }
924 return rc;
925 }
926
927 static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
928 struct qeth_ipaddr *addr)
929 {
930 int rc = 0;
931
932 if (addr->proto == QETH_PROT_IPV4) {
933 QETH_DBF_TEXT(TRACE, 2, "deladdr4");
934 QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int));
935 } else if (addr->proto == QETH_PROT_IPV6) {
936 QETH_DBF_TEXT(TRACE, 2, "deladdr6");
937 QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8);
938 QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8);
939 } else {
940 QETH_DBF_TEXT(TRACE, 2, "deladdr?");
941 QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr));
942 }
943 if (addr->is_multicast)
944 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM);
945 else
946 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP,
947 addr->del_flags);
948 if (rc)
949 QETH_DBF_TEXT(TRACE, 2, "failed");
950
951 return rc;
952 }
953
954 static inline u8 qeth_l3_get_qeth_hdr_flags4(int cast_type)
955 {
956 if (cast_type == RTN_MULTICAST)
957 return QETH_CAST_MULTICAST;
958 if (cast_type == RTN_BROADCAST)
959 return QETH_CAST_BROADCAST;
960 return QETH_CAST_UNICAST;
961 }
962
963 static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type)
964 {
965 u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
966 if (cast_type == RTN_MULTICAST)
967 return ct | QETH_CAST_MULTICAST;
968 if (cast_type == RTN_ANYCAST)
969 return ct | QETH_CAST_ANYCAST;
970 if (cast_type == RTN_BROADCAST)
971 return ct | QETH_CAST_BROADCAST;
972 return ct | QETH_CAST_UNICAST;
973 }
974
975 static int qeth_l3_send_setadp_mode(struct qeth_card *card, __u32 command,
976 __u32 mode)
977 {
978 int rc;
979 struct qeth_cmd_buffer *iob;
980 struct qeth_ipa_cmd *cmd;
981
982 QETH_DBF_TEXT(TRACE, 4, "adpmode");
983
984 iob = qeth_get_adapter_cmd(card, command,
985 sizeof(struct qeth_ipacmd_setadpparms));
986 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
987 cmd->data.setadapterparms.data.mode = mode;
988 rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb,
989 NULL);
990 return rc;
991 }
992
993 static int qeth_l3_setadapter_hstr(struct qeth_card *card)
994 {
995 int rc;
996
997 QETH_DBF_TEXT(TRACE, 4, "adphstr");
998
999 if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) {
1000 rc = qeth_l3_send_setadp_mode(card,
1001 IPA_SETADP_SET_BROADCAST_MODE,
1002 card->options.broadcast_mode);
1003 if (rc)
1004 QETH_DBF_MESSAGE(2, "couldn't set broadcast mode on "
1005 "device %s: x%x\n",
1006 CARD_BUS_ID(card), rc);
1007 rc = qeth_l3_send_setadp_mode(card,
1008 IPA_SETADP_ALTER_MAC_ADDRESS,
1009 card->options.macaddr_mode);
1010 if (rc)
1011 QETH_DBF_MESSAGE(2, "couldn't set macaddr mode on "
1012 "device %s: x%x\n", CARD_BUS_ID(card), rc);
1013 return rc;
1014 }
1015 if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
1016 QETH_DBF_MESSAGE(2, "set adapter parameters not available "
1017 "to set broadcast mode, using ALLRINGS "
1018 "on device %s:\n", CARD_BUS_ID(card));
1019 if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
1020 QETH_DBF_MESSAGE(2, "set adapter parameters not available "
1021 "to set macaddr mode, using NONCANONICAL "
1022 "on device %s:\n", CARD_BUS_ID(card));
1023 return 0;
1024 }
1025
1026 static int qeth_l3_setadapter_parms(struct qeth_card *card)
1027 {
1028 int rc;
1029
1030 QETH_DBF_TEXT(SETUP, 2, "setadprm");
1031
1032 if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
1033 dev_info(&card->gdev->dev,
1034 "set adapter parameters not supported.\n");
1035 QETH_DBF_TEXT(SETUP, 2, " notsupp");
1036 return 0;
1037 }
1038 rc = qeth_query_setadapterparms(card);
1039 if (rc) {
1040 QETH_DBF_MESSAGE(2, "%s couldn't set adapter parameters: "
1041 "0x%x\n", dev_name(&card->gdev->dev), rc);
1042 return rc;
1043 }
1044 if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) {
1045 rc = qeth_setadpparms_change_macaddr(card);
1046 if (rc)
1047 dev_warn(&card->gdev->dev, "Reading the adapter MAC"
1048 " address failed\n");
1049 }
1050
1051 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
1052 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
1053 rc = qeth_l3_setadapter_hstr(card);
1054
1055 return rc;
1056 }
1057
1058 static int qeth_l3_default_setassparms_cb(struct qeth_card *card,
1059 struct qeth_reply *reply, unsigned long data)
1060 {
1061 struct qeth_ipa_cmd *cmd;
1062
1063 QETH_DBF_TEXT(TRACE, 4, "defadpcb");
1064
1065 cmd = (struct qeth_ipa_cmd *) data;
1066 if (cmd->hdr.return_code == 0) {
1067 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
1068 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
1069 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
1070 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
1071 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
1072 }
1073 if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
1074 cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
1075 card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
1076 QETH_DBF_TEXT_(TRACE, 3, "csum:%d", card->info.csum_mask);
1077 }
1078 return 0;
1079 }
1080
1081 static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(
1082 struct qeth_card *card, enum qeth_ipa_funcs ipa_func, __u16 cmd_code,
1083 __u16 len, enum qeth_prot_versions prot)
1084 {
1085 struct qeth_cmd_buffer *iob;
1086 struct qeth_ipa_cmd *cmd;
1087
1088 QETH_DBF_TEXT(TRACE, 4, "getasscm");
1089 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
1090
1091 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1092 cmd->data.setassparms.hdr.assist_no = ipa_func;
1093 cmd->data.setassparms.hdr.length = 8 + len;
1094 cmd->data.setassparms.hdr.command_code = cmd_code;
1095 cmd->data.setassparms.hdr.return_code = 0;
1096 cmd->data.setassparms.hdr.seq_no = 0;
1097
1098 return iob;
1099 }
1100
1101 static int qeth_l3_send_setassparms(struct qeth_card *card,
1102 struct qeth_cmd_buffer *iob, __u16 len, long data,
1103 int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
1104 unsigned long),
1105 void *reply_param)
1106 {
1107 int rc;
1108 struct qeth_ipa_cmd *cmd;
1109
1110 QETH_DBF_TEXT(TRACE, 4, "sendassp");
1111
1112 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1113 if (len <= sizeof(__u32))
1114 cmd->data.setassparms.data.flags_32bit = (__u32) data;
1115 else /* (len > sizeof(__u32)) */
1116 memcpy(&cmd->data.setassparms.data, (void *) data, len);
1117
1118 rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
1119 return rc;
1120 }
1121
1122 #ifdef CONFIG_QETH_IPV6
1123 static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
1124 enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
1125 {
1126 int rc;
1127 struct qeth_cmd_buffer *iob;
1128
1129 QETH_DBF_TEXT(TRACE, 4, "simassp6");
1130 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
1131 0, QETH_PROT_IPV6);
1132 rc = qeth_l3_send_setassparms(card, iob, 0, 0,
1133 qeth_l3_default_setassparms_cb, NULL);
1134 return rc;
1135 }
1136 #endif
1137
1138 static int qeth_l3_send_simple_setassparms(struct qeth_card *card,
1139 enum qeth_ipa_funcs ipa_func, __u16 cmd_code, long data)
1140 {
1141 int rc;
1142 int length = 0;
1143 struct qeth_cmd_buffer *iob;
1144
1145 QETH_DBF_TEXT(TRACE, 4, "simassp4");
1146 if (data)
1147 length = sizeof(__u32);
1148 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
1149 length, QETH_PROT_IPV4);
1150 rc = qeth_l3_send_setassparms(card, iob, length, data,
1151 qeth_l3_default_setassparms_cb, NULL);
1152 return rc;
1153 }
1154
1155 static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
1156 {
1157 int rc;
1158
1159 QETH_DBF_TEXT(TRACE, 3, "ipaarp");
1160
1161 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
1162 dev_info(&card->gdev->dev,
1163 "ARP processing not supported on %s!\n",
1164 QETH_CARD_IFNAME(card));
1165 return 0;
1166 }
1167 rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
1168 IPA_CMD_ASS_START, 0);
1169 if (rc) {
1170 dev_warn(&card->gdev->dev,
1171 "Starting ARP processing support for %s failed\n",
1172 QETH_CARD_IFNAME(card));
1173 }
1174 return rc;
1175 }
1176
1177 static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card)
1178 {
1179 int rc;
1180
1181 QETH_DBF_TEXT(TRACE, 3, "ipaipfrg");
1182
1183 if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
1184 dev_info(&card->gdev->dev,
1185 "Hardware IP fragmentation not supported on %s\n",
1186 QETH_CARD_IFNAME(card));
1187 return -EOPNOTSUPP;
1188 }
1189
1190 rc = qeth_l3_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
1191 IPA_CMD_ASS_START, 0);
1192 if (rc) {
1193 dev_warn(&card->gdev->dev,
1194 "Starting IP fragmentation support for %s failed\n",
1195 QETH_CARD_IFNAME(card));
1196 } else
1197 dev_info(&card->gdev->dev,
1198 "Hardware IP fragmentation enabled\n");
1199 return rc;
1200 }
1201
1202 static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
1203 {
1204 int rc;
1205
1206 QETH_DBF_TEXT(TRACE, 3, "stsrcmac");
1207
1208 if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
1209 dev_info(&card->gdev->dev,
1210 "Inbound source MAC-address not supported on %s\n",
1211 QETH_CARD_IFNAME(card));
1212 return -EOPNOTSUPP;
1213 }
1214
1215 rc = qeth_l3_send_simple_setassparms(card, IPA_SOURCE_MAC,
1216 IPA_CMD_ASS_START, 0);
1217 if (rc)
1218 dev_warn(&card->gdev->dev,
1219 "Starting source MAC-address support for %s failed\n",
1220 QETH_CARD_IFNAME(card));
1221 return rc;
1222 }
1223
1224 static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
1225 {
1226 int rc = 0;
1227
1228 QETH_DBF_TEXT(TRACE, 3, "strtvlan");
1229
1230 if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
1231 dev_info(&card->gdev->dev,
1232 "VLAN not supported on %s\n", QETH_CARD_IFNAME(card));
1233 return -EOPNOTSUPP;
1234 }
1235
1236 rc = qeth_l3_send_simple_setassparms(card, IPA_VLAN_PRIO,
1237 IPA_CMD_ASS_START, 0);
1238 if (rc) {
1239 dev_warn(&card->gdev->dev,
1240 "Starting VLAN support for %s failed\n",
1241 QETH_CARD_IFNAME(card));
1242 } else {
1243 dev_info(&card->gdev->dev, "VLAN enabled\n");
1244 }
1245 return rc;
1246 }
1247
1248 static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
1249 {
1250 int rc;
1251
1252 QETH_DBF_TEXT(TRACE, 3, "stmcast");
1253
1254 if (!qeth_is_supported(card, IPA_MULTICASTING)) {
1255 dev_info(&card->gdev->dev,
1256 "Multicast not supported on %s\n",
1257 QETH_CARD_IFNAME(card));
1258 return -EOPNOTSUPP;
1259 }
1260
1261 rc = qeth_l3_send_simple_setassparms(card, IPA_MULTICASTING,
1262 IPA_CMD_ASS_START, 0);
1263 if (rc) {
1264 dev_warn(&card->gdev->dev,
1265 "Starting multicast support for %s failed\n",
1266 QETH_CARD_IFNAME(card));
1267 } else {
1268 dev_info(&card->gdev->dev, "Multicast enabled\n");
1269 card->dev->flags |= IFF_MULTICAST;
1270 }
1271 return rc;
1272 }
1273
1274 static int qeth_l3_query_ipassists_cb(struct qeth_card *card,
1275 struct qeth_reply *reply, unsigned long data)
1276 {
1277 struct qeth_ipa_cmd *cmd;
1278
1279 QETH_DBF_TEXT(SETUP, 2, "qipasscb");
1280
1281 cmd = (struct qeth_ipa_cmd *) data;
1282 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
1283 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
1284 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
1285 } else {
1286 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
1287 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
1288 }
1289 QETH_DBF_TEXT(SETUP, 2, "suppenbl");
1290 QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_supported);
1291 QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_enabled);
1292 return 0;
1293 }
1294
1295 static int qeth_l3_query_ipassists(struct qeth_card *card,
1296 enum qeth_prot_versions prot)
1297 {
1298 int rc;
1299 struct qeth_cmd_buffer *iob;
1300
1301 QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
1302 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
1303 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_query_ipassists_cb, NULL);
1304 return rc;
1305 }
1306
1307 #ifdef CONFIG_QETH_IPV6
1308 static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
1309 {
1310 int rc;
1311
1312 QETH_DBF_TEXT(TRACE, 3, "softipv6");
1313
1314 if (card->info.type == QETH_CARD_TYPE_IQD)
1315 goto out;
1316
1317 rc = qeth_l3_query_ipassists(card, QETH_PROT_IPV6);
1318 if (rc) {
1319 dev_err(&card->gdev->dev,
1320 "Activating IPv6 support for %s failed\n",
1321 QETH_CARD_IFNAME(card));
1322 return rc;
1323 }
1324 rc = qeth_l3_send_simple_setassparms(card, IPA_IPV6,
1325 IPA_CMD_ASS_START, 3);
1326 if (rc) {
1327 dev_err(&card->gdev->dev,
1328 "Activating IPv6 support for %s failed\n",
1329 QETH_CARD_IFNAME(card));
1330 return rc;
1331 }
1332 rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_IPV6,
1333 IPA_CMD_ASS_START);
1334 if (rc) {
1335 dev_err(&card->gdev->dev,
1336 "Activating IPv6 support for %s failed\n",
1337 QETH_CARD_IFNAME(card));
1338 return rc;
1339 }
1340 rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
1341 IPA_CMD_ASS_START);
1342 if (rc) {
1343 dev_warn(&card->gdev->dev,
1344 "Enabling the passthrough mode for %s failed\n",
1345 QETH_CARD_IFNAME(card));
1346 return rc;
1347 }
1348 out:
1349 dev_info(&card->gdev->dev, "IPV6 enabled\n");
1350 return 0;
1351 }
1352 #endif
1353
1354 static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
1355 {
1356 int rc = 0;
1357
1358 QETH_DBF_TEXT(TRACE, 3, "strtipv6");
1359
1360 if (!qeth_is_supported(card, IPA_IPV6)) {
1361 dev_info(&card->gdev->dev,
1362 "IPv6 not supported on %s\n", QETH_CARD_IFNAME(card));
1363 return 0;
1364 }
1365 #ifdef CONFIG_QETH_IPV6
1366 rc = qeth_l3_softsetup_ipv6(card);
1367 #endif
1368 return rc;
1369 }
1370
1371 static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
1372 {
1373 int rc;
1374
1375 QETH_DBF_TEXT(TRACE, 3, "stbrdcst");
1376 card->info.broadcast_capable = 0;
1377 if (!qeth_is_supported(card, IPA_FILTERING)) {
1378 dev_info(&card->gdev->dev,
1379 "Broadcast not supported on %s\n",
1380 QETH_CARD_IFNAME(card));
1381 rc = -EOPNOTSUPP;
1382 goto out;
1383 }
1384 rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
1385 IPA_CMD_ASS_START, 0);
1386 if (rc) {
1387 dev_warn(&card->gdev->dev, "Enabling broadcast filtering for "
1388 "%s failed\n", QETH_CARD_IFNAME(card));
1389 goto out;
1390 }
1391
1392 rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
1393 IPA_CMD_ASS_CONFIGURE, 1);
1394 if (rc) {
1395 dev_warn(&card->gdev->dev,
1396 "Setting up broadcast filtering for %s failed\n",
1397 QETH_CARD_IFNAME(card));
1398 goto out;
1399 }
1400 card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
1401 dev_info(&card->gdev->dev, "Broadcast enabled\n");
1402 rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
1403 IPA_CMD_ASS_ENABLE, 1);
1404 if (rc) {
1405 dev_warn(&card->gdev->dev, "Setting up broadcast echo "
1406 "filtering for %s failed\n", QETH_CARD_IFNAME(card));
1407 goto out;
1408 }
1409 card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
1410 out:
1411 if (card->info.broadcast_capable)
1412 card->dev->flags |= IFF_BROADCAST;
1413 else
1414 card->dev->flags &= ~IFF_BROADCAST;
1415 return rc;
1416 }
1417
1418 static int qeth_l3_send_checksum_command(struct qeth_card *card)
1419 {
1420 int rc;
1421
1422 rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
1423 IPA_CMD_ASS_START, 0);
1424 if (rc) {
1425 dev_warn(&card->gdev->dev, "Starting HW checksumming for %s "
1426 "failed, using SW checksumming\n",
1427 QETH_CARD_IFNAME(card));
1428 return rc;
1429 }
1430 rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
1431 IPA_CMD_ASS_ENABLE,
1432 card->info.csum_mask);
1433 if (rc) {
1434 dev_warn(&card->gdev->dev, "Enabling HW checksumming for %s "
1435 "failed, using SW checksumming\n",
1436 QETH_CARD_IFNAME(card));
1437 return rc;
1438 }
1439 return 0;
1440 }
1441
1442 static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
1443 {
1444 int rc = 0;
1445
1446 QETH_DBF_TEXT(TRACE, 3, "strtcsum");
1447
1448 if (card->options.checksum_type == NO_CHECKSUMMING) {
1449 dev_info(&card->gdev->dev,
1450 "Using no checksumming on %s.\n",
1451 QETH_CARD_IFNAME(card));
1452 return 0;
1453 }
1454 if (card->options.checksum_type == SW_CHECKSUMMING) {
1455 dev_info(&card->gdev->dev,
1456 "Using SW checksumming on %s.\n",
1457 QETH_CARD_IFNAME(card));
1458 return 0;
1459 }
1460 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
1461 dev_info(&card->gdev->dev,
1462 "Inbound HW Checksumming not "
1463 "supported on %s, continuing "
1464 "using Inbound SW Checksumming\n",
1465 QETH_CARD_IFNAME(card));
1466 card->options.checksum_type = SW_CHECKSUMMING;
1467 return 0;
1468 }
1469 rc = qeth_l3_send_checksum_command(card);
1470 if (!rc)
1471 dev_info(&card->gdev->dev,
1472 "HW Checksumming (inbound) enabled\n");
1473
1474 return rc;
1475 }
1476
1477 static int qeth_l3_start_ipa_tso(struct qeth_card *card)
1478 {
1479 int rc;
1480
1481 QETH_DBF_TEXT(TRACE, 3, "sttso");
1482
1483 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
1484 dev_info(&card->gdev->dev,
1485 "Outbound TSO not supported on %s\n",
1486 QETH_CARD_IFNAME(card));
1487 rc = -EOPNOTSUPP;
1488 } else {
1489 rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
1490 IPA_CMD_ASS_START, 0);
1491 if (rc)
1492 dev_warn(&card->gdev->dev, "Starting outbound TCP "
1493 "segmentation offload for %s failed\n",
1494 QETH_CARD_IFNAME(card));
1495 else
1496 dev_info(&card->gdev->dev,
1497 "Outbound TSO enabled\n");
1498 }
1499 if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)) {
1500 card->options.large_send = QETH_LARGE_SEND_NO;
1501 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
1502 }
1503 return rc;
1504 }
1505
1506 static int qeth_l3_start_ipassists(struct qeth_card *card)
1507 {
1508 QETH_DBF_TEXT(TRACE, 3, "strtipas");
1509 qeth_l3_start_ipa_arp_processing(card); /* go on*/
1510 qeth_l3_start_ipa_ip_fragmentation(card); /* go on*/
1511 qeth_l3_start_ipa_source_mac(card); /* go on*/
1512 qeth_l3_start_ipa_vlan(card); /* go on*/
1513 qeth_l3_start_ipa_multicast(card); /* go on*/
1514 qeth_l3_start_ipa_ipv6(card); /* go on*/
1515 qeth_l3_start_ipa_broadcast(card); /* go on*/
1516 qeth_l3_start_ipa_checksum(card); /* go on*/
1517 qeth_l3_start_ipa_tso(card); /* go on*/
1518 return 0;
1519 }
1520
1521 static int qeth_l3_put_unique_id(struct qeth_card *card)
1522 {
1523
1524 int rc = 0;
1525 struct qeth_cmd_buffer *iob;
1526 struct qeth_ipa_cmd *cmd;
1527
1528 QETH_DBF_TEXT(TRACE, 2, "puniqeid");
1529
1530 if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
1531 UNIQUE_ID_NOT_BY_CARD)
1532 return -1;
1533 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
1534 QETH_PROT_IPV6);
1535 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1536 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
1537 card->info.unique_id;
1538 memcpy(&cmd->data.create_destroy_addr.unique_id[0],
1539 card->dev->dev_addr, OSA_ADDR_LEN);
1540 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
1541 return rc;
1542 }
1543
1544 static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
1545 struct qeth_reply *reply, unsigned long data)
1546 {
1547 struct qeth_ipa_cmd *cmd;
1548
1549 cmd = (struct qeth_ipa_cmd *) data;
1550 if (cmd->hdr.return_code == 0)
1551 memcpy(card->dev->dev_addr,
1552 cmd->data.create_destroy_addr.unique_id, ETH_ALEN);
1553 else
1554 random_ether_addr(card->dev->dev_addr);
1555
1556 return 0;
1557 }
1558
1559 static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
1560 {
1561 int rc = 0;
1562 struct qeth_cmd_buffer *iob;
1563 struct qeth_ipa_cmd *cmd;
1564
1565 QETH_DBF_TEXT(SETUP, 2, "hsrmac");
1566
1567 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
1568 QETH_PROT_IPV6);
1569 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1570 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
1571 card->info.unique_id;
1572
1573 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb,
1574 NULL);
1575 return rc;
1576 }
1577
1578 static int qeth_l3_get_unique_id_cb(struct qeth_card *card,
1579 struct qeth_reply *reply, unsigned long data)
1580 {
1581 struct qeth_ipa_cmd *cmd;
1582
1583 cmd = (struct qeth_ipa_cmd *) data;
1584 if (cmd->hdr.return_code == 0)
1585 card->info.unique_id = *((__u16 *)
1586 &cmd->data.create_destroy_addr.unique_id[6]);
1587 else {
1588 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
1589 UNIQUE_ID_NOT_BY_CARD;
1590 dev_warn(&card->gdev->dev, "The network adapter failed to "
1591 "generate a unique ID\n");
1592 }
1593 return 0;
1594 }
1595
1596 static int qeth_l3_get_unique_id(struct qeth_card *card)
1597 {
1598 int rc = 0;
1599 struct qeth_cmd_buffer *iob;
1600 struct qeth_ipa_cmd *cmd;
1601
1602 QETH_DBF_TEXT(SETUP, 2, "guniqeid");
1603
1604 if (!qeth_is_supported(card, IPA_IPV6)) {
1605 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
1606 UNIQUE_ID_NOT_BY_CARD;
1607 return 0;
1608 }
1609
1610 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
1611 QETH_PROT_IPV6);
1612 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1613 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
1614 card->info.unique_id;
1615
1616 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL);
1617 return rc;
1618 }
1619
1620 static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac,
1621 struct net_device *dev)
1622 {
1623 if (dev->type == ARPHRD_IEEE802_TR)
1624 ip_tr_mc_map(ipm, mac);
1625 else
1626 ip_eth_mc_map(ipm, mac);
1627 }
1628
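/*
* Walk the IPv4 multicast list of the given in_device and queue each
* group (mapped MAC plus group address) for registration on the card.
*/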
1629 static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
1630 {
1631 struct qeth_ipaddr *ipm;
1632 struct ip_mc_list *im4;
1633 char buf[MAX_ADDR_LEN];
1634
1635 QETH_DBF_TEXT(TRACE, 4, "addmc");
1636 for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
1637 qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
1638 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
1639 if (!ipm)
1640 continue;
1641 ipm->u.a4.addr = im4->multiaddr;
1642 memcpy(ipm->mac, buf, OSA_ADDR_LEN);
1643 ipm->is_multicast = 1;
1644 if (!qeth_l3_add_ip(card, ipm))
1645 kfree(ipm);
1646 }
1647 }
1648
1649 static void qeth_l3_add_vlan_mc(struct qeth_card *card)
1650 {
1651 struct in_device *in_dev;
1652 struct vlan_group *vg;
1653 int i;
1654
1655 QETH_DBF_TEXT(TRACE, 4, "addmcvl");
1656 if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL))
1657 return;
1658
1659 vg = card->vlangrp;
1660 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
1661 struct net_device *netdev = vlan_group_get_device(vg, i);
1662 if (netdev == NULL ||
1663 !(netdev->flags & IFF_UP))
1664 continue;
1665 in_dev = in_dev_get(netdev);
1666 if (!in_dev)
1667 continue;
1668 read_lock(&in_dev->mc_list_lock);
1669 qeth_l3_add_mc(card, in_dev);
1670 read_unlock(&in_dev->mc_list_lock);
1671 in_dev_put(in_dev);
1672 }
1673 }
1674
1675 static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
1676 {
1677 struct in_device *in4_dev;
1678
1679 QETH_DBF_TEXT(TRACE, 4, "chkmcv4");
1680 in4_dev = in_dev_get(card->dev);
1681 if (in4_dev == NULL)
1682 return;
1683 read_lock(&in4_dev->mc_list_lock);
1684 qeth_l3_add_mc(card, in4_dev);
1685 qeth_l3_add_vlan_mc(card);
1686 read_unlock(&in4_dev->mc_list_lock);
1687 in_dev_put(in4_dev);
1688 }
1689
1690 #ifdef CONFIG_QETH_IPV6
1691 static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
1692 {
1693 struct qeth_ipaddr *ipm;
1694 struct ifmcaddr6 *im6;
1695 char buf[MAX_ADDR_LEN];
1696
1697 QETH_DBF_TEXT(TRACE, 4, "addmc6");
1698 for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
1699 ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
1700 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
1701 if (!ipm)
1702 continue;
1703 ipm->is_multicast = 1;
1704 memcpy(ipm->mac, buf, OSA_ADDR_LEN);
1705 memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
1706 sizeof(struct in6_addr));
1707 if (!qeth_l3_add_ip(card, ipm))
1708 kfree(ipm);
1709 }
1710 }
1711
1712 static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
1713 {
1714 struct inet6_dev *in_dev;
1715 struct vlan_group *vg;
1716 int i;
1717
1718 QETH_DBF_TEXT(TRACE, 4, "admc6vl");
1719 if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL))
1720 return;
1721
1722 vg = card->vlangrp;
1723 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
1724 struct net_device *netdev = vlan_group_get_device(vg, i);
1725 if (netdev == NULL ||
1726 !(netdev->flags & IFF_UP))
1727 continue;
1728 in_dev = in6_dev_get(netdev);
1729 if (!in_dev)
1730 continue;
1731 read_lock_bh(&in_dev->lock);
1732 qeth_l3_add_mc6(card, in_dev);
1733 read_unlock_bh(&in_dev->lock);
1734 in6_dev_put(in_dev);
1735 }
1736 }
1737
1738 static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
1739 {
1740 struct inet6_dev *in6_dev;
1741
1742 QETH_DBF_TEXT(TRACE, 4, "chkmcv6");
1743 if (!qeth_is_supported(card, IPA_IPV6))
1744 return;
1745 in6_dev = in6_dev_get(card->dev);
1746 if (in6_dev == NULL)
1747 return;
1748 read_lock_bh(&in6_dev->lock);
1749 qeth_l3_add_mc6(card, in6_dev);
1750 qeth_l3_add_vlan_mc6(card);
1751 read_unlock_bh(&in6_dev->lock);
1752 in6_dev_put(in6_dev);
1753 }
1754 #endif /* CONFIG_QETH_IPV6 */
1755
1756 static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
1757 unsigned short vid)
1758 {
1759 struct in_device *in_dev;
1760 struct in_ifaddr *ifa;
1761 struct qeth_ipaddr *addr;
1762
1763 QETH_DBF_TEXT(TRACE, 4, "frvaddr4");
1764
1765 in_dev = in_dev_get(vlan_group_get_device(card->vlangrp, vid));
1766 if (!in_dev)
1767 return;
1768 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1769 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
1770 if (addr) {
1771 addr->u.a4.addr = ifa->ifa_address;
1772 addr->u.a4.mask = ifa->ifa_mask;
1773 addr->type = QETH_IP_TYPE_NORMAL;
1774 if (!qeth_l3_delete_ip(card, addr))
1775 kfree(addr);
1776 }
1777 }
1778 in_dev_put(in_dev);
1779 }
1780
1781 static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
1782 unsigned short vid)
1783 {
1784 #ifdef CONFIG_QETH_IPV6
1785 struct inet6_dev *in6_dev;
1786 struct inet6_ifaddr *ifa;
1787 struct qeth_ipaddr *addr;
1788
1789 QETH_DBF_TEXT(TRACE, 4, "frvaddr6");
1790
1791 in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
1792 if (!in6_dev)
1793 return;
1794 for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next) {
1795 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
1796 if (addr) {
1797 memcpy(&addr->u.a6.addr, &ifa->addr,
1798 sizeof(struct in6_addr));
1799 addr->u.a6.pfxlen = ifa->prefix_len;
1800 addr->type = QETH_IP_TYPE_NORMAL;
1801 if (!qeth_l3_delete_ip(card, addr))
1802 kfree(addr);
1803 }
1804 }
1805 in6_dev_put(in6_dev);
1806 #endif /* CONFIG_QETH_IPV6 */
1807 }
1808
1809 static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
1810 unsigned short vid)
1811 {
1812 if (!card->vlangrp)
1813 return;
1814 qeth_l3_free_vlan_addresses4(card, vid);
1815 qeth_l3_free_vlan_addresses6(card, vid);
1816 }
1817
1818 static void qeth_l3_vlan_rx_register(struct net_device *dev,
1819 struct vlan_group *grp)
1820 {
1821 struct qeth_card *card = dev->ml_priv;
1822 unsigned long flags;
1823
1824 QETH_DBF_TEXT(TRACE, 4, "vlanreg");
1825 spin_lock_irqsave(&card->vlanlock, flags);
1826 card->vlangrp = grp;
1827 spin_unlock_irqrestore(&card->vlanlock, flags);
1828 }
1829
1830 static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1831 {
1832 return;
1833 }
1834
1835 static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1836 {
1837 struct qeth_card *card = dev->ml_priv;
1838 unsigned long flags;
1839
1840 QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
1841 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
1842 QETH_DBF_TEXT(TRACE, 3, "kidREC");
1843 return;
1844 }
1845 spin_lock_irqsave(&card->vlanlock, flags);
1846 /* unregister IP addresses of vlan device */
1847 qeth_l3_free_vlan_addresses(card, vid);
1848 vlan_group_set_device(card->vlangrp, vid, NULL);
1849 spin_unlock_irqrestore(&card->vlanlock, flags);
1850 qeth_l3_set_multicast_list(card->dev);
1851 }
1852
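/*
* Rebuild the layer-2 header from the qeth layer-3 header: derive packet
* type and destination MAC from the cast flags (a fake "FAKELL" source is
* used when no source MAC was passed along), set the checksum status and
* return the VLAN id if the frame was tagged.
*/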
1853 static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
1854 struct sk_buff *skb, struct qeth_hdr *hdr)
1855 {
1856 unsigned short vlan_id = 0;
1857 __be16 prot;
1858 struct iphdr *ip_hdr;
1859 unsigned char tg_addr[MAX_ADDR_LEN];
1860
1861 if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
1862 prot = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
1863 ETH_P_IP);
1864 switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
1865 case QETH_CAST_MULTICAST:
1866 switch (prot) {
1867 #ifdef CONFIG_QETH_IPV6
1868 case __constant_htons(ETH_P_IPV6):
1869 ndisc_mc_map((struct in6_addr *)
1870 skb->data + 24,
1871 tg_addr, card->dev, 0);
1872 break;
1873 #endif
1874 case __constant_htons(ETH_P_IP):
1875 ip_hdr = (struct iphdr *)skb->data;
1876 (card->dev->type == ARPHRD_IEEE802_TR) ?
1877 ip_tr_mc_map(ip_hdr->daddr, tg_addr):
1878 ip_eth_mc_map(ip_hdr->daddr, tg_addr);
1879 break;
1880 default:
1881 memcpy(tg_addr, card->dev->broadcast,
1882 card->dev->addr_len);
1883 }
1884 card->stats.multicast++;
1885 skb->pkt_type = PACKET_MULTICAST;
1886 break;
1887 case QETH_CAST_BROADCAST:
1888 memcpy(tg_addr, card->dev->broadcast,
1889 card->dev->addr_len);
1890 card->stats.multicast++;
1891 skb->pkt_type = PACKET_BROADCAST;
1892 break;
1893 case QETH_CAST_UNICAST:
1894 case QETH_CAST_ANYCAST:
1895 case QETH_CAST_NOCAST:
1896 default:
1897 skb->pkt_type = PACKET_HOST;
1898 memcpy(tg_addr, card->dev->dev_addr,
1899 card->dev->addr_len);
1900 }
1901 if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
1902 card->dev->header_ops->create(skb, card->dev, prot,
1903 tg_addr, &hdr->hdr.l3.dest_addr[2],
1904 card->dev->addr_len);
1905 else
1906 card->dev->header_ops->create(skb, card->dev, prot,
1907 tg_addr, "FAKELL", card->dev->addr_len);
1908 }
1909
1910 #ifdef CONFIG_TR
1911 if (card->dev->type == ARPHRD_IEEE802_TR)
1912 skb->protocol = tr_type_trans(skb, card->dev);
1913 else
1914 #endif
1915 skb->protocol = eth_type_trans(skb, card->dev);
1916
1917 if (hdr->hdr.l3.ext_flags &
1918 (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
1919 vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)?
1920 hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
1921 }
1922
1923 skb->ip_summed = card->options.checksum_type;
1924 if (card->options.checksum_type == HW_CHECKSUMMING) {
1925 if ((hdr->hdr.l3.ext_flags &
1926 (QETH_HDR_EXT_CSUM_HDR_REQ |
1927 QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
1928 (QETH_HDR_EXT_CSUM_HDR_REQ |
1929 QETH_HDR_EXT_CSUM_TRANSP_REQ))
1930 skb->ip_summed = CHECKSUM_UNNECESSARY;
1931 else
1932 skb->ip_summed = SW_CHECKSUMMING;
1933 }
1934
1935 return vlan_id;
1936 }
1937
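/*
* Process one inbound QDIO buffer: extract each received skb, rebuild its
* layer-2 header and hand it to the stack (via VLAN acceleration when a
* VLAN tag is present), updating the receive statistics.
*/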
1938 static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
1939 struct qeth_qdio_buffer *buf, int index)
1940 {
1941 struct qdio_buffer_element *element;
1942 struct sk_buff *skb;
1943 struct qeth_hdr *hdr;
1944 int offset;
1945 __u16 vlan_tag = 0;
1946 unsigned int len;
1947
1948 /* get first element of current buffer */
1949 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
1950 offset = 0;
1951 if (card->options.performance_stats)
1952 card->perf_stats.bufs_rec++;
1953 while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element,
1954 &offset, &hdr))) {
1955 skb->dev = card->dev;
1956 /* is device UP ? */
1957 if (!(card->dev->flags & IFF_UP)) {
1958 dev_kfree_skb_any(skb);
1959 continue;
1960 }
1961
1962 switch (hdr->hdr.l3.id) {
1963 case QETH_HEADER_TYPE_LAYER3:
1964 vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr);
1965 len = skb->len;
1966 			if (vlan_tag) {
1967 				if (card->vlangrp)
1968 					vlan_hwaccel_rx(skb, card->vlangrp,
1969 						vlan_tag);
1970 				else {
1971 					dev_kfree_skb_any(skb);
1972 					continue;
1973 				}
1974 			} else
1975 netif_rx(skb);
1976 break;
1977 default:
1978 dev_kfree_skb_any(skb);
1979 QETH_DBF_TEXT(TRACE, 3, "inbunkno");
1980 QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
1981 continue;
1982 }
1983
1984 card->dev->last_rx = jiffies;
1985 card->stats.rx_packets++;
1986 card->stats.rx_bytes += len;
1987 }
1988 }
1989
1990 static int qeth_l3_verify_vlan_dev(struct net_device *dev,
1991 struct qeth_card *card)
1992 {
1993 int rc = 0;
1994 struct vlan_group *vg;
1995 int i;
1996
1997 vg = card->vlangrp;
1998 if (!vg)
1999 return rc;
2000
2001 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
2002 if (vlan_group_get_device(vg, i) == dev) {
2003 rc = QETH_VLAN_CARD;
2004 break;
2005 }
2006 }
2007
2008 if (rc && !(vlan_dev_real_dev(dev)->ml_priv == (void *)card))
2009 return 0;
2010
2011 return rc;
2012 }
2013
2014 static int qeth_l3_verify_dev(struct net_device *dev)
2015 {
2016 struct qeth_card *card;
2017 unsigned long flags;
2018 int rc = 0;
2019
2020 read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
2021 list_for_each_entry(card, &qeth_core_card_list.list, list) {
2022 if (card->dev == dev) {
2023 rc = QETH_REAL_CARD;
2024 break;
2025 }
2026 rc = qeth_l3_verify_vlan_dev(dev, card);
2027 if (rc)
2028 break;
2029 }
2030 read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
2031
2032 return rc;
2033 }
2034
2035 static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
2036 {
2037 struct qeth_card *card = NULL;
2038 int rc;
2039
2040 rc = qeth_l3_verify_dev(dev);
2041 if (rc == QETH_REAL_CARD)
2042 card = dev->ml_priv;
2043 else if (rc == QETH_VLAN_CARD)
2044 card = vlan_dev_real_dev(dev)->ml_priv;
2045 if (card && card->options.layer2)
2046 card = NULL;
2047 QETH_DBF_TEXT_(TRACE, 4, "%d", rc);
2048 	return card;
2049 }
2050
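/*
 * Take the card down step by step: stop or close the net_device, send a
 * STOPLAN unless a hard stop was requested, flush the IP and IPA command
 * lists, clear the QDIO queues and buffer pools, and finally release the
 * command buffers once the card reaches CARD_STATE_DOWN.
 */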
2051 static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
2052 {
2053 int rc = 0;
2054
2055 QETH_DBF_TEXT(SETUP, 2, "stopcard");
2056 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
2057
2058 qeth_set_allowed_threads(card, 0, 1);
2059 if (card->read.state == CH_STATE_UP &&
2060 card->write.state == CH_STATE_UP &&
2061 (card->state == CARD_STATE_UP)) {
2062 if (recovery_mode)
2063 qeth_l3_stop(card->dev);
2064 else {
2065 if (card->dev) {
2066 rtnl_lock();
2067 dev_close(card->dev);
2068 rtnl_unlock();
2069 }
2070 }
2071 if (!card->use_hard_stop) {
2072 rc = qeth_send_stoplan(card);
2073 if (rc)
2074 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
2075 }
2076 card->state = CARD_STATE_SOFTSETUP;
2077 }
2078 if (card->state == CARD_STATE_SOFTSETUP) {
2079 qeth_l3_clear_ip_list(card, !card->use_hard_stop, 1);
2080 qeth_clear_ipacmd_list(card);
2081 card->state = CARD_STATE_HARDSETUP;
2082 }
2083 if (card->state == CARD_STATE_HARDSETUP) {
2084 if (!card->use_hard_stop &&
2085 (card->info.type != QETH_CARD_TYPE_IQD)) {
2086 rc = qeth_l3_put_unique_id(card);
2087 if (rc)
2088 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
2089 }
2090 qeth_qdio_clear_card(card, 0);
2091 qeth_clear_qdio_buffers(card);
2092 qeth_clear_working_pool_list(card);
2093 card->state = CARD_STATE_DOWN;
2094 }
2095 if (card->state == CARD_STATE_DOWN) {
2096 qeth_clear_cmd_buffers(&card->read);
2097 qeth_clear_cmd_buffers(&card->write);
2098 }
2099 card->use_hard_stop = 0;
2100 return rc;
2101 }
2102
2103 static void qeth_l3_set_multicast_list(struct net_device *dev)
2104 {
2105 struct qeth_card *card = dev->ml_priv;
2106
2107 QETH_DBF_TEXT(TRACE, 3, "setmulti");
2108 if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
2109 (card->state != CARD_STATE_UP))
2110 return;
2111 qeth_l3_delete_mc_addresses(card);
2112 qeth_l3_add_multicast_ipv4(card);
2113 #ifdef CONFIG_QETH_IPV6
2114 qeth_l3_add_multicast_ipv6(card);
2115 #endif
2116 qeth_l3_set_ip_addr_list(card);
2117 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
2118 return;
2119 qeth_setadp_promisc_mode(card);
2120 }
2121
2122 static const char *qeth_l3_arp_get_error_cause(int *rc)
2123 {
2124 switch (*rc) {
2125 case QETH_IPA_ARP_RC_FAILED:
2126 *rc = -EIO;
2127 return "operation failed";
2128 case QETH_IPA_ARP_RC_NOTSUPP:
2129 *rc = -EOPNOTSUPP;
2130 return "operation not supported";
2131 case QETH_IPA_ARP_RC_OUT_OF_RANGE:
2132 *rc = -EINVAL;
2133 return "argument out of range";
2134 case QETH_IPA_ARP_RC_Q_NOTSUPP:
2135 *rc = -EOPNOTSUPP;
2136 return "query operation not supported";
2137 case QETH_IPA_ARP_RC_Q_NO_DATA:
2138 *rc = -ENOENT;
2139 return "no query data available";
2140 default:
2141 return "unknown error";
2142 }
2143 }
2144
2145 static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
2146 {
2147 int tmp;
2148 int rc;
2149
2150 QETH_DBF_TEXT(TRACE, 3, "arpstnoe");
2151
2152 /*
2153 * currently GuestLAN only supports the ARP assist function
2154 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
2155 * thus we say EOPNOTSUPP for this ARP function
2156 */
2157 if (card->info.guestlan)
2158 return -EOPNOTSUPP;
2159 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
2160 return -EOPNOTSUPP;
2161 }
2162 rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
2163 IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
2164 no_entries);
2165 if (rc) {
2166 tmp = rc;
2167 QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on "
2168 "%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card),
2169 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2170 }
2171 return rc;
2172 }
2173
2174 static void qeth_l3_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
2175 struct qeth_arp_query_data *qdata, int entry_size,
2176 int uentry_size)
2177 {
2178 char *entry_ptr;
2179 char *uentry_ptr;
2180 int i;
2181
2182 entry_ptr = (char *)&qdata->data;
2183 uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset);
2184 for (i = 0; i < qdata->no_entries; ++i) {
2185 /* strip off 32 bytes "media specific information" */
2186 memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32);
2187 entry_ptr += entry_size;
2188 uentry_ptr += uentry_size;
2189 }
2190 }
2191
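/*
 * Callback for IPA_CMD_ASS_ARP_QUERY_INFO replies: copy the returned ARP
 * entries into the user buffer described by qinfo (optionally stripping the
 * 32 bytes of media specific information) and return 1 as long as further
 * reply parts are expected, 0 once the query is complete or has failed.
 */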
2192 static int qeth_l3_arp_query_cb(struct qeth_card *card,
2193 struct qeth_reply *reply, unsigned long data)
2194 {
2195 struct qeth_ipa_cmd *cmd;
2196 struct qeth_arp_query_data *qdata;
2197 struct qeth_arp_query_info *qinfo;
2198 int entry_size;
2199 int uentry_size;
2200 int i;
2201
2202 QETH_DBF_TEXT(TRACE, 4, "arpquecb");
2203
2204 qinfo = (struct qeth_arp_query_info *) reply->param;
2205 cmd = (struct qeth_ipa_cmd *) data;
2206 if (cmd->hdr.return_code) {
2207 QETH_DBF_TEXT_(TRACE, 4, "qaer1%i", cmd->hdr.return_code);
2208 return 0;
2209 }
2210 if (cmd->data.setassparms.hdr.return_code) {
2211 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
2212 QETH_DBF_TEXT_(TRACE, 4, "qaer2%i", cmd->hdr.return_code);
2213 return 0;
2214 }
2215 qdata = &cmd->data.setassparms.data.query_arp;
2216 switch (qdata->reply_bits) {
2217 case 5:
2218 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5);
2219 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
2220 uentry_size = sizeof(struct qeth_arp_qi_entry5_short);
2221 break;
2222 case 7:
2223 /* fall through to default */
2224 default:
2225 /* tr is the same as eth -> entry7 */
2226 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7);
2227 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
2228 uentry_size = sizeof(struct qeth_arp_qi_entry7_short);
2229 break;
2230 }
2231 /* check if there is enough room in userspace */
2232 if ((qinfo->udata_len - qinfo->udata_offset) <
2233 qdata->no_entries * uentry_size){
2234 QETH_DBF_TEXT_(TRACE, 4, "qaer3%i", -ENOMEM);
2235 cmd->hdr.return_code = -ENOMEM;
2236 goto out_error;
2237 }
2238 QETH_DBF_TEXT_(TRACE, 4, "anore%i",
2239 cmd->data.setassparms.hdr.number_of_replies);
2240 QETH_DBF_TEXT_(TRACE, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
2241 QETH_DBF_TEXT_(TRACE, 4, "anoen%i", qdata->no_entries);
2242
2243 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
2244 /* strip off "media specific information" */
2245 qeth_l3_copy_arp_entries_stripped(qinfo, qdata, entry_size,
2246 uentry_size);
2247 } else
2248 /*copy entries to user buffer*/
2249 memcpy(qinfo->udata + qinfo->udata_offset,
2250 (char *)&qdata->data, qdata->no_entries*uentry_size);
2251
2252 qinfo->no_entries += qdata->no_entries;
2253 qinfo->udata_offset += (qdata->no_entries*uentry_size);
2254 /* check if all replies received ... */
2255 if (cmd->data.setassparms.hdr.seq_no <
2256 cmd->data.setassparms.hdr.number_of_replies)
2257 return 1;
2258 memcpy(qinfo->udata, &qinfo->no_entries, 4);
2259 /* keep STRIP_ENTRIES flag so the user program can distinguish
2260 * stripped entries from normal ones */
2261 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
2262 qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
2263 memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2);
2264 return 0;
2265 out_error:
2266 i = 0;
2267 memcpy(qinfo->udata, &i, 4);
2268 return 0;
2269 }
2270
2271 static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card,
2272 struct qeth_cmd_buffer *iob, int len,
2273 int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
2274 unsigned long),
2275 void *reply_param)
2276 {
2277 QETH_DBF_TEXT(TRACE, 4, "sendarp");
2278
2279 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
2280 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
2281 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2282 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
2283 reply_cb, reply_param);
2284 }
2285
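/*
 * Handle the SIOC_QETH_ARP_QUERY_INFO ioctl: read the buffer size and mask
 * bits from user space, query the ARP cache via IPA_CMD_ASS_ARP_QUERY_INFO
 * and copy the collected entries back to the caller's buffer.
 */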
2286 static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
2287 {
2288 struct qeth_cmd_buffer *iob;
2289 struct qeth_arp_query_info qinfo = {0, };
2290 int tmp;
2291 int rc;
2292
2293 QETH_DBF_TEXT(TRACE, 3, "arpquery");
2294
2295 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
2296 IPA_ARP_PROCESSING)) {
2297 return -EOPNOTSUPP;
2298 }
2299 /* get size of userspace buffer and mask_bits -> 6 bytes */
2300 if (copy_from_user(&qinfo, udata, 6))
2301 return -EFAULT;
2302 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
2303 if (!qinfo.udata)
2304 return -ENOMEM;
2305 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
2306 iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
2307 IPA_CMD_ASS_ARP_QUERY_INFO,
2308 sizeof(int), QETH_PROT_IPV4);
2309
2310 rc = qeth_l3_send_ipa_arp_cmd(card, iob,
2311 QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
2312 qeth_l3_arp_query_cb, (void *)&qinfo);
2313 if (rc) {
2314 tmp = rc;
2315 QETH_DBF_MESSAGE(2, "Error while querying ARP cache on %s: %s "
2316 "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
2317 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2318 if (copy_to_user(udata, qinfo.udata, 4))
2319 rc = -EFAULT;
2320 } else {
2321 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
2322 rc = -EFAULT;
2323 }
2324 kfree(qinfo.udata);
2325 return rc;
2326 }
2327
2328 static int qeth_l3_arp_add_entry(struct qeth_card *card,
2329 struct qeth_arp_cache_entry *entry)
2330 {
2331 struct qeth_cmd_buffer *iob;
2332 char buf[16];
2333 int tmp;
2334 int rc;
2335
2336 QETH_DBF_TEXT(TRACE, 3, "arpadent");
2337
2338 /*
2339 * currently GuestLAN only supports the ARP assist function
2340 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
2341 * thus we say EOPNOTSUPP for this ARP function
2342 */
2343 if (card->info.guestlan)
2344 return -EOPNOTSUPP;
2345 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
2346 return -EOPNOTSUPP;
2347 }
2348
2349 iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
2350 IPA_CMD_ASS_ARP_ADD_ENTRY,
2351 sizeof(struct qeth_arp_cache_entry),
2352 QETH_PROT_IPV4);
2353 rc = qeth_l3_send_setassparms(card, iob,
2354 sizeof(struct qeth_arp_cache_entry),
2355 (unsigned long) entry,
2356 qeth_l3_default_setassparms_cb, NULL);
2357 if (rc) {
2358 tmp = rc;
2359 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
2360 QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s "
2361 "on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
2362 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2363 }
2364 return rc;
2365 }
2366
2367 static int qeth_l3_arp_remove_entry(struct qeth_card *card,
2368 struct qeth_arp_cache_entry *entry)
2369 {
2370 struct qeth_cmd_buffer *iob;
2371 char buf[16] = {0, };
2372 int tmp;
2373 int rc;
2374
2375 QETH_DBF_TEXT(TRACE, 3, "arprment");
2376
2377 /*
2378 * currently GuestLAN only supports the ARP assist function
2379 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
2380 * thus we say EOPNOTSUPP for this ARP function
2381 */
2382 if (card->info.guestlan)
2383 return -EOPNOTSUPP;
2384 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
2385 return -EOPNOTSUPP;
2386 }
2387 memcpy(buf, entry, 12);
2388 iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
2389 IPA_CMD_ASS_ARP_REMOVE_ENTRY,
2390 12,
2391 QETH_PROT_IPV4);
2392 rc = qeth_l3_send_setassparms(card, iob,
2393 12, (unsigned long)buf,
2394 qeth_l3_default_setassparms_cb, NULL);
2395 if (rc) {
2396 tmp = rc;
2397 memset(buf, 0, 16);
2398 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
2399 QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s"
2400 " on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
2401 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2402 }
2403 return rc;
2404 }
2405
2406 static int qeth_l3_arp_flush_cache(struct qeth_card *card)
2407 {
2408 int rc;
2409 int tmp;
2410
2411 QETH_DBF_TEXT(TRACE, 3, "arpflush");
2412
2413 /*
2414 * currently GuestLAN only supports the ARP assist function
2415 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
2416 * thus we say EOPNOTSUPP for this ARP function
2417 */
2418 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
2419 return -EOPNOTSUPP;
2420 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
2421 return -EOPNOTSUPP;
2422 }
2423 rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
2424 IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
2425 if (rc) {
2426 tmp = rc;
2427 QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s "
2428 "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
2429 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2430 }
2431 return rc;
2432 }
2433
2434 static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2435 {
2436 struct qeth_card *card = dev->ml_priv;
2437 struct qeth_arp_cache_entry arp_entry;
2438 struct mii_ioctl_data *mii_data;
2439 int rc = 0;
2440
2441 if (!card)
2442 return -ENODEV;
2443
2444 if ((card->state != CARD_STATE_UP) &&
2445 (card->state != CARD_STATE_SOFTSETUP))
2446 return -ENODEV;
2447
2448 switch (cmd) {
2449 case SIOC_QETH_ARP_SET_NO_ENTRIES:
2450 if (!capable(CAP_NET_ADMIN)) {
2451 rc = -EPERM;
2452 break;
2453 }
2454 rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
2455 break;
2456 case SIOC_QETH_ARP_QUERY_INFO:
2457 if (!capable(CAP_NET_ADMIN)) {
2458 rc = -EPERM;
2459 break;
2460 }
2461 rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data);
2462 break;
2463 case SIOC_QETH_ARP_ADD_ENTRY:
2464 if (!capable(CAP_NET_ADMIN)) {
2465 rc = -EPERM;
2466 break;
2467 }
2468 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
2469 sizeof(struct qeth_arp_cache_entry)))
2470 rc = -EFAULT;
2471 else
2472 rc = qeth_l3_arp_add_entry(card, &arp_entry);
2473 break;
2474 case SIOC_QETH_ARP_REMOVE_ENTRY:
2475 if (!capable(CAP_NET_ADMIN)) {
2476 rc = -EPERM;
2477 break;
2478 }
2479 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
2480 sizeof(struct qeth_arp_cache_entry)))
2481 rc = -EFAULT;
2482 else
2483 rc = qeth_l3_arp_remove_entry(card, &arp_entry);
2484 break;
2485 case SIOC_QETH_ARP_FLUSH_CACHE:
2486 if (!capable(CAP_NET_ADMIN)) {
2487 rc = -EPERM;
2488 break;
2489 }
2490 rc = qeth_l3_arp_flush_cache(card);
2491 break;
2492 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
2493 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
2494 break;
2495 case SIOC_QETH_GET_CARD_TYPE:
2496 if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
2497 !card->info.guestlan)
2498 return 1;
2499 return 0;
2500 break;
2501 case SIOCGMIIPHY:
2502 mii_data = if_mii(rq);
2503 mii_data->phy_id = 0;
2504 break;
2505 case SIOCGMIIREG:
2506 mii_data = if_mii(rq);
2507 if (mii_data->phy_id != 0)
2508 rc = -EINVAL;
2509 else
2510 mii_data->val_out = qeth_mdio_read(dev,
2511 mii_data->phy_id,
2512 mii_data->reg_num);
2513 break;
2514 default:
2515 rc = -EOPNOTSUPP;
2516 }
2517 if (rc)
2518 QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc);
2519 return rc;
2520 }
2521
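/*
 * Build the qeth layer-3 header for an outbound frame: set cast and VLAN
 * flags, the payload length and the destination address (the next-hop IP
 * from the neighbour entry when available, otherwise the address from the
 * IP header). Non-IP frames are sent in passthrough mode.
 */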
2522 static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2523 struct sk_buff *skb, int ipv, int cast_type)
2524 {
2525 memset(hdr, 0, sizeof(struct qeth_hdr));
2526 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
2527 hdr->hdr.l3.ext_flags = 0;
2528
2529 /*
2530 	 * set the VLAN information now, before dest_addr is overwritten with
2531 	 * the next-hop IP: v6 uses passthrough, v4 sets the tag in the QDIO header.
2532 */
2533 if (card->vlangrp && vlan_tx_tag_present(skb)) {
2534 if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD))
2535 hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME;
2536 else
2537 hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG;
2538 hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
2539 }
2540
2541 hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
2542 if (ipv == 4) {
2543 /* IPv4 */
2544 hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
2545 memset(hdr->hdr.l3.dest_addr, 0, 12);
2546 if ((skb->dst) && (skb->dst->neighbour)) {
2547 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
2548 *((u32 *) skb->dst->neighbour->primary_key);
2549 } else {
2550 /* fill in destination address used in ip header */
2551 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
2552 ip_hdr(skb)->daddr;
2553 }
2554 } else if (ipv == 6) {
2555 /* IPv6 */
2556 hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type);
2557 if (card->info.type == QETH_CARD_TYPE_IQD)
2558 hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU;
2559 if ((skb->dst) && (skb->dst->neighbour)) {
2560 memcpy(hdr->hdr.l3.dest_addr,
2561 skb->dst->neighbour->primary_key, 16);
2562 } else {
2563 /* fill in destination address used in ip header */
2564 memcpy(hdr->hdr.l3.dest_addr,
2565 &ipv6_hdr(skb)->daddr, 16);
2566 }
2567 } else {
2568 /* passthrough */
2569 if ((skb->dev->type == ARPHRD_IEEE802_TR) &&
2570 !memcmp(skb->data + sizeof(struct qeth_hdr) +
2571 sizeof(__u16), skb->dev->broadcast, 6)) {
2572 hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
2573 QETH_HDR_PASSTHRU;
2574 } else if (!memcmp(skb->data + sizeof(struct qeth_hdr),
2575 skb->dev->broadcast, 6)) {
2576 /* broadcast? */
2577 hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
2578 QETH_HDR_PASSTHRU;
2579 } else {
2580 hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
2581 QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
2582 QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
2583 }
2584 }
2585 }
2586
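/*
 * Turn a qeth header into a TSO header: fill in the fixed extension fields,
 * the MSS and the datagram header length, and prepare the TCP/IP headers
 * (zeroed lengths, pseudo-header checksum) as the OSA adapter expects them
 * for large send offload.
 */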
2587 static void qeth_tso_fill_header(struct qeth_card *card,
2588 struct qeth_hdr *qhdr, struct sk_buff *skb)
2589 {
2590 struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
2591 struct tcphdr *tcph = tcp_hdr(skb);
2592 struct iphdr *iph = ip_hdr(skb);
2593 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2594
2595 /*fix header to TSO values ...*/
2596 hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
2597 	/* set values which are fixed for the first approach ... */
2598 hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
2599 hdr->ext.imb_hdr_no = 1;
2600 hdr->ext.hdr_type = 1;
2601 hdr->ext.hdr_version = 1;
2602 hdr->ext.hdr_len = 28;
2603 	/* insert non-fixed values */
2604 hdr->ext.mss = skb_shinfo(skb)->gso_size;
2605 hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
2606 hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
2607 sizeof(struct qeth_hdr_tso));
2608 tcph->check = 0;
2609 if (skb->protocol == ETH_P_IPV6) {
2610 ip6h->payload_len = 0;
2611 tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
2612 0, IPPROTO_TCP, 0);
2613 } else {
2614 		/* OSA wants us to set these values ... */
2615 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
2616 0, IPPROTO_TCP, 0);
2617 iph->tot_len = 0;
2618 iph->check = 0;
2619 }
2620 }
2621
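/*
 * Software checksum fallback for CHECKSUM_PARTIAL skbs: compute the checksum
 * over the transport payload and store the folded result at
 * csum_start + csum_offset in the linear data.
 */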
2622 static void qeth_tx_csum(struct sk_buff *skb)
2623 {
2624 __wsum csum;
2625 int offset;
2626
2627 skb_set_transport_header(skb, skb->csum_start - skb_headroom(skb));
2628 offset = skb->csum_start - skb_headroom(skb);
2629 BUG_ON(offset >= skb_headlen(skb));
2630 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2631
2632 offset += skb->csum_offset;
2633 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2634 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
2635 }
2636
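/*
 * Transmit path: check that the card can send the frame, build a writable
 * copy with room for the qeth (or TSO) header, fill in the header and pass
 * the result to the QDIO output queue selected by packet priority. On -EBUSY
 * the frame is requeued by returning NETDEV_TX_BUSY; on any other error it
 * is dropped and accounted as a tx error.
 */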
2637 static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2638 {
2639 int rc;
2640 u16 *tag;
2641 struct qeth_hdr *hdr = NULL;
2642 int elements_needed = 0;
2643 int elems;
2644 struct qeth_card *card = dev->ml_priv;
2645 struct sk_buff *new_skb = NULL;
2646 int ipv = qeth_get_ip_version(skb);
2647 int cast_type = qeth_get_cast_type(card, skb);
2648 struct qeth_qdio_out_q *queue = card->qdio.out_qs
2649 [qeth_get_priority_queue(card, skb, ipv, cast_type)];
2650 int tx_bytes = skb->len;
2651 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
2652 int data_offset = -1;
2653 int nr_frags;
2654
2655 if ((card->info.type == QETH_CARD_TYPE_IQD) &&
2656 (skb->protocol != htons(ETH_P_IPV6)) &&
2657 (skb->protocol != htons(ETH_P_IP)))
2658 goto tx_drop;
2659
2660 if ((card->state != CARD_STATE_UP) || !card->lan_online) {
2661 card->stats.tx_carrier_errors++;
2662 goto tx_drop;
2663 }
2664
2665 if ((cast_type == RTN_BROADCAST) &&
2666 (card->info.broadcast_capable == 0))
2667 goto tx_drop;
2668
2669 if (card->options.performance_stats) {
2670 card->perf_stats.outbound_cnt++;
2671 card->perf_stats.outbound_start_time = qeth_get_micros();
2672 }
2673
2674 if (skb_is_gso(skb))
2675 large_send = card->options.large_send;
2676 else
2677 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2678 qeth_tx_csum(skb);
2679 if (card->options.performance_stats)
2680 card->perf_stats.tx_csum++;
2681 }
2682
2683 if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
2684 (skb_shinfo(skb)->nr_frags == 0)) {
2685 new_skb = skb;
2686 data_offset = ETH_HLEN;
2687 hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
2688 if (!hdr)
2689 goto tx_drop;
2690 elements_needed++;
2691 } else {
2692 /* create a clone with writeable headroom */
2693 new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso)
2694 + VLAN_HLEN);
2695 if (!new_skb)
2696 goto tx_drop;
2697 }
2698
2699 if (card->info.type == QETH_CARD_TYPE_IQD) {
2700 if (data_offset < 0)
2701 skb_pull(new_skb, ETH_HLEN);
2702 } else {
2703 if (new_skb->protocol == htons(ETH_P_IP)) {
2704 if (card->dev->type == ARPHRD_IEEE802_TR)
2705 skb_pull(new_skb, TR_HLEN);
2706 else
2707 skb_pull(new_skb, ETH_HLEN);
2708 }
2709
2710 if (new_skb->protocol == ETH_P_IPV6 && card->vlangrp &&
2711 vlan_tx_tag_present(new_skb)) {
2712 skb_push(new_skb, VLAN_HLEN);
2713 skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
2714 skb_copy_to_linear_data_offset(new_skb, 4,
2715 new_skb->data + 8, 4);
2716 skb_copy_to_linear_data_offset(new_skb, 8,
2717 new_skb->data + 12, 4);
2718 tag = (u16 *)(new_skb->data + 12);
2719 *tag = __constant_htons(ETH_P_8021Q);
2720 *(tag + 1) = htons(vlan_tx_tag_get(new_skb));
2721 new_skb->vlan_tci = 0;
2722 }
2723 }
2724
2725 netif_stop_queue(dev);
2726
2727 	/* fix hardware limitation: as long as we do not have sbal
2728 	 * chaining we cannot send long frag lists
2729 */
2730 if ((large_send == QETH_LARGE_SEND_TSO) &&
2731 ((skb_shinfo(new_skb)->nr_frags + 2) > 16)) {
2732 if (skb_linearize(new_skb))
2733 goto tx_drop;
2734 }
2735
2736 if ((large_send == QETH_LARGE_SEND_TSO) &&
2737 (cast_type == RTN_UNSPEC)) {
2738 hdr = (struct qeth_hdr *)skb_push(new_skb,
2739 sizeof(struct qeth_hdr_tso));
2740 memset(hdr, 0, sizeof(struct qeth_hdr_tso));
2741 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
2742 qeth_tso_fill_header(card, hdr, new_skb);
2743 elements_needed++;
2744 } else {
2745 if (data_offset < 0) {
2746 hdr = (struct qeth_hdr *)skb_push(new_skb,
2747 sizeof(struct qeth_hdr));
2748 qeth_l3_fill_header(card, hdr, new_skb, ipv,
2749 cast_type);
2750 } else {
2751 qeth_l3_fill_header(card, hdr, new_skb, ipv,
2752 cast_type);
2753 hdr->hdr.l3.length = new_skb->len - data_offset;
2754 }
2755 }
2756
2757 elems = qeth_get_elements_no(card, (void *)hdr, new_skb,
2758 elements_needed);
2759 if (!elems) {
2760 if (data_offset >= 0)
2761 kmem_cache_free(qeth_core_header_cache, hdr);
2762 goto tx_drop;
2763 }
2764 elements_needed += elems;
2765 nr_frags = skb_shinfo(new_skb)->nr_frags;
2766
2767 if (card->info.type != QETH_CARD_TYPE_IQD)
2768 rc = qeth_do_send_packet(card, queue, new_skb, hdr,
2769 elements_needed);
2770 else
2771 rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
2772 elements_needed, data_offset, 0);
2773
2774 if (!rc) {
2775 card->stats.tx_packets++;
2776 card->stats.tx_bytes += tx_bytes;
2777 if (new_skb != skb)
2778 dev_kfree_skb_any(skb);
2779 if (card->options.performance_stats) {
2780 if (large_send != QETH_LARGE_SEND_NO) {
2781 card->perf_stats.large_send_bytes += tx_bytes;
2782 card->perf_stats.large_send_cnt++;
2783 }
2784 if (nr_frags) {
2785 card->perf_stats.sg_skbs_sent++;
2786 /* nr_frags + skb->data */
2787 card->perf_stats.sg_frags_sent += nr_frags + 1;
2788 }
2789 }
2790 } else {
2791 if (data_offset >= 0)
2792 kmem_cache_free(qeth_core_header_cache, hdr);
2793
2794 if (rc == -EBUSY) {
2795 if (new_skb != skb)
2796 dev_kfree_skb_any(new_skb);
2797 return NETDEV_TX_BUSY;
2798 } else
2799 goto tx_drop;
2800 }
2801
2802 netif_wake_queue(dev);
2803 if (card->options.performance_stats)
2804 card->perf_stats.outbound_time += qeth_get_micros() -
2805 card->perf_stats.outbound_start_time;
2806 return rc;
2807
2808 tx_drop:
2809 card->stats.tx_dropped++;
2810 card->stats.tx_errors++;
2811 if ((new_skb != skb) && new_skb)
2812 dev_kfree_skb_any(new_skb);
2813 dev_kfree_skb_any(skb);
2814 netif_wake_queue(dev);
2815 return NETDEV_TX_OK;
2816 }
2817
2818 static int qeth_l3_open(struct net_device *dev)
2819 {
2820 struct qeth_card *card = dev->ml_priv;
2821
2822 QETH_DBF_TEXT(TRACE, 4, "qethopen");
2823 if (card->state != CARD_STATE_SOFTSETUP)
2824 return -ENODEV;
2825 card->data.state = CH_STATE_UP;
2826 card->state = CARD_STATE_UP;
2827 netif_start_queue(dev);
2828
2829 if (!card->lan_online && netif_carrier_ok(dev))
2830 netif_carrier_off(dev);
2831 return 0;
2832 }
2833
2834 static int qeth_l3_stop(struct net_device *dev)
2835 {
2836 struct qeth_card *card = dev->ml_priv;
2837
2838 QETH_DBF_TEXT(TRACE, 4, "qethstop");
2839 netif_tx_disable(dev);
2840 if (card->state == CARD_STATE_UP)
2841 card->state = CARD_STATE_SOFTSETUP;
2842 return 0;
2843 }
2844
2845 static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev)
2846 {
2847 struct qeth_card *card = dev->ml_priv;
2848
2849 return (card->options.checksum_type == HW_CHECKSUMMING);
2850 }
2851
2852 static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data)
2853 {
2854 struct qeth_card *card = dev->ml_priv;
2855 enum qeth_card_states old_state;
2856 enum qeth_checksum_types csum_type;
2857
2858 if ((card->state != CARD_STATE_UP) &&
2859 (card->state != CARD_STATE_DOWN))
2860 return -EPERM;
2861
2862 if (data)
2863 csum_type = HW_CHECKSUMMING;
2864 else
2865 csum_type = SW_CHECKSUMMING;
2866
2867 if (card->options.checksum_type != csum_type) {
2868 old_state = card->state;
2869 if (card->state == CARD_STATE_UP)
2870 __qeth_l3_set_offline(card->gdev, 1);
2871 card->options.checksum_type = csum_type;
2872 if (old_state == CARD_STATE_UP)
2873 __qeth_l3_set_online(card->gdev, 1);
2874 }
2875 return 0;
2876 }
2877
2878 static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data)
2879 {
2880 struct qeth_card *card = dev->ml_priv;
2881
2882 if (data) {
2883 if (card->options.large_send == QETH_LARGE_SEND_NO) {
2884 if (card->info.type == QETH_CARD_TYPE_IQD)
2885 return -EPERM;
2886 else
2887 card->options.large_send = QETH_LARGE_SEND_TSO;
2888 dev->features |= NETIF_F_TSO;
2889 }
2890 } else {
2891 dev->features &= ~NETIF_F_TSO;
2892 card->options.large_send = QETH_LARGE_SEND_NO;
2893 }
2894 return 0;
2895 }
2896
2897 static struct ethtool_ops qeth_l3_ethtool_ops = {
2898 .get_link = ethtool_op_get_link,
2899 .get_tx_csum = ethtool_op_get_tx_csum,
2900 .set_tx_csum = ethtool_op_set_tx_hw_csum,
2901 .get_rx_csum = qeth_l3_ethtool_get_rx_csum,
2902 .set_rx_csum = qeth_l3_ethtool_set_rx_csum,
2903 .get_sg = ethtool_op_get_sg,
2904 .set_sg = ethtool_op_set_sg,
2905 .get_tso = ethtool_op_get_tso,
2906 .set_tso = qeth_l3_ethtool_set_tso,
2907 .get_strings = qeth_core_get_strings,
2908 .get_ethtool_stats = qeth_core_get_ethtool_stats,
2909 .get_stats_count = qeth_core_get_stats_count,
2910 .get_drvinfo = qeth_core_get_drvinfo,
2911 .get_settings = qeth_core_ethtool_get_settings,
2912 };
2913
2914 /*
2915  * We need NOARP for IPv4 but we want neighbor solicitation for IPv6. Setting
2916  * NOARP on the netdevice is not an option because it also turns off neighbor
2917  * solicitation. For IPv4 we therefore install a neighbor_setup function. We
2918  * don't want ARP resolution, but we do want the hard header so that packet
2919  * sockets (e.g. tcpdump) still work.
2920 */
2921 static int qeth_l3_neigh_setup_noarp(struct neighbour *n)
2922 {
2923 n->nud_state = NUD_NOARP;
2924 memcpy(n->ha, "FAKELL", 6);
2925 n->output = n->ops->connected_output;
2926 return 0;
2927 }
2928
2929 static int
2930 qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np)
2931 {
2932 if (np->tbl->family == AF_INET)
2933 np->neigh_setup = qeth_l3_neigh_setup_noarp;
2934
2935 return 0;
2936 }
2937
2938 static const struct net_device_ops qeth_l3_netdev_ops = {
2939 .ndo_open = qeth_l3_open,
2940 .ndo_stop = qeth_l3_stop,
2941 .ndo_get_stats = qeth_get_stats,
2942 .ndo_start_xmit = qeth_l3_hard_start_xmit,
2943 .ndo_validate_addr = eth_validate_addr,
2944 .ndo_set_multicast_list = qeth_l3_set_multicast_list,
2945 .ndo_do_ioctl = qeth_l3_do_ioctl,
2946 .ndo_change_mtu = qeth_change_mtu,
2947 .ndo_vlan_rx_register = qeth_l3_vlan_rx_register,
2948 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
2949 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
2950 .ndo_tx_timeout = qeth_tx_timeout,
2951 };
2952
2953 static const struct net_device_ops qeth_l3_osa_netdev_ops = {
2954 .ndo_open = qeth_l3_open,
2955 .ndo_stop = qeth_l3_stop,
2956 .ndo_get_stats = qeth_get_stats,
2957 .ndo_start_xmit = qeth_l3_hard_start_xmit,
2958 .ndo_validate_addr = eth_validate_addr,
2959 .ndo_set_multicast_list = qeth_l3_set_multicast_list,
2960 .ndo_do_ioctl = qeth_l3_do_ioctl,
2961 .ndo_change_mtu = qeth_change_mtu,
2962 .ndo_vlan_rx_register = qeth_l3_vlan_rx_register,
2963 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
2964 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
2965 .ndo_tx_timeout = qeth_tx_timeout,
2966 .ndo_neigh_setup = qeth_l3_neigh_setup,
2967 };
2968
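/*
 * Allocate and register the net_device matching the card type (token ring or
 * ethernet for OSA, a NOARP "hsi" device for HiperSockets), wire up the
 * netdev and ethtool ops and announce VLAN hardware support.
 */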
2969 static int qeth_l3_setup_netdev(struct qeth_card *card)
2970 {
2971 if (card->info.type == QETH_CARD_TYPE_OSAE) {
2972 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
2973 (card->info.link_type == QETH_LINK_TYPE_HSTR)) {
2974 #ifdef CONFIG_TR
2975 card->dev = alloc_trdev(0);
2976 #endif
2977 if (!card->dev)
2978 return -ENODEV;
2979 card->dev->netdev_ops = &qeth_l3_netdev_ops;
2980 } else {
2981 card->dev = alloc_etherdev(0);
2982 if (!card->dev)
2983 return -ENODEV;
2984 card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
2985
2986 /*IPv6 address autoconfiguration stuff*/
2987 qeth_l3_get_unique_id(card);
2988 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
2989 card->dev->dev_id = card->info.unique_id &
2990 0xffff;
2991 }
2992 } else if (card->info.type == QETH_CARD_TYPE_IQD) {
2993 card->dev = alloc_netdev(0, "hsi%d", ether_setup);
2994 if (!card->dev)
2995 return -ENODEV;
2996 card->dev->flags |= IFF_NOARP;
2997 card->dev->netdev_ops = &qeth_l3_netdev_ops;
2998 qeth_l3_iqd_read_initial_mac(card);
2999 } else
3000 return -ENODEV;
3001
3002 card->dev->ml_priv = card;
3003 card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
3004 card->dev->mtu = card->info.initial_mtu;
3005 SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops);
3006 card->dev->features |= NETIF_F_HW_VLAN_TX |
3007 NETIF_F_HW_VLAN_RX |
3008 NETIF_F_HW_VLAN_FILTER;
3009
3010 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
3011 return register_netdev(card->dev);
3012 }
3013
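/*
 * QDIO input handler: on an activate check condition schedule a recovery,
 * otherwise process every signalled inbound buffer, return its pool entry
 * and give the buffer back to the hardware.
 */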
3014 static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
3015 unsigned int qdio_err, unsigned int queue, int first_element,
3016 int count, unsigned long card_ptr)
3017 {
3018 struct net_device *net_dev;
3019 struct qeth_card *card;
3020 struct qeth_qdio_buffer *buffer;
3021 int index;
3022 int i;
3023
3024 card = (struct qeth_card *) card_ptr;
3025 net_dev = card->dev;
3026 if (card->options.performance_stats) {
3027 card->perf_stats.inbound_cnt++;
3028 card->perf_stats.inbound_start_time = qeth_get_micros();
3029 }
3030 if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
3031 QETH_DBF_TEXT(TRACE, 1, "qdinchk");
3032 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
3033 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X",
3034 first_element, count);
3035 QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
3036 qeth_schedule_recovery(card);
3037 return;
3038 }
3039 for (i = first_element; i < (first_element + count); ++i) {
3040 index = i % QDIO_MAX_BUFFERS_PER_Q;
3041 buffer = &card->qdio.in_q->bufs[index];
3042 if (!(qdio_err &&
3043 qeth_check_qdio_errors(buffer->buffer,
3044 qdio_err, "qinerr")))
3045 qeth_l3_process_inbound_buffer(card, buffer, index);
3046 /* clear buffer and give back to hardware */
3047 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
3048 qeth_queue_input_buffer(card, index);
3049 }
3050 if (card->options.performance_stats)
3051 card->perf_stats.inbound_time += qeth_get_micros() -
3052 card->perf_stats.inbound_start_time;
3053 }
3054
3055 static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
3056 {
3057 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3058
3059 qeth_l3_create_device_attributes(&gdev->dev);
3060 card->options.layer2 = 0;
3061 card->discipline.input_handler = (qdio_handler_t *)
3062 qeth_l3_qdio_input_handler;
3063 card->discipline.output_handler = (qdio_handler_t *)
3064 qeth_qdio_output_handler;
3065 card->discipline.recover = qeth_l3_recover;
3066 return 0;
3067 }
3068
3069 static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
3070 {
3071 struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
3072
3073 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
3074
3075 if (cgdev->state == CCWGROUP_ONLINE) {
3076 card->use_hard_stop = 1;
3077 qeth_l3_set_offline(cgdev);
3078 }
3079
3080 if (card->dev) {
3081 unregister_netdev(card->dev);
3082 card->dev = NULL;
3083 }
3084
3085 qeth_l3_remove_device_attributes(&cgdev->dev);
3086 qeth_l3_clear_ip_list(card, 0, 0);
3087 qeth_l3_clear_ipato_list(card);
3088 return;
3089 }
3090
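/*
 * Bring the card online: set the ccw devices online, run the hardware setup,
 * create the net_device if necessary, then perform the softsetup (STARTLAN,
 * adapter parameters, IP assists, routing, QDIO queues). During recovery the
 * interface is reopened afterwards; on failure the card is stopped again and
 * the ccw devices are set offline.
 */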
3091 static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3092 {
3093 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3094 int rc = 0;
3095 enum qeth_card_states recover_flag;
3096
3097 BUG_ON(!card);
3098 QETH_DBF_TEXT(SETUP, 2, "setonlin");
3099 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
3100
3101 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
3102
3103 recover_flag = card->state;
3104 rc = ccw_device_set_online(CARD_RDEV(card));
3105 if (rc) {
3106 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3107 return -EIO;
3108 }
3109 rc = ccw_device_set_online(CARD_WDEV(card));
3110 if (rc) {
3111 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3112 return -EIO;
3113 }
3114 rc = ccw_device_set_online(CARD_DDEV(card));
3115 if (rc) {
3116 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3117 return -EIO;
3118 }
3119
3120 rc = qeth_core_hardsetup_card(card);
3121 if (rc) {
3122 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
3123 goto out_remove;
3124 }
3125
3126 qeth_l3_query_ipassists(card, QETH_PROT_IPV4);
3127
3128 if (!card->dev && qeth_l3_setup_netdev(card))
3129 goto out_remove;
3130
3131 card->state = CARD_STATE_HARDSETUP;
3132 qeth_print_status_message(card);
3133
3134 /* softsetup */
3135 QETH_DBF_TEXT(SETUP, 2, "softsetp");
3136
3137 rc = qeth_send_startlan(card);
3138 if (rc) {
3139 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3140 if (rc == 0xe080) {
3141 dev_warn(&card->gdev->dev,
3142 "The LAN is offline\n");
3143 card->lan_online = 0;
3144 }
3145 return rc;
3146 } else
3147 card->lan_online = 1;
3148 qeth_set_large_send(card, card->options.large_send);
3149
3150 rc = qeth_l3_setadapter_parms(card);
3151 if (rc)
3152 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
3153 rc = qeth_l3_start_ipassists(card);
3154 if (rc)
3155 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
3156 rc = qeth_l3_setrouting_v4(card);
3157 if (rc)
3158 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
3159 rc = qeth_l3_setrouting_v6(card);
3160 if (rc)
3161 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
3162 netif_tx_disable(card->dev);
3163
3164 rc = qeth_init_qdio_queues(card);
3165 if (rc) {
3166 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
3167 goto out_remove;
3168 }
3169 card->state = CARD_STATE_SOFTSETUP;
3170 netif_carrier_on(card->dev);
3171
3172 qeth_set_allowed_threads(card, 0xffffffff, 0);
3173 if (recover_flag == CARD_STATE_RECOVER) {
3174 if (recovery_mode)
3175 qeth_l3_open(card->dev);
3176 else {
3177 rtnl_lock();
3178 dev_open(card->dev);
3179 rtnl_unlock();
3180 }
3181 qeth_l3_set_multicast_list(card->dev);
3182 }
3183 	/* let user space know that the device is online */
3184 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
3185 return 0;
3186 out_remove:
3187 card->use_hard_stop = 1;
3188 qeth_l3_stop_card(card, 0);
3189 ccw_device_set_offline(CARD_DDEV(card));
3190 ccw_device_set_offline(CARD_WDEV(card));
3191 ccw_device_set_offline(CARD_RDEV(card));
3192 if (recover_flag == CARD_STATE_RECOVER)
3193 card->state = CARD_STATE_RECOVER;
3194 else
3195 card->state = CARD_STATE_DOWN;
3196 return -ENODEV;
3197 }
3198
3199 static int qeth_l3_set_online(struct ccwgroup_device *gdev)
3200 {
3201 return __qeth_l3_set_online(gdev, 0);
3202 }
3203
3204 static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
3205 int recovery_mode)
3206 {
3207 struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
3208 int rc = 0, rc2 = 0, rc3 = 0;
3209 enum qeth_card_states recover_flag;
3210
3211 QETH_DBF_TEXT(SETUP, 3, "setoffl");
3212 QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
3213
3214 if (card->dev && netif_carrier_ok(card->dev))
3215 netif_carrier_off(card->dev);
3216 recover_flag = card->state;
3217 qeth_l3_stop_card(card, recovery_mode);
3218 rc = ccw_device_set_offline(CARD_DDEV(card));
3219 rc2 = ccw_device_set_offline(CARD_WDEV(card));
3220 rc3 = ccw_device_set_offline(CARD_RDEV(card));
3221 if (!rc)
3222 rc = (rc2) ? rc2 : rc3;
3223 if (rc)
3224 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3225 if (recover_flag == CARD_STATE_UP)
3226 card->state = CARD_STATE_RECOVER;
3227 /* let user_space know that device is offline */
3228 kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
3229 return 0;
3230 }
3231
3232 static int qeth_l3_set_offline(struct ccwgroup_device *cgdev)
3233 {
3234 return __qeth_l3_set_offline(cgdev, 0);
3235 }
3236
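/*
 * Recovery thread: take the card offline with a hard stop and bring it back
 * online. If that fails, close the interface and warn the user.
 */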
3237 static int qeth_l3_recover(void *ptr)
3238 {
3239 struct qeth_card *card;
3240 int rc = 0;
3241
3242 card = (struct qeth_card *) ptr;
3243 QETH_DBF_TEXT(TRACE, 2, "recover1");
3244 QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
3245 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
3246 return 0;
3247 QETH_DBF_TEXT(TRACE, 2, "recover2");
3248 dev_warn(&card->gdev->dev,
3249 "A recovery process has been started for the device\n");
3250 card->use_hard_stop = 1;
3251 __qeth_l3_set_offline(card->gdev, 1);
3252 rc = __qeth_l3_set_online(card->gdev, 1);
3253 /* don't run another scheduled recovery */
3254 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
3255 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
3256 if (!rc)
3257 dev_info(&card->gdev->dev,
3258 "Device successfully recovered!\n");
3259 else {
3260 rtnl_lock();
3261 dev_close(card->dev);
3262 rtnl_unlock();
3263 dev_warn(&card->gdev->dev, "The qeth device driver "
3264 "failed to recover an error on the device\n");
3265 }
3266 return 0;
3267 }
3268
3269 static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
3270 {
3271 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3272 qeth_l3_clear_ip_list(card, 0, 0);
3273 qeth_qdio_clear_card(card, 0);
3274 qeth_clear_qdio_buffers(card);
3275 }
3276
3277 struct ccwgroup_driver qeth_l3_ccwgroup_driver = {
3278 .probe = qeth_l3_probe_device,
3279 .remove = qeth_l3_remove_device,
3280 .set_online = qeth_l3_set_online,
3281 .set_offline = qeth_l3_set_offline,
3282 .shutdown = qeth_l3_shutdown,
3283 };
3284 EXPORT_SYMBOL_GPL(qeth_l3_ccwgroup_driver);
3285
3286 static int qeth_l3_ip_event(struct notifier_block *this,
3287 unsigned long event, void *ptr)
3288 {
3289 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
3290 struct net_device *dev = (struct net_device *)ifa->ifa_dev->dev;
3291 struct qeth_ipaddr *addr;
3292 struct qeth_card *card;
3293
3294 if (dev_net(dev) != &init_net)
3295 return NOTIFY_DONE;
3296
3297 QETH_DBF_TEXT(TRACE, 3, "ipevent");
3298 card = qeth_l3_get_card_from_dev(dev);
3299 if (!card)
3300 return NOTIFY_DONE;
3301
3302 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
3303 if (addr != NULL) {
3304 addr->u.a4.addr = ifa->ifa_address;
3305 addr->u.a4.mask = ifa->ifa_mask;
3306 addr->type = QETH_IP_TYPE_NORMAL;
3307 } else
3308 goto out;
3309
3310 switch (event) {
3311 case NETDEV_UP:
3312 if (!qeth_l3_add_ip(card, addr))
3313 kfree(addr);
3314 break;
3315 case NETDEV_DOWN:
3316 if (!qeth_l3_delete_ip(card, addr))
3317 kfree(addr);
3318 break;
3319 default:
3320 break;
3321 }
3322 qeth_l3_set_ip_addr_list(card);
3323 out:
3324 return NOTIFY_DONE;
3325 }
3326
3327 static struct notifier_block qeth_l3_ip_notifier = {
3328 qeth_l3_ip_event,
3329 NULL,
3330 };
3331
3332 #ifdef CONFIG_QETH_IPV6
3333 /*
3334 * IPv6 event handler
3335 */
3336 static int qeth_l3_ip6_event(struct notifier_block *this,
3337 unsigned long event, void *ptr)
3338 {
3339 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
3340 struct net_device *dev = (struct net_device *)ifa->idev->dev;
3341 struct qeth_ipaddr *addr;
3342 struct qeth_card *card;
3343
3344 QETH_DBF_TEXT(TRACE, 3, "ip6event");
3345
3346 card = qeth_l3_get_card_from_dev(dev);
3347 if (!card)
3348 return NOTIFY_DONE;
3349 if (!qeth_is_supported(card, IPA_IPV6))
3350 return NOTIFY_DONE;
3351
3352 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
3353 if (addr != NULL) {
3354 memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
3355 addr->u.a6.pfxlen = ifa->prefix_len;
3356 addr->type = QETH_IP_TYPE_NORMAL;
3357 } else
3358 goto out;
3359
3360 switch (event) {
3361 case NETDEV_UP:
3362 if (!qeth_l3_add_ip(card, addr))
3363 kfree(addr);
3364 break;
3365 case NETDEV_DOWN:
3366 if (!qeth_l3_delete_ip(card, addr))
3367 kfree(addr);
3368 break;
3369 default:
3370 break;
3371 }
3372 qeth_l3_set_ip_addr_list(card);
3373 out:
3374 return NOTIFY_DONE;
3375 }
3376
3377 static struct notifier_block qeth_l3_ip6_notifier = {
3378 qeth_l3_ip6_event,
3379 NULL,
3380 };
3381 #endif
3382
3383 static int qeth_l3_register_notifiers(void)
3384 {
3385 int rc;
3386
3387 QETH_DBF_TEXT(TRACE, 5, "regnotif");
3388 rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
3389 if (rc)
3390 return rc;
3391 #ifdef CONFIG_QETH_IPV6
3392 rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier);
3393 if (rc) {
3394 unregister_inetaddr_notifier(&qeth_l3_ip_notifier);
3395 return rc;
3396 }
3397 #else
3398 pr_warning("There is no IPv6 support for the layer 3 discipline\n");
3399 #endif
3400 return 0;
3401 }
3402
3403 static void qeth_l3_unregister_notifiers(void)
3404 {
3405
3406 QETH_DBF_TEXT(TRACE, 5, "unregnot");
3407 BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
3408 #ifdef CONFIG_QETH_IPV6
3409 BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
3410 #endif /* QETH_IPV6 */
3411 }
3412
3413 static int __init qeth_l3_init(void)
3414 {
3415 int rc = 0;
3416
3417 pr_info("register layer 3 discipline\n");
3418 rc = qeth_l3_register_notifiers();
3419 return rc;
3420 }
3421
3422 static void __exit qeth_l3_exit(void)
3423 {
3424 qeth_l3_unregister_notifiers();
3425 pr_info("unregister layer 3 discipline\n");
3426 }
3427
3428 module_init(qeth_l3_init);
3429 module_exit(qeth_l3_exit);
3430 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
3431 MODULE_DESCRIPTION("qeth layer 3 discipline");
3432 MODULE_LICENSE("GPL");