1/*
2 * drivers/s390/net/qeth_l3_main.c
3 *
4 * Copyright IBM Corp. 2007, 2009
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#define KMSG_COMPONENT "qeth"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/string.h>
17#include <linux/errno.h>
18#include <linux/kernel.h>
19#include <linux/etherdevice.h>
20#include <linux/mii.h>
21#include <linux/ip.h>
22#include <linux/ipv6.h>
23#include <linux/inetdevice.h>
24#include <linux/igmp.h>
25#include <linux/slab.h>
26
27#include <net/ip.h>
28#include <net/arp.h>
29#include <net/ip6_checksum.h>
30
31#include "qeth_l3.h"
32
33static int qeth_l3_set_offline(struct ccwgroup_device *);
34static int qeth_l3_recover(void *);
35static int qeth_l3_stop(struct net_device *);
36static void qeth_l3_set_multicast_list(struct net_device *);
37static int qeth_l3_neigh_setup(struct net_device *, struct neigh_parms *);
38static int qeth_l3_register_addr_entry(struct qeth_card *,
39 struct qeth_ipaddr *);
40static int qeth_l3_deregister_addr_entry(struct qeth_card *,
41 struct qeth_ipaddr *);
42static int __qeth_l3_set_online(struct ccwgroup_device *, int);
43static int __qeth_l3_set_offline(struct ccwgroup_device *, int);
44
45int qeth_l3_set_large_send(struct qeth_card *card,
46 enum qeth_large_send_types type)
47{
48 int rc = 0;
49
50 card->options.large_send = type;
51 if (card->dev == NULL)
52 return 0;
53
54 if (card->options.large_send == QETH_LARGE_SEND_TSO) {
55 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
56 card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
57 NETIF_F_HW_CSUM;
58 } else {
59 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
60 NETIF_F_HW_CSUM);
61 card->options.large_send = QETH_LARGE_SEND_NO;
62 rc = -EOPNOTSUPP;
63 }
64 } else {
65 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
66 NETIF_F_HW_CSUM);
67 card->options.large_send = QETH_LARGE_SEND_NO;
68 }
69 return rc;
70}
71
72static int qeth_l3_isxdigit(char *buf)
73{
74 while (*buf) {
75 if (!isxdigit(*buf++))
76 return 0;
77 }
78 return 1;
79}
80
81void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
82{
83 sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]);
84}
85
86int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
87{
88 int count = 0, rc = 0;
89 int in[4];
90 char c;
91
92 rc = sscanf(buf, "%u.%u.%u.%u%c",
93 &in[0], &in[1], &in[2], &in[3], &c);
94 if (rc != 4 && (rc != 5 || c != '\n'))
95 return -EINVAL;
96 for (count = 0; count < 4; count++) {
97 if (in[count] > 255)
98 return -EINVAL;
99 addr[count] = in[count];
100 }
101 return 0;
102}
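/*
 * Illustrative sketch (not driver code): a user-space check of the parse
 * rules implemented above -- exactly four dotted-decimal octets, each at
 * most 255, optionally followed by a single trailing newline.  The name
 * demo_parse_ipv4 is invented for this example.
 */
#if 0	/* example only -- compile separately if desired */
#include <stdio.h>

static int demo_parse_ipv4(const char *buf)
{
	unsigned int in[4];
	char c;
	int i;
	int rc = sscanf(buf, "%u.%u.%u.%u%c",
			&in[0], &in[1], &in[2], &in[3], &c);

	if (rc != 4 && (rc != 5 || c != '\n'))
		return -1;
	for (i = 0; i < 4; i++)
		if (in[i] > 255)
			return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", demo_parse_ipv4("10.1.2.3"));	/* 0: accepted */
	printf("%d\n", demo_parse_ipv4("10.1.2.3\n"));	/* 0: trailing newline ok */
	printf("%d\n", demo_parse_ipv4("10.1.2.300"));	/* -1: octet > 255 */
	printf("%d\n", demo_parse_ipv4("10.1.2.3x"));	/* -1: trailing junk */
	return 0;
}
#endif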
103
104void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
105{
106 sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
107 ":%02x%02x:%02x%02x:%02x%02x:%02x%02x",
108 addr[0], addr[1], addr[2], addr[3],
109 addr[4], addr[5], addr[6], addr[7],
110 addr[8], addr[9], addr[10], addr[11],
111 addr[12], addr[13], addr[14], addr[15]);
112}
113
114int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
115{
116 const char *end, *end_tmp, *start;
117 __u16 *in;
118 char num[5];
119 int num2, cnt, out, found, save_cnt;
120 unsigned short in_tmp[8] = {0, };
121
122 cnt = out = found = save_cnt = num2 = 0;
123 end = start = buf;
124 in = (__u16 *) addr;
125 memset(in, 0, 16);
126 while (*end) {
127 end = strchr(start, ':');
128 if (end == NULL) {
129 end = buf + strlen(buf);
130 end_tmp = strchr(start, '\n');
131 if (end_tmp != NULL)
132 end = end_tmp;
133 out = 1;
134 }
135 if ((end - start)) {
136 memset(num, 0, 5);
137 if ((end - start) > 4)
138 return -EINVAL;
139 memcpy(num, start, end - start);
140 if (!qeth_l3_isxdigit(num))
141 return -EINVAL;
142 sscanf(start, "%x", &num2);
143 if (found)
144 in_tmp[save_cnt++] = num2;
145 else
146 in[cnt++] = num2;
147 if (out)
148 break;
149 } else {
150 if (found)
151 return -EINVAL;
152 found = 1;
153 }
154 start = ++end;
155 }
156 if (cnt + save_cnt > 8)
157 return -EINVAL;
158 cnt = 7;
159 while (save_cnt)
160 in[cnt--] = in_tmp[--save_cnt];
161 return 0;
162}
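/*
 * Worked example of the parser above (not driver code): for "fe80::1" the
 * groups before the "::" are written from the front (in[0] = 0xfe80) and
 * the groups after it are buffered in in_tmp[] and copied to the back once
 * the string ends (in[7] = 0x0001); everything in between stays zero from
 * the initial memset().  A second "::" makes the (end - start) == 0 case
 * hit with 'found' already set and is rejected with -EINVAL, as is any
 * group longer than four hex digits.
 */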
163
164void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
165 char *buf)
166{
167 if (proto == QETH_PROT_IPV4)
168 qeth_l3_ipaddr4_to_string(addr, buf);
169 else if (proto == QETH_PROT_IPV6)
170 qeth_l3_ipaddr6_to_string(addr, buf);
171}
172
173int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
174 __u8 *addr)
175{
176 if (proto == QETH_PROT_IPV4)
177 return qeth_l3_string_to_ipaddr4(buf, addr);
178 else if (proto == QETH_PROT_IPV6)
179 return qeth_l3_string_to_ipaddr6(buf, addr);
180 else
181 return -EINVAL;
182}
183
184static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
185{
186 int i, j;
187 u8 octet;
188
189 for (i = 0; i < len; ++i) {
190 octet = addr[i];
191 for (j = 7; j >= 0; --j) {
192 bits[i*8 + j] = octet & 1;
193 octet >>= 1;
194 }
195 }
196}
197
198static int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
199 struct qeth_ipaddr *addr)
200{
201 struct qeth_ipato_entry *ipatoe;
202 u8 addr_bits[128] = {0, };
203 u8 ipatoe_bits[128] = {0, };
204 int rc = 0;
205
206 if (!card->ipato.enabled)
207 return 0;
208
209 qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
210 (addr->proto == QETH_PROT_IPV4)? 4:16);
211 list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
212 if (addr->proto != ipatoe->proto)
213 continue;
214 qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
215 (ipatoe->proto == QETH_PROT_IPV4) ?
216 4 : 16);
217 if (addr->proto == QETH_PROT_IPV4)
218 rc = !memcmp(addr_bits, ipatoe_bits,
219 min(32, ipatoe->mask_bits));
220 else
221 rc = !memcmp(addr_bits, ipatoe_bits,
222 min(128, ipatoe->mask_bits));
223 if (rc)
224 break;
225 }
226 /* invert? */
227 if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
228 rc = !rc;
229 else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
230 rc = !rc;
231
232 return rc;
233}
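/*
 * Illustrative sketch (not driver code): the IPATO check above expands each
 * address into one byte per bit and memcmp()s the first mask_bits entries,
 * i.e. a plain prefix match.  The names demo_addr_to_bits and the variables
 * below are invented for this user-space model.
 */
#if 0	/* example only -- compile separately if desired */
#include <stdio.h>
#include <string.h>

static void demo_addr_to_bits(const unsigned char *addr, unsigned char *bits,
			      int len)
{
	int i, j;

	/* same expansion as qeth_l3_convert_addr_to_bits(): MSB first */
	for (i = 0; i < len; i++)
		for (j = 0; j < 8; j++)
			bits[i * 8 + j] = (addr[i] >> (7 - j)) & 1;
}

int main(void)
{
	unsigned char a[4] = { 10, 1, 20, 30 };	/* candidate address */
	unsigned char p[4] = { 10, 1, 0, 0 };	/* IPATO entry 10.1.0.0/16 */
	unsigned char ab[32], pb[32];

	demo_addr_to_bits(a, ab, 4);
	demo_addr_to_bits(p, pb, 4);
	/* covered when the first 16 expanded bits match */
	printf("covered=%d\n", memcmp(ab, pb, 16) == 0);
	return 0;
}
#endif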
234
235/*
236 * Add an IP address to the todo list. If there is already an "add" todo
237 * for this address in the list, we just increment its reference count.
238 * Returns 0 if we only incremented the reference count.
239 */
240static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
241 struct qeth_ipaddr *addr, int add)
242{
243 struct qeth_ipaddr *tmp, *t;
244 int found = 0;
245
246 if (card->options.sniffer)
247 return 0;
248 list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
249 if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
250 (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
251 return 0;
252 if ((tmp->proto == QETH_PROT_IPV4) &&
253 (addr->proto == QETH_PROT_IPV4) &&
254 (tmp->type == addr->type) &&
255 (tmp->is_multicast == addr->is_multicast) &&
256 (tmp->u.a4.addr == addr->u.a4.addr) &&
257 (tmp->u.a4.mask == addr->u.a4.mask)) {
258 found = 1;
259 break;
260 }
261 if ((tmp->proto == QETH_PROT_IPV6) &&
262 (addr->proto == QETH_PROT_IPV6) &&
263 (tmp->type == addr->type) &&
264 (tmp->is_multicast == addr->is_multicast) &&
265 (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
266 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
267 sizeof(struct in6_addr)) == 0)) {
268 found = 1;
269 break;
270 }
271 }
272 if (found) {
273 if (addr->users != 0)
274 tmp->users += addr->users;
275 else
276 tmp->users += add ? 1 : -1;
277 if (tmp->users == 0) {
278 list_del(&tmp->entry);
279 kfree(tmp);
280 }
281 return 0;
282 } else {
283 if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
284 list_add(&addr->entry, card->ip_tbd_list);
285 else {
286 if (addr->users == 0)
287 addr->users += add ? 1 : -1;
288 if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
289 qeth_l3_is_addr_covered_by_ipato(card, addr)) {
290 QETH_DBF_TEXT(TRACE, 2, "tkovaddr");
291 addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
292 }
293 list_add_tail(&addr->entry, card->ip_tbd_list);
294 }
295 return 1;
296 }
297}
298
299static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
300{
301 unsigned long flags;
302 int rc = 0;
303
304 QETH_DBF_TEXT(TRACE, 4, "delip");
305
306 if (addr->proto == QETH_PROT_IPV4)
307 QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4);
308 else {
309 QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8);
310 QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8);
311 }
312 spin_lock_irqsave(&card->ip_lock, flags);
313 rc = __qeth_l3_insert_ip_todo(card, addr, 0);
314 spin_unlock_irqrestore(&card->ip_lock, flags);
315 return rc;
316}
317
318static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
319{
320 unsigned long flags;
321 int rc = 0;
322
323 QETH_DBF_TEXT(TRACE, 4, "addip");
324 if (addr->proto == QETH_PROT_IPV4)
325 QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4);
326 else {
327 QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8);
328 QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8);
329 }
330 spin_lock_irqsave(&card->ip_lock, flags);
331 rc = __qeth_l3_insert_ip_todo(card, addr, 1);
332 spin_unlock_irqrestore(&card->ip_lock, flags);
333 return rc;
334}
335
336
337static struct qeth_ipaddr *qeth_l3_get_addr_buffer(
338 enum qeth_prot_versions prot)
339{
340 struct qeth_ipaddr *addr;
341
342 addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
343 if (addr == NULL) {
344 return NULL;
345 }
346 addr->type = QETH_IP_TYPE_NORMAL;
347 addr->proto = prot;
348 return addr;
349}
350
351static void qeth_l3_delete_mc_addresses(struct qeth_card *card)
352{
353 struct qeth_ipaddr *iptodo;
354 unsigned long flags;
355
356 QETH_DBF_TEXT(TRACE, 4, "delmc");
357 iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
358 if (!iptodo) {
359 QETH_DBF_TEXT(TRACE, 2, "dmcnomem");
360 return;
361 }
362 iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
363 spin_lock_irqsave(&card->ip_lock, flags);
364 if (!__qeth_l3_insert_ip_todo(card, iptodo, 0))
365 kfree(iptodo);
366 spin_unlock_irqrestore(&card->ip_lock, flags);
367}
368
369/*
370 * Add/remove address to/from card's ip list, i.e. try to add or remove
371 * reference to/from an IP address that is already registered on the card.
372 * Returns:
373 * 0 address was on card and its reference count has been adjusted,
374 * but is still > 0, so nothing has to be done
375 also returns 0 if the address was not on the card and the todo was to delete
376 * the address -> there is also nothing to be done
377 * 1 address was not on card and the todo is to add it to the card's ip
378 * list
379 * -1 address was on card and its reference count has been decremented
380 * to <= 0 by the todo -> address must be removed from card
381 */
382static int __qeth_l3_ref_ip_on_card(struct qeth_card *card,
383 struct qeth_ipaddr *todo, struct qeth_ipaddr **__addr)
384{
385 struct qeth_ipaddr *addr;
386 int found = 0;
387
388 list_for_each_entry(addr, &card->ip_list, entry) {
389 if ((addr->proto == QETH_PROT_IPV4) &&
390 (todo->proto == QETH_PROT_IPV4) &&
391 (addr->type == todo->type) &&
392 (addr->u.a4.addr == todo->u.a4.addr) &&
393 (addr->u.a4.mask == todo->u.a4.mask)) {
394 found = 1;
395 break;
396 }
397 if ((addr->proto == QETH_PROT_IPV6) &&
398 (todo->proto == QETH_PROT_IPV6) &&
399 (addr->type == todo->type) &&
400 (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
401 (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
402 sizeof(struct in6_addr)) == 0)) {
403 found = 1;
404 break;
405 }
406 }
407 if (found) {
408 addr->users += todo->users;
409 if (addr->users <= 0) {
410 *__addr = addr;
411 return -1;
412 } else {
413 /* for VIPA and RXIP limit refcount to 1 */
414 if (addr->type != QETH_IP_TYPE_NORMAL)
415 addr->users = 1;
416 return 0;
417 }
418 }
419 if (todo->users > 0) {
420 /* for VIPA and RXIP limit refcount to 1 */
421 if (todo->type != QETH_IP_TYPE_NORMAL)
422 todo->users = 1;
423 return 1;
424 } else
425 return 0;
426}
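/*
 * Example of the contract above (not driver code): if 1.2.3.4 is on the
 * ip_list with users == 1 and a delete todo arrives carrying users == -1,
 * the count drops to 0, *__addr is set and -1 is returned so the caller
 * deregisters the address; an add todo for an address that is not yet on
 * the list returns 1 and the caller registers it.
 */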
427
428static void __qeth_l3_delete_all_mc(struct qeth_card *card,
429 unsigned long *flags)
430{
431 struct list_head fail_list;
432 struct qeth_ipaddr *addr, *tmp;
433 int rc;
434
435 INIT_LIST_HEAD(&fail_list);
436again:
437 list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
438 if (addr->is_multicast) {
439 list_del(&addr->entry);
440 spin_unlock_irqrestore(&card->ip_lock, *flags);
441 rc = qeth_l3_deregister_addr_entry(card, addr);
442 spin_lock_irqsave(&card->ip_lock, *flags);
443 if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND))
444 kfree(addr);
445 else
446 list_add_tail(&addr->entry, &fail_list);
447 goto again;
448 }
449 }
450 list_splice(&fail_list, &card->ip_list);
451}
452
453static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
454{
455 struct list_head *tbd_list;
456 struct qeth_ipaddr *todo, *addr;
457 unsigned long flags;
458 int rc;
459
460 QETH_DBF_TEXT(TRACE, 2, "sdiplist");
461 QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
462
463 if (card->options.sniffer)
464 return;
465 spin_lock_irqsave(&card->ip_lock, flags);
466 tbd_list = card->ip_tbd_list;
467 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
468 if (!card->ip_tbd_list) {
469 QETH_DBF_TEXT(TRACE, 0, "silnomem");
470 card->ip_tbd_list = tbd_list;
471 spin_unlock_irqrestore(&card->ip_lock, flags);
472 return;
473 } else
474 INIT_LIST_HEAD(card->ip_tbd_list);
475
476 while (!list_empty(tbd_list)) {
477 todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
478 list_del(&todo->entry);
479 if (todo->type == QETH_IP_TYPE_DEL_ALL_MC) {
480 __qeth_l3_delete_all_mc(card, &flags);
481 kfree(todo);
482 continue;
483 }
484 rc = __qeth_l3_ref_ip_on_card(card, todo, &addr);
485 if (rc == 0) {
486 /* nothing to be done; only adjusted refcount */
487 kfree(todo);
488 } else if (rc == 1) {
489 /* new entry to be added to on-card list */
490 spin_unlock_irqrestore(&card->ip_lock, flags);
491 rc = qeth_l3_register_addr_entry(card, todo);
492 spin_lock_irqsave(&card->ip_lock, flags);
493 if (!rc || (rc == IPA_RC_LAN_OFFLINE))
494 list_add_tail(&todo->entry, &card->ip_list);
495 else
496 kfree(todo);
497 } else if (rc == -1) {
498 /* on-card entry to be removed */
499 list_del_init(&addr->entry);
500 spin_unlock_irqrestore(&card->ip_lock, flags);
501 rc = qeth_l3_deregister_addr_entry(card, addr);
502 spin_lock_irqsave(&card->ip_lock, flags);
503 if (!rc || (rc == IPA_RC_IP_ADDRESS_NOT_DEFINED))
504 kfree(addr);
505 else
506 list_add_tail(&addr->entry, &card->ip_list);
507 kfree(todo);
508 }
509 }
510 spin_unlock_irqrestore(&card->ip_lock, flags);
511 kfree(tbd_list);
512}
513
514static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
515 int recover)
516{
517 struct qeth_ipaddr *addr, *tmp;
518 unsigned long flags;
519
520 QETH_DBF_TEXT(TRACE, 4, "clearip");
521 if (recover && card->options.sniffer)
522 return;
523 spin_lock_irqsave(&card->ip_lock, flags);
524 /* clear todo list */
525 list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) {
526 list_del(&addr->entry);
527 kfree(addr);
528 }
529
530 while (!list_empty(&card->ip_list)) {
531 addr = list_entry(card->ip_list.next,
532 struct qeth_ipaddr, entry);
533 list_del_init(&addr->entry);
534 if (clean) {
535 spin_unlock_irqrestore(&card->ip_lock, flags);
536 qeth_l3_deregister_addr_entry(card, addr);
537 spin_lock_irqsave(&card->ip_lock, flags);
538 }
539 if (!recover || addr->is_multicast) {
540 kfree(addr);
541 continue;
542 }
543 list_add_tail(&addr->entry, card->ip_tbd_list);
544 }
545 spin_unlock_irqrestore(&card->ip_lock, flags);
546}
547
548static int qeth_l3_address_exists_in_list(struct list_head *list,
549 struct qeth_ipaddr *addr, int same_type)
550{
551 struct qeth_ipaddr *tmp;
552
553 list_for_each_entry(tmp, list, entry) {
554 if ((tmp->proto == QETH_PROT_IPV4) &&
555 (addr->proto == QETH_PROT_IPV4) &&
556 ((same_type && (tmp->type == addr->type)) ||
557 (!same_type && (tmp->type != addr->type))) &&
558 (tmp->u.a4.addr == addr->u.a4.addr))
559 return 1;
560
561 if ((tmp->proto == QETH_PROT_IPV6) &&
562 (addr->proto == QETH_PROT_IPV6) &&
563 ((same_type && (tmp->type == addr->type)) ||
564 (!same_type && (tmp->type != addr->type))) &&
565 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
566 sizeof(struct in6_addr)) == 0))
567 return 1;
568
569 }
570 return 0;
571}
572
573static int qeth_l3_send_setdelmc(struct qeth_card *card,
574 struct qeth_ipaddr *addr, int ipacmd)
575{
576 int rc;
577 struct qeth_cmd_buffer *iob;
578 struct qeth_ipa_cmd *cmd;
579
580 QETH_DBF_TEXT(TRACE, 4, "setdelmc");
581
582 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
583 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
584 memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
585 if (addr->proto == QETH_PROT_IPV6)
586 memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
587 sizeof(struct in6_addr));
588 else
589 memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4);
590
591 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
592
593 return rc;
594}
595
596static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len)
597{
598 int i, j;
599 for (i = 0; i < 16; i++) {
600 j = (len) - (i * 8);
601 if (j >= 8)
602 netmask[i] = 0xff;
603 else if (j > 0)
604 netmask[i] = (u8)(0xFF00 >> j);
605 else
606 netmask[i] = 0;
607 }
608}
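/*
 * Illustrative sketch (not driver code): the same prefix-length-to-netmask
 * construction as above, run in user space for a /20 prefix.  The name
 * demo_fill_netmask is invented for this example.
 */
#if 0	/* example only -- compile separately if desired */
#include <stdio.h>

static void demo_fill_netmask(unsigned char *netmask, int len)
{
	int i, j;

	/* whole 0xff bytes while more than eight mask bits remain,
	 * one partial byte, then zeros -- as in qeth_l3_fill_netmask() */
	for (i = 0; i < 16; i++) {
		j = len - i * 8;
		if (j >= 8)
			netmask[i] = 0xff;
		else if (j > 0)
			netmask[i] = (unsigned char)(0xff00 >> j);
		else
			netmask[i] = 0;
	}
}

int main(void)
{
	unsigned char m[16];
	int i;

	demo_fill_netmask(m, 20);	/* /20 -> ff ff f0 00 ... */
	for (i = 0; i < 16; i++)
		printf("%02x ", m[i]);
	printf("\n");
	return 0;
}
#endif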
609
610static int qeth_l3_send_setdelip(struct qeth_card *card,
611 struct qeth_ipaddr *addr, int ipacmd, unsigned int flags)
612{
613 int rc;
614 struct qeth_cmd_buffer *iob;
615 struct qeth_ipa_cmd *cmd;
616 __u8 netmask[16];
617
618 QETH_DBF_TEXT(TRACE, 4, "setdelip");
619 QETH_DBF_TEXT_(TRACE, 4, "flags%02X", flags);
620
621 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
622 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
623 if (addr->proto == QETH_PROT_IPV6) {
624 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
625 sizeof(struct in6_addr));
626 qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen);
627 memcpy(cmd->data.setdelip6.mask, netmask,
628 sizeof(struct in6_addr));
629 cmd->data.setdelip6.flags = flags;
630 } else {
631 memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
632 memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
633 cmd->data.setdelip4.flags = flags;
634 }
635
636 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
637
638 return rc;
639}
640
641static int qeth_l3_send_setrouting(struct qeth_card *card,
642 enum qeth_routing_types type, enum qeth_prot_versions prot)
643{
644 int rc;
645 struct qeth_ipa_cmd *cmd;
646 struct qeth_cmd_buffer *iob;
647
648 QETH_DBF_TEXT(TRACE, 4, "setroutg");
649 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
650 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
651 cmd->data.setrtg.type = (type);
652 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
653
654 return rc;
655}
656
657static void qeth_l3_correct_routing_type(struct qeth_card *card,
658 enum qeth_routing_types *type, enum qeth_prot_versions prot)
659{
660 if (card->info.type == QETH_CARD_TYPE_IQD) {
661 switch (*type) {
662 case NO_ROUTER:
663 case PRIMARY_CONNECTOR:
664 case SECONDARY_CONNECTOR:
665 case MULTICAST_ROUTER:
666 return;
667 default:
668 goto out_inval;
669 }
670 } else {
671 switch (*type) {
672 case NO_ROUTER:
673 case PRIMARY_ROUTER:
674 case SECONDARY_ROUTER:
675 return;
676 case MULTICAST_ROUTER:
677 if (qeth_is_ipafunc_supported(card, prot,
678 IPA_OSA_MC_ROUTER))
679 return;
680 default:
681 goto out_inval;
682 }
683 }
684out_inval:
685 *type = NO_ROUTER;
686}
687
688int qeth_l3_setrouting_v4(struct qeth_card *card)
689{
690 int rc;
691
692 QETH_DBF_TEXT(TRACE, 3, "setrtg4");
693
694 qeth_l3_correct_routing_type(card, &card->options.route4.type,
695 QETH_PROT_IPV4);
696
697 rc = qeth_l3_send_setrouting(card, card->options.route4.type,
698 QETH_PROT_IPV4);
699 if (rc) {
700 card->options.route4.type = NO_ROUTER;
701 QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
702 " on %s. Type set to 'no router'.\n", rc,
703 QETH_CARD_IFNAME(card));
704 }
705 return rc;
706}
707
708int qeth_l3_setrouting_v6(struct qeth_card *card)
709{
710 int rc = 0;
711
712 QETH_DBF_TEXT(TRACE, 3, "setrtg6");
713#ifdef CONFIG_QETH_IPV6
714
715 if (!qeth_is_supported(card, IPA_IPV6))
716 return 0;
717 qeth_l3_correct_routing_type(card, &card->options.route6.type,
718 QETH_PROT_IPV6);
719
720 rc = qeth_l3_send_setrouting(card, card->options.route6.type,
721 QETH_PROT_IPV6);
722 if (rc) {
723 card->options.route6.type = NO_ROUTER;
724 QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
725 " on %s. Type set to 'no router'.\n", rc,
726 QETH_CARD_IFNAME(card));
727 }
728#endif
729 return rc;
730}
731
732/*
733 * IP address takeover related functions
734 */
735static void qeth_l3_clear_ipato_list(struct qeth_card *card)
736{
737
738 struct qeth_ipato_entry *ipatoe, *tmp;
739 unsigned long flags;
740
741 spin_lock_irqsave(&card->ip_lock, flags);
742 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
743 list_del(&ipatoe->entry);
744 kfree(ipatoe);
745 }
746 spin_unlock_irqrestore(&card->ip_lock, flags);
747}
748
749int qeth_l3_add_ipato_entry(struct qeth_card *card,
750 struct qeth_ipato_entry *new)
751{
752 struct qeth_ipato_entry *ipatoe;
753 unsigned long flags;
754 int rc = 0;
755
756 QETH_DBF_TEXT(TRACE, 2, "addipato");
757 spin_lock_irqsave(&card->ip_lock, flags);
758 list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
759 if (ipatoe->proto != new->proto)
760 continue;
761 if (!memcmp(ipatoe->addr, new->addr,
762 (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
763 (ipatoe->mask_bits == new->mask_bits)) {
764 rc = -EEXIST;
765 break;
766 }
767 }
768 if (!rc)
769 list_add_tail(&new->entry, &card->ipato.entries);
770
771 spin_unlock_irqrestore(&card->ip_lock, flags);
772 return rc;
773}
774
775void qeth_l3_del_ipato_entry(struct qeth_card *card,
776 enum qeth_prot_versions proto, u8 *addr, int mask_bits)
777{
778 struct qeth_ipato_entry *ipatoe, *tmp;
779 unsigned long flags;
780
781 QETH_DBF_TEXT(TRACE, 2, "delipato");
782 spin_lock_irqsave(&card->ip_lock, flags);
783 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
784 if (ipatoe->proto != proto)
785 continue;
786 if (!memcmp(ipatoe->addr, addr,
787 (proto == QETH_PROT_IPV4)? 4:16) &&
788 (ipatoe->mask_bits == mask_bits)) {
789 list_del(&ipatoe->entry);
790 kfree(ipatoe);
791 }
792 }
793 spin_unlock_irqrestore(&card->ip_lock, flags);
794}
795
796/*
797 * VIPA related functions
798 */
799int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
800 const u8 *addr)
801{
802 struct qeth_ipaddr *ipaddr;
803 unsigned long flags;
804 int rc = 0;
805
806 ipaddr = qeth_l3_get_addr_buffer(proto);
807 if (ipaddr) {
808 if (proto == QETH_PROT_IPV4) {
809 QETH_DBF_TEXT(TRACE, 2, "addvipa4");
810 memcpy(&ipaddr->u.a4.addr, addr, 4);
811 ipaddr->u.a4.mask = 0;
812 } else if (proto == QETH_PROT_IPV6) {
813 QETH_DBF_TEXT(TRACE, 2, "addvipa6");
814 memcpy(&ipaddr->u.a6.addr, addr, 16);
815 ipaddr->u.a6.pfxlen = 0;
816 }
817 ipaddr->type = QETH_IP_TYPE_VIPA;
818 ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG;
819 ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
820 } else
821 return -ENOMEM;
822 spin_lock_irqsave(&card->ip_lock, flags);
823 if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
824 qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
825 rc = -EEXIST;
826 spin_unlock_irqrestore(&card->ip_lock, flags);
827 if (rc) {
828 return rc;
829 }
830 if (!qeth_l3_add_ip(card, ipaddr))
831 kfree(ipaddr);
832 qeth_l3_set_ip_addr_list(card);
833 return rc;
834}
835
836void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
837 const u8 *addr)
838{
839 struct qeth_ipaddr *ipaddr;
840
841 ipaddr = qeth_l3_get_addr_buffer(proto);
842 if (ipaddr) {
843 if (proto == QETH_PROT_IPV4) {
844 QETH_DBF_TEXT(TRACE, 2, "delvipa4");
845 memcpy(&ipaddr->u.a4.addr, addr, 4);
846 ipaddr->u.a4.mask = 0;
847 } else if (proto == QETH_PROT_IPV6) {
848 QETH_DBF_TEXT(TRACE, 2, "delvipa6");
849 memcpy(&ipaddr->u.a6.addr, addr, 16);
850 ipaddr->u.a6.pfxlen = 0;
851 }
852 ipaddr->type = QETH_IP_TYPE_VIPA;
853 } else
854 return;
855 if (!qeth_l3_delete_ip(card, ipaddr))
856 kfree(ipaddr);
857 qeth_l3_set_ip_addr_list(card);
858}
859
860/*
861 * proxy ARP related functions
862 */
863int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
864 const u8 *addr)
865{
866 struct qeth_ipaddr *ipaddr;
867 unsigned long flags;
868 int rc = 0;
869
870 ipaddr = qeth_l3_get_addr_buffer(proto);
871 if (ipaddr) {
872 if (proto == QETH_PROT_IPV4) {
873 QETH_DBF_TEXT(TRACE, 2, "addrxip4");
874 memcpy(&ipaddr->u.a4.addr, addr, 4);
875 ipaddr->u.a4.mask = 0;
876 } else if (proto == QETH_PROT_IPV6) {
877 QETH_DBF_TEXT(TRACE, 2, "addrxip6");
878 memcpy(&ipaddr->u.a6.addr, addr, 16);
879 ipaddr->u.a6.pfxlen = 0;
880 }
881 ipaddr->type = QETH_IP_TYPE_RXIP;
882 ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
883 ipaddr->del_flags = 0;
884 } else
885 return -ENOMEM;
886 spin_lock_irqsave(&card->ip_lock, flags);
887 if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
888 qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
889 rc = -EEXIST;
890 spin_unlock_irqrestore(&card->ip_lock, flags);
891 if (rc) {
892 return rc;
893 }
894 if (!qeth_l3_add_ip(card, ipaddr))
895 kfree(ipaddr);
896 qeth_l3_set_ip_addr_list(card);
897 return 0;
898}
899
900void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
901 const u8 *addr)
902{
903 struct qeth_ipaddr *ipaddr;
904
905 ipaddr = qeth_l3_get_addr_buffer(proto);
906 if (ipaddr) {
907 if (proto == QETH_PROT_IPV4) {
908 QETH_DBF_TEXT(TRACE, 2, "delrxip4");
909 memcpy(&ipaddr->u.a4.addr, addr, 4);
910 ipaddr->u.a4.mask = 0;
911 } else if (proto == QETH_PROT_IPV6) {
912 QETH_DBF_TEXT(TRACE, 2, "delrxip6");
913 memcpy(&ipaddr->u.a6.addr, addr, 16);
914 ipaddr->u.a6.pfxlen = 0;
915 }
916 ipaddr->type = QETH_IP_TYPE_RXIP;
917 } else
918 return;
919 if (!qeth_l3_delete_ip(card, ipaddr))
920 kfree(ipaddr);
921 qeth_l3_set_ip_addr_list(card);
922}
923
924static int qeth_l3_register_addr_entry(struct qeth_card *card,
925 struct qeth_ipaddr *addr)
926{
927 char buf[50];
928 int rc = 0;
929 int cnt = 3;
930
931 if (addr->proto == QETH_PROT_IPV4) {
932 QETH_DBF_TEXT(TRACE, 2, "setaddr4");
933 QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int));
934 } else if (addr->proto == QETH_PROT_IPV6) {
935 QETH_DBF_TEXT(TRACE, 2, "setaddr6");
936 QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8);
937 QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8);
938 } else {
939 QETH_DBF_TEXT(TRACE, 2, "setaddr?");
940 QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr));
941 }
942 do {
943 if (addr->is_multicast)
944 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_SETIPM);
945 else
946 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP,
947 addr->set_flags);
948 if (rc)
949 QETH_DBF_TEXT(TRACE, 2, "failed");
950 } while ((--cnt > 0) && rc);
951 if (rc) {
952 QETH_DBF_TEXT(TRACE, 2, "FAILED");
953 qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
954 dev_warn(&card->gdev->dev,
955 "Registering IP address %s failed\n", buf);
956 }
957 return rc;
958}
959
960static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
961 struct qeth_ipaddr *addr)
962{
963 int rc = 0;
964
965 if (addr->proto == QETH_PROT_IPV4) {
966 QETH_DBF_TEXT(TRACE, 2, "deladdr4");
967 QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int));
968 } else if (addr->proto == QETH_PROT_IPV6) {
969 QETH_DBF_TEXT(TRACE, 2, "deladdr6");
970 QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8);
971 QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8);
972 } else {
973 QETH_DBF_TEXT(TRACE, 2, "deladdr?");
974 QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr));
975 }
976 if (addr->is_multicast)
977 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM);
978 else
979 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP,
980 addr->del_flags);
981 if (rc)
982 QETH_DBF_TEXT(TRACE, 2, "failed");
983
984 return rc;
985}
986
987static inline u8 qeth_l3_get_qeth_hdr_flags4(int cast_type)
988{
989 if (cast_type == RTN_MULTICAST)
990 return QETH_CAST_MULTICAST;
991 if (cast_type == RTN_BROADCAST)
992 return QETH_CAST_BROADCAST;
993 return QETH_CAST_UNICAST;
994}
995
996static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type)
997{
998 u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
999 if (cast_type == RTN_MULTICAST)
1000 return ct | QETH_CAST_MULTICAST;
1001 if (cast_type == RTN_ANYCAST)
1002 return ct | QETH_CAST_ANYCAST;
1003 if (cast_type == RTN_BROADCAST)
1004 return ct | QETH_CAST_BROADCAST;
1005 return ct | QETH_CAST_UNICAST;
1006}
1007
1008static int qeth_l3_send_setadp_mode(struct qeth_card *card, __u32 command,
1009 __u32 mode)
1010{
1011 int rc;
1012 struct qeth_cmd_buffer *iob;
1013 struct qeth_ipa_cmd *cmd;
1014
1015 QETH_DBF_TEXT(TRACE, 4, "adpmode");
1016
1017 iob = qeth_get_adapter_cmd(card, command,
1018 sizeof(struct qeth_ipacmd_setadpparms));
1019 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1020 cmd->data.setadapterparms.data.mode = mode;
1021 rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb,
1022 NULL);
1023 return rc;
1024}
1025
1026static int qeth_l3_setadapter_hstr(struct qeth_card *card)
1027{
1028 int rc;
1029
1030 QETH_DBF_TEXT(TRACE, 4, "adphstr");
1031
1032 if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) {
1033 rc = qeth_l3_send_setadp_mode(card,
1034 IPA_SETADP_SET_BROADCAST_MODE,
1035 card->options.broadcast_mode);
1036 if (rc)
1037 QETH_DBF_MESSAGE(2, "couldn't set broadcast mode on "
1038 "device %s: x%x\n",
1039 CARD_BUS_ID(card), rc);
1040 rc = qeth_l3_send_setadp_mode(card,
1041 IPA_SETADP_ALTER_MAC_ADDRESS,
1042 card->options.macaddr_mode);
1043 if (rc)
1044 QETH_DBF_MESSAGE(2, "couldn't set macaddr mode on "
1045 "device %s: x%x\n", CARD_BUS_ID(card), rc);
1046 return rc;
1047 }
1048 if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
1049 QETH_DBF_MESSAGE(2, "set adapter parameters not available "
1050 "to set broadcast mode, using ALLRINGS "
1051 "on device %s:\n", CARD_BUS_ID(card));
1052 if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
1053 QETH_DBF_MESSAGE(2, "set adapter parameters not available "
1054 "to set macaddr mode, using NONCANONICAL "
1055 "on device %s:\n", CARD_BUS_ID(card));
1056 return 0;
1057}
1058
1059static int qeth_l3_setadapter_parms(struct qeth_card *card)
1060{
1061 int rc;
1062
1063 QETH_DBF_TEXT(SETUP, 2, "setadprm");
1064
1065 if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
1066 dev_info(&card->gdev->dev,
1067 "set adapter parameters not supported.\n");
1068 QETH_DBF_TEXT(SETUP, 2, " notsupp");
1069 return 0;
1070 }
1071 rc = qeth_query_setadapterparms(card);
1072 if (rc) {
1073 QETH_DBF_MESSAGE(2, "%s couldn't set adapter parameters: "
1074 "0x%x\n", dev_name(&card->gdev->dev), rc);
1075 return rc;
1076 }
1077 if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) {
1078 rc = qeth_setadpparms_change_macaddr(card);
1079 if (rc)
1080 dev_warn(&card->gdev->dev, "Reading the adapter MAC"
1081 " address failed\n");
1082 }
1083
1084 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
1085 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
1086 rc = qeth_l3_setadapter_hstr(card);
1087
1088 return rc;
1089}
1090
1091static int qeth_l3_default_setassparms_cb(struct qeth_card *card,
1092 struct qeth_reply *reply, unsigned long data)
1093{
1094 struct qeth_ipa_cmd *cmd;
1095
1096 QETH_DBF_TEXT(TRACE, 4, "defadpcb");
1097
1098 cmd = (struct qeth_ipa_cmd *) data;
1099 if (cmd->hdr.return_code == 0) {
1100 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
1101 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
1102 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
1103 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
1104 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
1105 }
1106 if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
1107 cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
1108 card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
1109 QETH_DBF_TEXT_(TRACE, 3, "csum:%d", card->info.csum_mask);
1110 }
1111 return 0;
1112}
1113
1114static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(
1115 struct qeth_card *card, enum qeth_ipa_funcs ipa_func, __u16 cmd_code,
1116 __u16 len, enum qeth_prot_versions prot)
1117{
1118 struct qeth_cmd_buffer *iob;
1119 struct qeth_ipa_cmd *cmd;
1120
1121 QETH_DBF_TEXT(TRACE, 4, "getasscm");
1122 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
1123
1124 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1125 cmd->data.setassparms.hdr.assist_no = ipa_func;
1126 cmd->data.setassparms.hdr.length = 8 + len;
1127 cmd->data.setassparms.hdr.command_code = cmd_code;
1128 cmd->data.setassparms.hdr.return_code = 0;
1129 cmd->data.setassparms.hdr.seq_no = 0;
1130
1131 return iob;
1132}
1133
1134static int qeth_l3_send_setassparms(struct qeth_card *card,
1135 struct qeth_cmd_buffer *iob, __u16 len, long data,
1136 int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
1137 unsigned long),
1138 void *reply_param)
1139{
1140 int rc;
1141 struct qeth_ipa_cmd *cmd;
1142
1143 QETH_DBF_TEXT(TRACE, 4, "sendassp");
1144
1145 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1146 if (len <= sizeof(__u32))
1147 cmd->data.setassparms.data.flags_32bit = (__u32) data;
1148 else /* (len > sizeof(__u32)) */
1149 memcpy(&cmd->data.setassparms.data, (void *) data, len);
1150
1151 rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
1152 return rc;
1153}
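/*
 * Note on the helper above (added comment, not original code): an assist
 * parameter that fits in 32 bits is passed by value in
 * cmd->data.setassparms.data.flags_32bit; anything longer is treated as a
 * pointer and memcpy'ed into the data area, so callers must pass a real
 * buffer address in 'data' whenever len > sizeof(__u32).
 */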
1154
1155#ifdef CONFIG_QETH_IPV6
1156static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
1157 enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
1158{
1159 int rc;
1160 struct qeth_cmd_buffer *iob;
1161
1162 QETH_DBF_TEXT(TRACE, 4, "simassp6");
1163 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
1164 0, QETH_PROT_IPV6);
1165 rc = qeth_l3_send_setassparms(card, iob, 0, 0,
1166 qeth_l3_default_setassparms_cb, NULL);
1167 return rc;
1168}
1169#endif
1170
1171static int qeth_l3_send_simple_setassparms(struct qeth_card *card,
1172 enum qeth_ipa_funcs ipa_func, __u16 cmd_code, long data)
1173{
1174 int rc;
1175 int length = 0;
1176 struct qeth_cmd_buffer *iob;
1177
1178 QETH_DBF_TEXT(TRACE, 4, "simassp4");
1179 if (data)
1180 length = sizeof(__u32);
1181 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
1182 length, QETH_PROT_IPV4);
1183 rc = qeth_l3_send_setassparms(card, iob, length, data,
1184 qeth_l3_default_setassparms_cb, NULL);
1185 return rc;
1186}
1187
1188static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
1189{
1190 int rc;
1191
1192 QETH_DBF_TEXT(TRACE, 3, "ipaarp");
1193
1194 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
1195 dev_info(&card->gdev->dev,
1196 "ARP processing not supported on %s!\n",
1197 QETH_CARD_IFNAME(card));
1198 return 0;
1199 }
1200 rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
1201 IPA_CMD_ASS_START, 0);
1202 if (rc) {
1203 dev_warn(&card->gdev->dev,
1204 "Starting ARP processing support for %s failed\n",
1205 QETH_CARD_IFNAME(card));
1206 }
1207 return rc;
1208}
1209
1210static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card)
1211{
1212 int rc;
1213
1214 QETH_DBF_TEXT(TRACE, 3, "ipaipfrg");
1215
1216 if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
1217 dev_info(&card->gdev->dev,
1218 "Hardware IP fragmentation not supported on %s\n",
1219 QETH_CARD_IFNAME(card));
1220 return -EOPNOTSUPP;
1221 }
1222
1223 rc = qeth_l3_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
1224 IPA_CMD_ASS_START, 0);
1225 if (rc) {
1226 dev_warn(&card->gdev->dev,
1227 "Starting IP fragmentation support for %s failed\n",
1228 QETH_CARD_IFNAME(card));
1229 } else
1230 dev_info(&card->gdev->dev,
1231 "Hardware IP fragmentation enabled \n");
1232 return rc;
1233}
1234
1235static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
1236{
1237 int rc;
1238
1239 QETH_DBF_TEXT(TRACE, 3, "stsrcmac");
1240
1241 if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
1242 dev_info(&card->gdev->dev,
1243 "Inbound source MAC-address not supported on %s\n",
1244 QETH_CARD_IFNAME(card));
1245 return -EOPNOTSUPP;
1246 }
1247
1248 rc = qeth_l3_send_simple_setassparms(card, IPA_SOURCE_MAC,
1249 IPA_CMD_ASS_START, 0);
1250 if (rc)
74eacdb9 1251 dev_warn(&card->gdev->dev,
fe94e2e0 1252 "Starting source MAC-address support for %s failed\n",
74eacdb9 1253 QETH_CARD_IFNAME(card));
4a71df50
FB
1254 return rc;
1255}
1256
1257static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
1258{
1259 int rc = 0;
1260
1261 QETH_DBF_TEXT(TRACE, 3, "strtvlan");
1262
1263 if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
1264 dev_info(&card->gdev->dev,
1265 "VLAN not supported on %s\n", QETH_CARD_IFNAME(card));
1266 return -EOPNOTSUPP;
1267 }
1268
1269 rc = qeth_l3_send_simple_setassparms(card, IPA_VLAN_PRIO,
1270 IPA_CMD_ASS_START, 0);
1271 if (rc) {
1272 dev_warn(&card->gdev->dev,
1273 "Starting VLAN support for %s failed\n",
1274 QETH_CARD_IFNAME(card));
1275 } else {
1276 dev_info(&card->gdev->dev, "VLAN enabled\n");
1277 }
1278 return rc;
1279}
1280
1281static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
1282{
1283 int rc;
1284
1285 QETH_DBF_TEXT(TRACE, 3, "stmcast");
1286
1287 if (!qeth_is_supported(card, IPA_MULTICASTING)) {
1288 dev_info(&card->gdev->dev,
1289 "Multicast not supported on %s\n",
1290 QETH_CARD_IFNAME(card));
1291 return -EOPNOTSUPP;
1292 }
1293
1294 rc = qeth_l3_send_simple_setassparms(card, IPA_MULTICASTING,
1295 IPA_CMD_ASS_START, 0);
1296 if (rc) {
1297 dev_warn(&card->gdev->dev,
1298 "Starting multicast support for %s failed\n",
1299 QETH_CARD_IFNAME(card));
1300 } else {
1301 dev_info(&card->gdev->dev, "Multicast enabled\n");
1302 card->dev->flags |= IFF_MULTICAST;
1303 }
1304 return rc;
1305}
1306
1307static int qeth_l3_query_ipassists_cb(struct qeth_card *card,
1308 struct qeth_reply *reply, unsigned long data)
1309{
1310 struct qeth_ipa_cmd *cmd;
1311
1312 QETH_DBF_TEXT(SETUP, 2, "qipasscb");
1313
1314 cmd = (struct qeth_ipa_cmd *) data;
1315 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
1316 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
1317 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
1318 } else {
1319 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
1320 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
1321 }
1322 QETH_DBF_TEXT(SETUP, 2, "suppenbl");
1323 QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_supported);
1324 QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_enabled);
1325 return 0;
1326}
1327
1328static int qeth_l3_query_ipassists(struct qeth_card *card,
1329 enum qeth_prot_versions prot)
1330{
1331 int rc;
1332 struct qeth_cmd_buffer *iob;
1333
1334 QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
1335 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
1336 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_query_ipassists_cb, NULL);
1337 return rc;
1338}
1339
1340#ifdef CONFIG_QETH_IPV6
1341static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
1342{
1343 int rc;
1344
1345 QETH_DBF_TEXT(TRACE, 3, "softipv6");
1346
1347 if (card->info.type == QETH_CARD_TYPE_IQD)
1348 goto out;
1349
1350 rc = qeth_l3_query_ipassists(card, QETH_PROT_IPV6);
1351 if (rc) {
1352 dev_err(&card->gdev->dev,
1353 "Activating IPv6 support for %s failed\n",
1354 QETH_CARD_IFNAME(card));
1355 return rc;
1356 }
1357 rc = qeth_l3_send_simple_setassparms(card, IPA_IPV6,
1358 IPA_CMD_ASS_START, 3);
1359 if (rc) {
1360 dev_err(&card->gdev->dev,
1361 "Activating IPv6 support for %s failed\n",
1362 QETH_CARD_IFNAME(card));
1363 return rc;
1364 }
1365 rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_IPV6,
1366 IPA_CMD_ASS_START);
1367 if (rc) {
1368 dev_err(&card->gdev->dev,
1369 "Activating IPv6 support for %s failed\n",
1370 QETH_CARD_IFNAME(card));
1371 return rc;
1372 }
1373 rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
1374 IPA_CMD_ASS_START);
1375 if (rc) {
1376 dev_warn(&card->gdev->dev,
1377 "Enabling the passthrough mode for %s failed\n",
1378 QETH_CARD_IFNAME(card));
1379 return rc;
1380 }
1381out:
1382 dev_info(&card->gdev->dev, "IPV6 enabled\n");
1383 return 0;
1384}
1385#endif
1386
1387static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
1388{
1389 int rc = 0;
1390
1391 QETH_DBF_TEXT(TRACE, 3, "strtipv6");
1392
1393 if (!qeth_is_supported(card, IPA_IPV6)) {
1394 dev_info(&card->gdev->dev,
1395 "IPv6 not supported on %s\n", QETH_CARD_IFNAME(card));
1396 return 0;
1397 }
1398#ifdef CONFIG_QETH_IPV6
1399 rc = qeth_l3_softsetup_ipv6(card);
1400#endif
1401 return rc ;
1402}
1403
1404static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
1405{
1406 int rc;
1407
1408 QETH_DBF_TEXT(TRACE, 3, "stbrdcst");
1409 card->info.broadcast_capable = 0;
1410 if (!qeth_is_supported(card, IPA_FILTERING)) {
1411 dev_info(&card->gdev->dev,
1412 "Broadcast not supported on %s\n",
1413 QETH_CARD_IFNAME(card));
1414 rc = -EOPNOTSUPP;
1415 goto out;
1416 }
1417 rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
1418 IPA_CMD_ASS_START, 0);
1419 if (rc) {
1420 dev_warn(&card->gdev->dev, "Enabling broadcast filtering for "
1421 "%s failed\n", QETH_CARD_IFNAME(card));
1422 goto out;
1423 }
1424
1425 rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
1426 IPA_CMD_ASS_CONFIGURE, 1);
1427 if (rc) {
1428 dev_warn(&card->gdev->dev,
1429 "Setting up broadcast filtering for %s failed\n",
1430 QETH_CARD_IFNAME(card));
1431 goto out;
1432 }
1433 card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
1434 dev_info(&card->gdev->dev, "Broadcast enabled\n");
1435 rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
1436 IPA_CMD_ASS_ENABLE, 1);
1437 if (rc) {
1438 dev_warn(&card->gdev->dev, "Setting up broadcast echo "
1439 "filtering for %s failed\n", QETH_CARD_IFNAME(card));
1440 goto out;
1441 }
1442 card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
1443out:
1444 if (card->info.broadcast_capable)
1445 card->dev->flags |= IFF_BROADCAST;
1446 else
1447 card->dev->flags &= ~IFF_BROADCAST;
1448 return rc;
1449}
1450
1451static int qeth_l3_send_checksum_command(struct qeth_card *card)
1452{
1453 int rc;
1454
1455 rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
1456 IPA_CMD_ASS_START, 0);
1457 if (rc) {
1458 dev_warn(&card->gdev->dev, "Starting HW checksumming for %s "
1459 "failed, using SW checksumming\n",
1460 QETH_CARD_IFNAME(card));
1461 return rc;
1462 }
1463 rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
1464 IPA_CMD_ASS_ENABLE,
1465 card->info.csum_mask);
1466 if (rc) {
1467 dev_warn(&card->gdev->dev, "Enabling HW checksumming for %s "
1468 "failed, using SW checksumming\n",
1469 QETH_CARD_IFNAME(card));
1470 return rc;
1471 }
1472 return 0;
1473}
1474
1475int qeth_l3_set_rx_csum(struct qeth_card *card,
1476 enum qeth_checksum_types csum_type)
1477{
1478 int rc = 0;
1479
1480 if (card->options.checksum_type == HW_CHECKSUMMING) {
1481 if ((csum_type != HW_CHECKSUMMING) &&
1482 (card->state != CARD_STATE_DOWN)) {
1483 rc = qeth_l3_send_simple_setassparms(card,
1484 IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0);
1485 if (rc)
1486 return -EIO;
1487 }
1488 } else {
1489 if (csum_type == HW_CHECKSUMMING) {
1490 if (card->state != CARD_STATE_DOWN) {
1491 if (!qeth_is_supported(card,
1492 IPA_INBOUND_CHECKSUM))
1493 return -EPERM;
1494 rc = qeth_l3_send_checksum_command(card);
1495 if (rc)
1496 return -EIO;
1497 }
1498 }
1499 }
1500 card->options.checksum_type = csum_type;
1501 return rc;
1502}
1503
4a71df50
FB
1504static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
1505{
1506 int rc = 0;
1507
1508 QETH_DBF_TEXT(TRACE, 3, "strtcsum");
1509
1510 if (card->options.checksum_type == NO_CHECKSUMMING) {
1511 dev_info(&card->gdev->dev,
1512 "Using no checksumming on %s.\n",
1513 QETH_CARD_IFNAME(card));
1514 return 0;
1515 }
1516 if (card->options.checksum_type == SW_CHECKSUMMING) {
1517 dev_info(&card->gdev->dev,
1518 "Using SW checksumming on %s.\n",
1519 QETH_CARD_IFNAME(card));
1520 return 0;
1521 }
1522 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
1523 dev_info(&card->gdev->dev,
1524 "Inbound HW Checksumming not "
1525 "supported on %s,\ncontinuing "
1526 "using Inbound SW Checksumming\n",
1527 QETH_CARD_IFNAME(card));
1528 card->options.checksum_type = SW_CHECKSUMMING;
1529 return 0;
1530 }
1531 rc = qeth_l3_send_checksum_command(card);
1532 if (!rc)
1533 dev_info(&card->gdev->dev,
1534 "HW Checksumming (inbound) enabled\n");
1535
1536 return rc;
1537}
1538
1539static int qeth_l3_start_ipa_tso(struct qeth_card *card)
1540{
1541 int rc;
1542
1543 QETH_DBF_TEXT(TRACE, 3, "sttso");
1544
1545 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
1546 dev_info(&card->gdev->dev,
1547 "Outbound TSO not supported on %s\n",
1548 QETH_CARD_IFNAME(card));
1549 rc = -EOPNOTSUPP;
1550 } else {
1551 rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
1552 IPA_CMD_ASS_START, 0);
1553 if (rc)
1554 dev_warn(&card->gdev->dev, "Starting outbound TCP "
1555 "segmentation offload for %s failed\n",
1556 QETH_CARD_IFNAME(card));
1557 else
1558 dev_info(&card->gdev->dev,
1559 "Outbound TSO enabled\n");
1560 }
1561 if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)) {
1562 card->options.large_send = QETH_LARGE_SEND_NO;
1563 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
1564 }
1565 return rc;
1566}
1567
1568static int qeth_l3_start_ipassists(struct qeth_card *card)
1569{
1570 QETH_DBF_TEXT(TRACE, 3, "strtipas");
1571
1572 qeth_set_access_ctrl_online(card); /* go on*/
1573 qeth_l3_start_ipa_arp_processing(card); /* go on*/
1574 qeth_l3_start_ipa_ip_fragmentation(card); /* go on*/
1575 qeth_l3_start_ipa_source_mac(card); /* go on*/
1576 qeth_l3_start_ipa_vlan(card); /* go on*/
1577 qeth_l3_start_ipa_multicast(card); /* go on*/
1578 qeth_l3_start_ipa_ipv6(card); /* go on*/
1579 qeth_l3_start_ipa_broadcast(card); /* go on*/
1580 qeth_l3_start_ipa_checksum(card); /* go on*/
1581 qeth_l3_start_ipa_tso(card); /* go on*/
1582 return 0;
1583}
1584
1585static int qeth_l3_put_unique_id(struct qeth_card *card)
1586{
1587
1588 int rc = 0;
1589 struct qeth_cmd_buffer *iob;
1590 struct qeth_ipa_cmd *cmd;
1591
1592 QETH_DBF_TEXT(TRACE, 2, "puniqeid");
1593
1594 if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
1595 UNIQUE_ID_NOT_BY_CARD)
1596 return -1;
1597 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
1598 QETH_PROT_IPV6);
1599 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1600 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
1601 card->info.unique_id;
1602 memcpy(&cmd->data.create_destroy_addr.unique_id[0],
1603 card->dev->dev_addr, OSA_ADDR_LEN);
1604 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
1605 return rc;
1606}
1607
1608static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
1609 struct qeth_reply *reply, unsigned long data)
1610{
1611 struct qeth_ipa_cmd *cmd;
1612
1613 cmd = (struct qeth_ipa_cmd *) data;
1614 if (cmd->hdr.return_code == 0)
1615 memcpy(card->dev->dev_addr,
1616 cmd->data.create_destroy_addr.unique_id, ETH_ALEN);
1617 else
1618 random_ether_addr(card->dev->dev_addr);
1619
1620 return 0;
1621}
1622
1623static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
1624{
1625 int rc = 0;
1626 struct qeth_cmd_buffer *iob;
1627 struct qeth_ipa_cmd *cmd;
1628
1629 QETH_DBF_TEXT(SETUP, 2, "hsrmac");
1630
1631 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
1632 QETH_PROT_IPV6);
1633 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1634 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
1635 card->info.unique_id;
1636
1637 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb,
1638 NULL);
1639 return rc;
1640}
1641
1642static int qeth_l3_get_unique_id_cb(struct qeth_card *card,
1643 struct qeth_reply *reply, unsigned long data)
1644{
1645 struct qeth_ipa_cmd *cmd;
1646
1647 cmd = (struct qeth_ipa_cmd *) data;
1648 if (cmd->hdr.return_code == 0)
1649 card->info.unique_id = *((__u16 *)
1650 &cmd->data.create_destroy_addr.unique_id[6]);
1651 else {
1652 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
1653 UNIQUE_ID_NOT_BY_CARD;
1654 dev_warn(&card->gdev->dev, "The network adapter failed to "
1655 "generate a unique ID\n");
1656 }
1657 return 0;
1658}
1659
1660static int qeth_l3_get_unique_id(struct qeth_card *card)
1661{
1662 int rc = 0;
1663 struct qeth_cmd_buffer *iob;
1664 struct qeth_ipa_cmd *cmd;
1665
1666 QETH_DBF_TEXT(SETUP, 2, "guniqeid");
1667
1668 if (!qeth_is_supported(card, IPA_IPV6)) {
1669 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
1670 UNIQUE_ID_NOT_BY_CARD;
1671 return 0;
1672 }
1673
1674 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
1675 QETH_PROT_IPV6);
1676 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1677 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
1678 card->info.unique_id;
1679
1680 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL);
1681 return rc;
1682}
1683
1684static int
1685qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
1686 unsigned long data)
1687{
1688 struct qeth_ipa_cmd *cmd;
1689 __u16 rc;
1690
1691 QETH_DBF_TEXT(SETUP, 2, "diastrcb");
1692
1693 cmd = (struct qeth_ipa_cmd *)data;
1694 rc = cmd->hdr.return_code;
1695 if (rc)
1696 QETH_DBF_TEXT_(TRACE, 2, "dxter%x", rc);
1697 switch (cmd->data.diagass.action) {
1698 case QETH_DIAGS_CMD_TRACE_QUERY:
1699 break;
1700 case QETH_DIAGS_CMD_TRACE_DISABLE:
1701 switch (rc) {
1702 case 0:
1703 case IPA_RC_INVALID_SUBCMD:
1704 card->info.promisc_mode = SET_PROMISC_MODE_OFF;
1705 dev_info(&card->gdev->dev, "The HiperSockets network "
1706 "traffic analyzer is deactivated\n");
1707 break;
1708 default:
1709 break;
1710 }
1711 break;
1712 case QETH_DIAGS_CMD_TRACE_ENABLE:
1713 switch (rc) {
1714 case 0:
1715 card->info.promisc_mode = SET_PROMISC_MODE_ON;
1716 dev_info(&card->gdev->dev, "The HiperSockets network "
1717 "traffic analyzer is activated\n");
1718 break;
1719 case IPA_RC_HARDWARE_AUTH_ERROR:
1720 dev_warn(&card->gdev->dev, "The device is not "
1721 "authorized to run as a HiperSockets network "
1722 "traffic analyzer\n");
1723 break;
1724 case IPA_RC_TRACE_ALREADY_ACTIVE:
1725 dev_warn(&card->gdev->dev, "A HiperSockets "
1726 "network traffic analyzer is already "
1727 "active in the HiperSockets LAN\n");
1728 break;
1729 default:
1730 break;
1731 }
1732 break;
1733 default:
1734 QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n",
1735 cmd->data.diagass.action, QETH_CARD_IFNAME(card));
1736 }
1737
1738 return 0;
1739}
1740
1741static int
1742qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
1743{
1744 struct qeth_cmd_buffer *iob;
1745 struct qeth_ipa_cmd *cmd;
1746
1747 QETH_DBF_TEXT(SETUP, 2, "diagtrac");
1748
1749 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
1750 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1751 cmd->data.diagass.subcmd_len = 16;
1752 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
1753 cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET;
1754 cmd->data.diagass.action = diags_cmd;
1755 return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
1756}
1757
1758static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac,
1759 struct net_device *dev)
1760{
1761 if (dev->type == ARPHRD_IEEE802_TR)
1762 ip_tr_mc_map(ipm, mac);
1763 else
1764 ip_eth_mc_map(ipm, mac);
1765}
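/*
 * Illustrative sketch (not driver code): the Ethernet branch above maps an
 * IPv4 multicast group to a MAC address by putting the low 23 bits of the
 * group address behind the 01:00:5e OUI (RFC 1112); the token-ring branch
 * (ip_tr_mc_map) uses a different functional-address scheme.  The name
 * demo_ip_eth_mc_map is invented for this user-space example.
 */
#if 0	/* example only -- compile separately if desired */
#include <stdio.h>
#include <arpa/inet.h>

static void demo_ip_eth_mc_map(unsigned int group, unsigned char *mac)
{
	group = ntohl(group);
	mac[0] = 0x01;
	mac[1] = 0x00;
	mac[2] = 0x5e;
	mac[3] = (group >> 16) & 0x7f;	/* only 23 group bits survive */
	mac[4] = (group >> 8) & 0xff;
	mac[5] = group & 0xff;
}

int main(void)
{
	unsigned char mac[6];
	int i;

	demo_ip_eth_mc_map(inet_addr("224.1.2.3"), mac);
	for (i = 0; i < 6; i++)
		printf("%02x%s", mac[i], i < 5 ? ":" : "\n");
	/* prints 01:00:5e:01:02:03 */
	return 0;
}
#endif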
1766
1767static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
1768{
1769 struct qeth_ipaddr *ipm;
1770 struct ip_mc_list *im4;
1771 char buf[MAX_ADDR_LEN];
1772
1773 QETH_DBF_TEXT(TRACE, 4, "addmc");
1774 for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
1775 qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
1776 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
1777 if (!ipm)
1778 continue;
1779 ipm->u.a4.addr = im4->multiaddr;
1780 memcpy(ipm->mac, buf, OSA_ADDR_LEN);
1781 ipm->is_multicast = 1;
1782 if (!qeth_l3_add_ip(card, ipm))
1783 kfree(ipm);
1784 }
1785}
1786
1787static void qeth_l3_add_vlan_mc(struct qeth_card *card)
1788{
1789 struct in_device *in_dev;
1790 struct vlan_group *vg;
1791 int i;
1792
1793 QETH_DBF_TEXT(TRACE, 4, "addmcvl");
1794 if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL))
1795 return;
1796
1797 vg = card->vlangrp;
1798 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
1799 struct net_device *netdev = vlan_group_get_device(vg, i);
1800 if (netdev == NULL ||
1801 !(netdev->flags & IFF_UP))
1802 continue;
1803 in_dev = in_dev_get(netdev);
1804 if (!in_dev)
1805 continue;
1806 read_lock(&in_dev->mc_list_lock);
1807 qeth_l3_add_mc(card, in_dev);
1808 read_unlock(&in_dev->mc_list_lock);
1809 in_dev_put(in_dev);
1810 }
1811}
1812
1813static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
1814{
1815 struct in_device *in4_dev;
1816
1817 QETH_DBF_TEXT(TRACE, 4, "chkmcv4");
1818 in4_dev = in_dev_get(card->dev);
1819 if (in4_dev == NULL)
1820 return;
1821 read_lock(&in4_dev->mc_list_lock);
1822 qeth_l3_add_mc(card, in4_dev);
1823 qeth_l3_add_vlan_mc(card);
1824 read_unlock(&in4_dev->mc_list_lock);
1825 in_dev_put(in4_dev);
1826}
1827
1828#ifdef CONFIG_QETH_IPV6
1829static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
1830{
1831 struct qeth_ipaddr *ipm;
1832 struct ifmcaddr6 *im6;
1833 char buf[MAX_ADDR_LEN];
1834
1835 QETH_DBF_TEXT(TRACE, 4, "addmc6");
1836 for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
1837 ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
1838 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
1839 if (!ipm)
1840 continue;
1841 ipm->is_multicast = 1;
1842 memcpy(ipm->mac, buf, OSA_ADDR_LEN);
1843 memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
1844 sizeof(struct in6_addr));
1845 if (!qeth_l3_add_ip(card, ipm))
1846 kfree(ipm);
1847 }
1848}
1849
1850static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
1851{
1852 struct inet6_dev *in_dev;
1853 struct vlan_group *vg;
1854 int i;
1855
1856 QETH_DBF_TEXT(TRACE, 4, "admc6vl");
1857 if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL))
1858 return;
1859
1860 vg = card->vlangrp;
1861 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
1862 struct net_device *netdev = vlan_group_get_device(vg, i);
1863 if (netdev == NULL ||
1864 !(netdev->flags & IFF_UP))
1865 continue;
1866 in_dev = in6_dev_get(netdev);
1867 if (!in_dev)
1868 continue;
1869 read_lock_bh(&in_dev->lock);
1870 qeth_l3_add_mc6(card, in_dev);
1871 read_unlock_bh(&in_dev->lock);
1872 in6_dev_put(in_dev);
1873 }
1874}
1875
1876static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
1877{
1878 struct inet6_dev *in6_dev;
1879
1880 QETH_DBF_TEXT(TRACE, 4, "chkmcv6");
1881 if (!qeth_is_supported(card, IPA_IPV6))
1882 return ;
1883 in6_dev = in6_dev_get(card->dev);
1884 if (in6_dev == NULL)
1885 return;
1886 read_lock_bh(&in6_dev->lock);
1887 qeth_l3_add_mc6(card, in6_dev);
1888 qeth_l3_add_vlan_mc6(card);
1889 read_unlock_bh(&in6_dev->lock);
1890 in6_dev_put(in6_dev);
1891}
1892#endif /* CONFIG_QETH_IPV6 */
1893
1894static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
1895 unsigned short vid)
1896{
1897 struct in_device *in_dev;
1898 struct in_ifaddr *ifa;
1899 struct qeth_ipaddr *addr;
1900
1901 QETH_DBF_TEXT(TRACE, 4, "frvaddr4");
1902
1903 in_dev = in_dev_get(vlan_group_get_device(card->vlangrp, vid));
1904 if (!in_dev)
1905 return;
1906 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1907 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
1908 if (addr) {
1909 addr->u.a4.addr = ifa->ifa_address;
1910 addr->u.a4.mask = ifa->ifa_mask;
1911 addr->type = QETH_IP_TYPE_NORMAL;
1912 if (!qeth_l3_delete_ip(card, addr))
1913 kfree(addr);
1914 }
1915 }
1916 in_dev_put(in_dev);
1917}
1918
1919static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
1920 unsigned short vid)
1921{
1922#ifdef CONFIG_QETH_IPV6
1923 struct inet6_dev *in6_dev;
1924 struct inet6_ifaddr *ifa;
1925 struct qeth_ipaddr *addr;
1926
1927 QETH_DBF_TEXT(TRACE, 4, "frvaddr6");
1928
1929 in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
1930 if (!in6_dev)
1931 return;
1932 for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next) {
1933 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
1934 if (addr) {
1935 memcpy(&addr->u.a6.addr, &ifa->addr,
1936 sizeof(struct in6_addr));
1937 addr->u.a6.pfxlen = ifa->prefix_len;
1938 addr->type = QETH_IP_TYPE_NORMAL;
1939 if (!qeth_l3_delete_ip(card, addr))
1940 kfree(addr);
1941 }
1942 }
1943 in6_dev_put(in6_dev);
1944#endif /* CONFIG_QETH_IPV6 */
1945}
1946
1947static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
1948 unsigned short vid)
1949{
1950 if (!card->vlangrp)
1951 return;
1952 qeth_l3_free_vlan_addresses4(card, vid);
1953 qeth_l3_free_vlan_addresses6(card, vid);
1954}
1955
1956static void qeth_l3_vlan_rx_register(struct net_device *dev,
1957 struct vlan_group *grp)
1958{
1959 struct qeth_card *card = dev->ml_priv;
1960 unsigned long flags;
1961
1962 QETH_DBF_TEXT(TRACE, 4, "vlanreg");
1963 spin_lock_irqsave(&card->vlanlock, flags);
1964 card->vlangrp = grp;
1965 spin_unlock_irqrestore(&card->vlanlock, flags);
1966}
1967
1968static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1969{
1970 return;
1971}
1972
1973static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1974{
1975 struct qeth_card *card = dev->ml_priv;
1976 unsigned long flags;
1977
1978 QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
1979 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
1980 QETH_DBF_TEXT(TRACE, 3, "kidREC");
1981 return;
1982 }
1983 spin_lock_irqsave(&card->vlanlock, flags);
1984 /* unregister IP addresses of vlan device */
1985 qeth_l3_free_vlan_addresses(card, vid);
1986 vlan_group_set_device(card->vlangrp, vid, NULL);
1987 spin_unlock_irqrestore(&card->vlanlock, flags);
1988 qeth_l3_set_multicast_list(card->dev);
1989}
1990
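/*
 * Rebuild the layer-2 information for an inbound frame from the qeth
 * layer-3 header: derive the target MAC address, create a fake hard
 * header, set skb->protocol, pkt_type and ip_summed, and return the
 * VLAN id carried in the extension flags (0 if none).
 */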
1991static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
1992 struct sk_buff *skb, struct qeth_hdr *hdr)
1993{
1994 unsigned short vlan_id = 0;
1995 __be16 prot;
1996 struct iphdr *ip_hdr;
1997 unsigned char tg_addr[MAX_ADDR_LEN];
1998
1999 if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
2000 prot = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
2001 ETH_P_IP);
2002 switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
2003 case QETH_CAST_MULTICAST:
2004 switch (prot) {
2005#ifdef CONFIG_QETH_IPV6
2006 case __constant_htons(ETH_P_IPV6):
2007 ndisc_mc_map((struct in6_addr *)
2008 skb->data + 24,
2009 tg_addr, card->dev, 0);
2010 break;
2011#endif
2012 case __constant_htons(ETH_P_IP):
2013 ip_hdr = (struct iphdr *)skb->data;
2014 (card->dev->type == ARPHRD_IEEE802_TR) ?
2015 ip_tr_mc_map(ip_hdr->daddr, tg_addr):
2016 ip_eth_mc_map(ip_hdr->daddr, tg_addr);
2017 break;
2018 default:
2019 memcpy(tg_addr, card->dev->broadcast,
2020 card->dev->addr_len);
2021 }
2022 card->stats.multicast++;
2023 skb->pkt_type = PACKET_MULTICAST;
2024 break;
2025 case QETH_CAST_BROADCAST:
2026 memcpy(tg_addr, card->dev->broadcast,
2027 card->dev->addr_len);
2028 card->stats.multicast++;
2029 skb->pkt_type = PACKET_BROADCAST;
2030 break;
2031 case QETH_CAST_UNICAST:
2032 case QETH_CAST_ANYCAST:
2033 case QETH_CAST_NOCAST:
2034 default:
2035 if (card->options.sniffer)
2036 skb->pkt_type = PACKET_OTHERHOST;
2037 else
2038 skb->pkt_type = PACKET_HOST;
2039 memcpy(tg_addr, card->dev->dev_addr,
2040 card->dev->addr_len);
2041 }
2042 if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
2043 card->dev->header_ops->create(skb, card->dev, prot,
2044 tg_addr, &hdr->hdr.l3.dest_addr[2],
2045 card->dev->addr_len);
2046 else
2047 card->dev->header_ops->create(skb, card->dev, prot,
2048 tg_addr, "FAKELL", card->dev->addr_len);
2049 }
2050
2051#ifdef CONFIG_TR
2052 if (card->dev->type == ARPHRD_IEEE802_TR)
2053 skb->protocol = tr_type_trans(skb, card->dev);
2054 else
2055#endif
2056 skb->protocol = eth_type_trans(skb, card->dev);
2057
2058 if (hdr->hdr.l3.ext_flags &
2059 (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
2060 vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)?
2061 hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
2062 }
2063
2064 switch (card->options.checksum_type) {
2065 case SW_CHECKSUMMING:
2066 skb->ip_summed = CHECKSUM_NONE;
2067 break;
2068 case NO_CHECKSUMMING:
2069 skb->ip_summed = CHECKSUM_UNNECESSARY;
2070 break;
2071 case HW_CHECKSUMMING:
2072 if ((hdr->hdr.l3.ext_flags &
2073 (QETH_HDR_EXT_CSUM_HDR_REQ |
2074 QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
2075 (QETH_HDR_EXT_CSUM_HDR_REQ |
2076 QETH_HDR_EXT_CSUM_TRANSP_REQ))
2077 skb->ip_summed = CHECKSUM_UNNECESSARY;
2078 else
2079 skb->ip_summed = CHECKSUM_NONE;
2080 }
2081
2082 return vlan_id;
2083}
2084
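/*
 * Walk all elements of an inbound QDIO buffer, rebuild each received skb
 * and hand it to the stack, updating the receive statistics on the way;
 * frames with an unknown header type are dropped.
 */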
2085static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
2086 struct qeth_qdio_buffer *buf, int index)
2087{
2088 struct qdio_buffer_element *element;
2089 struct sk_buff *skb;
2090 struct qeth_hdr *hdr;
2091 int offset;
2092 __u16 vlan_tag = 0;
2093 unsigned int len;
2094 /* get first element of current buffer */
2095 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
2096 offset = 0;
2097 if (card->options.performance_stats)
2098 card->perf_stats.bufs_rec++;
2099 while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element,
2100 &offset, &hdr))) {
2101 skb->dev = card->dev;
2102 /* is device UP ? */
2103 if (!(card->dev->flags & IFF_UP)) {
2104 dev_kfree_skb_any(skb);
2105 continue;
2106 }
2107
2108 switch (hdr->hdr.l3.id) {
2109 case QETH_HEADER_TYPE_LAYER3:
2110 vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr);
2111 len = skb->len;
2112 if (vlan_tag && !card->options.sniffer)
2113 if (card->vlangrp)
2114 vlan_hwaccel_rx(skb, card->vlangrp,
2115 vlan_tag);
2116 else {
2117 dev_kfree_skb_any(skb);
2118 continue;
2119 }
2120 else
2121 netif_rx(skb);
2122 break;
2123 case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
2124 skb->pkt_type = PACKET_HOST;
2125 skb->protocol = eth_type_trans(skb, skb->dev);
2126 if (card->options.checksum_type == NO_CHECKSUMMING)
2127 skb->ip_summed = CHECKSUM_UNNECESSARY;
2128 else
2129 skb->ip_summed = CHECKSUM_NONE;
2130 len = skb->len;
2131 netif_receive_skb(skb);
2132 break;
2133 default:
2134 dev_kfree_skb_any(skb);
2135 QETH_DBF_TEXT(TRACE, 3, "inbunkno");
2136 QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
2137 continue;
2138 }
2139
2140 card->stats.rx_packets++;
2141 card->stats.rx_bytes += len;
2142 }
2143}
2144
2145static int qeth_l3_verify_vlan_dev(struct net_device *dev,
2146 struct qeth_card *card)
2147{
2148 int rc = 0;
2149 struct vlan_group *vg;
2150 int i;
2151
2152 vg = card->vlangrp;
2153 if (!vg)
2154 return rc;
2155
2156 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
2157 if (vlan_group_get_device(vg, i) == dev) {
2158 rc = QETH_VLAN_CARD;
2159 break;
2160 }
2161 }
2162
2163 if (rc && !(vlan_dev_real_dev(dev)->ml_priv == (void *)card))
2164 return 0;
2165
2166 return rc;
2167}
2168
2169static int qeth_l3_verify_dev(struct net_device *dev)
2170{
2171 struct qeth_card *card;
2172 unsigned long flags;
2173 int rc = 0;
2174
2175 read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
2176 list_for_each_entry(card, &qeth_core_card_list.list, list) {
2177 if (card->dev == dev) {
2178 rc = QETH_REAL_CARD;
2179 break;
2180 }
2181 rc = qeth_l3_verify_vlan_dev(dev, card);
2182 if (rc)
2183 break;
2184 }
2185 read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
2186
2187 return rc;
2188}
2189
2190static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
2191{
2192 struct qeth_card *card = NULL;
2193 int rc;
2194
2195 rc = qeth_l3_verify_dev(dev);
2196 if (rc == QETH_REAL_CARD)
2197 card = dev->ml_priv;
2198 else if (rc == QETH_VLAN_CARD)
2199 card = vlan_dev_real_dev(dev)->ml_priv;
2200 if (card && card->options.layer2)
2201 card = NULL;
2202 QETH_DBF_TEXT_(TRACE, 4, "%d", rc);
2203 return card;
2204}
2205
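/*
 * Take the card down step by step: stop the interface, send STOPLAN when a
 * soft stop is allowed, clear the IP and command lists and the QDIO queues,
 * until the card reaches CARD_STATE_DOWN.
 */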
2206static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
2207{
2208 int rc = 0;
2209
2210 QETH_DBF_TEXT(SETUP, 2, "stopcard");
2211 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
2212
2213 qeth_set_allowed_threads(card, 0, 1);
2214 if (card->options.sniffer &&
2215 (card->info.promisc_mode == SET_PROMISC_MODE_ON))
2216 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
2217 if (card->read.state == CH_STATE_UP &&
2218 card->write.state == CH_STATE_UP &&
2219 (card->state == CARD_STATE_UP)) {
2220 if (recovery_mode)
2221 qeth_l3_stop(card->dev);
2222 else {
2223 rtnl_lock();
2224 dev_close(card->dev);
2225 rtnl_unlock();
2226 }
2227 if (!card->use_hard_stop) {
2228 rc = qeth_send_stoplan(card);
2229 if (rc)
2230 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
2231 }
2232 card->state = CARD_STATE_SOFTSETUP;
2233 }
2234 if (card->state == CARD_STATE_SOFTSETUP) {
2235 qeth_l3_clear_ip_list(card, !card->use_hard_stop, 1);
2236 qeth_clear_ipacmd_list(card);
2237 card->state = CARD_STATE_HARDSETUP;
2238 }
2239 if (card->state == CARD_STATE_HARDSETUP) {
2240 if (!card->use_hard_stop &&
2241 (card->info.type != QETH_CARD_TYPE_IQD)) {
2242 rc = qeth_l3_put_unique_id(card);
2243 if (rc)
2244 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
2245 }
2246 qeth_qdio_clear_card(card, 0);
2247 qeth_clear_qdio_buffers(card);
2248 qeth_clear_working_pool_list(card);
2249 card->state = CARD_STATE_DOWN;
2250 }
2251 if (card->state == CARD_STATE_DOWN) {
2252 qeth_clear_cmd_buffers(&card->read);
2253 qeth_clear_cmd_buffers(&card->write);
2254 }
2255 card->use_hard_stop = 0;
2256 return rc;
2257}
2258
2259/*
2260 * Test for and switch promiscuous mode (on or off),
2261 * either for GuestLAN or the HiperSockets sniffer.
2262 */
2263static void
2264qeth_l3_handle_promisc_mode(struct qeth_card *card)
2265{
2266 struct net_device *dev = card->dev;
2267
2268 if (((dev->flags & IFF_PROMISC) &&
2269 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
2270 (!(dev->flags & IFF_PROMISC) &&
2271 (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
2272 return;
2273
2274 if (card->info.guestlan) { /* Guestlan trace */
2275 if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
2276 qeth_setadp_promisc_mode(card);
2277 } else if (card->options.sniffer && /* HiperSockets trace */
2278 qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
2279 if (dev->flags & IFF_PROMISC) {
2280 QETH_DBF_TEXT(TRACE, 3, "+promisc");
2281 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE);
2282 } else {
2283 QETH_DBF_TEXT(TRACE, 3, "-promisc");
2284 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
2285 }
2286 }
2287}
2288
2289static void qeth_l3_set_multicast_list(struct net_device *dev)
2290{
2291 struct qeth_card *card = dev->ml_priv;
2292
2293 QETH_DBF_TEXT(TRACE, 3, "setmulti");
2294 if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
2295 (card->state != CARD_STATE_UP))
2296 return;
2297 if (!card->options.sniffer) {
2298 qeth_l3_delete_mc_addresses(card);
2299 qeth_l3_add_multicast_ipv4(card);
2300#ifdef CONFIG_QETH_IPV6
2301 qeth_l3_add_multicast_ipv6(card);
2302#endif
2303 qeth_l3_set_ip_addr_list(card);
2304 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
2305 return;
2306 }
2307 qeth_l3_handle_promisc_mode(card);
2308}
2309
2310static const char *qeth_l3_arp_get_error_cause(int *rc)
2311{
2312 switch (*rc) {
2313 case QETH_IPA_ARP_RC_FAILED:
2314 *rc = -EIO;
2315 return "operation failed";
2316 case QETH_IPA_ARP_RC_NOTSUPP:
2317 *rc = -EOPNOTSUPP;
2318 return "operation not supported";
2319 case QETH_IPA_ARP_RC_OUT_OF_RANGE:
2320 *rc = -EINVAL;
2321 return "argument out of range";
2322 case QETH_IPA_ARP_RC_Q_NOTSUPP:
2323 *rc = -EOPNOTSUPP;
2324 return "query operation not supported";
2325 case QETH_IPA_ARP_RC_Q_NO_DATA:
2326 *rc = -ENOENT;
2327 return "no query data available";
2328 default:
2329 return "unknown error";
2330 }
2331}
2332
2333static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
2334{
2335 int tmp;
2336 int rc;
2337
2338 QETH_DBF_TEXT(TRACE, 3, "arpstnoe");
2339
2340 /*
2341 * currently GuestLAN only supports the ARP assist function
2342 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
2343 * thus we say EOPNOTSUPP for this ARP function
2344 */
2345 if (card->info.guestlan)
2346 return -EOPNOTSUPP;
2347 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
2348 return -EOPNOTSUPP;
2349 }
2350 rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
2351 IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
2352 no_entries);
2353 if (rc) {
2354 tmp = rc;
2355 QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on "
2356 "%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card),
2357 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2358 }
2359 return rc;
2360}
2361
2362static void qeth_l3_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
2363 struct qeth_arp_query_data *qdata, int entry_size,
2364 int uentry_size)
2365{
2366 char *entry_ptr;
2367 char *uentry_ptr;
2368 int i;
2369
2370 entry_ptr = (char *)&qdata->data;
2371 uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset);
2372 for (i = 0; i < qdata->no_entries; ++i) {
2373 /* strip off 32 bytes "media specific information" */
2374 memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32);
2375 entry_ptr += entry_size;
2376 uentry_ptr += uentry_size;
2377 }
2378}
2379
2380static int qeth_l3_arp_query_cb(struct qeth_card *card,
2381 struct qeth_reply *reply, unsigned long data)
2382{
2383 struct qeth_ipa_cmd *cmd;
2384 struct qeth_arp_query_data *qdata;
2385 struct qeth_arp_query_info *qinfo;
2386 int entry_size;
2387 int uentry_size;
2388 int i;
2389
2390 QETH_DBF_TEXT(TRACE, 4, "arpquecb");
2391
2392 qinfo = (struct qeth_arp_query_info *) reply->param;
2393 cmd = (struct qeth_ipa_cmd *) data;
2394 if (cmd->hdr.return_code) {
2395 QETH_DBF_TEXT_(TRACE, 4, "qaer1%i", cmd->hdr.return_code);
2396 return 0;
2397 }
2398 if (cmd->data.setassparms.hdr.return_code) {
2399 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
2400 QETH_DBF_TEXT_(TRACE, 4, "qaer2%i", cmd->hdr.return_code);
2401 return 0;
2402 }
2403 qdata = &cmd->data.setassparms.data.query_arp;
2404 switch (qdata->reply_bits) {
2405 case 5:
2406 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5);
2407 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
2408 uentry_size = sizeof(struct qeth_arp_qi_entry5_short);
2409 break;
2410 case 7:
2411 /* fall through to default */
2412 default:
2413 /* tr is the same as eth -> entry7 */
2414 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7);
2415 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
2416 uentry_size = sizeof(struct qeth_arp_qi_entry7_short);
2417 break;
2418 }
2419 /* check if there is enough room in userspace */
2420 if ((qinfo->udata_len - qinfo->udata_offset) <
2421 qdata->no_entries * uentry_size){
2422 QETH_DBF_TEXT_(TRACE, 4, "qaer3%i", -ENOMEM);
2423 cmd->hdr.return_code = -ENOMEM;
2424 goto out_error;
2425 }
2426 QETH_DBF_TEXT_(TRACE, 4, "anore%i",
2427 cmd->data.setassparms.hdr.number_of_replies);
2428 QETH_DBF_TEXT_(TRACE, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
2429 QETH_DBF_TEXT_(TRACE, 4, "anoen%i", qdata->no_entries);
2430
2431 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
2432 /* strip off "media specific information" */
2433 qeth_l3_copy_arp_entries_stripped(qinfo, qdata, entry_size,
2434 uentry_size);
2435 } else
2436 /*copy entries to user buffer*/
2437 memcpy(qinfo->udata + qinfo->udata_offset,
2438 (char *)&qdata->data, qdata->no_entries*uentry_size);
2439
2440 qinfo->no_entries += qdata->no_entries;
2441 qinfo->udata_offset += (qdata->no_entries*uentry_size);
2442 /* check if all replies received ... */
2443 if (cmd->data.setassparms.hdr.seq_no <
2444 cmd->data.setassparms.hdr.number_of_replies)
2445 return 1;
2446 memcpy(qinfo->udata, &qinfo->no_entries, 4);
2447 /* keep STRIP_ENTRIES flag so the user program can distinguish
2448 * stripped entries from normal ones */
2449 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
2450 qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
2451 memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2);
2452 return 0;
2453out_error:
2454 i = 0;
2455 memcpy(qinfo->udata, &i, 4);
2456 return 0;
2457}
2458
2459static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card,
2460 struct qeth_cmd_buffer *iob, int len,
2461 int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
2462 unsigned long),
2463 void *reply_param)
2464{
2465 QETH_DBF_TEXT(TRACE, 4, "sendarp");
2466
2467 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
2468 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
2469 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2470 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
2471 reply_cb, reply_param);
2472}
2473
2474static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
2475{
2476 struct qeth_cmd_buffer *iob;
2477 struct qeth_arp_query_info qinfo = {0, };
2478 int tmp;
2479 int rc;
2480
2481 QETH_DBF_TEXT(TRACE, 3, "arpquery");
2482
2483 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
2484 IPA_ARP_PROCESSING)) {
2485 return -EOPNOTSUPP;
2486 }
2487 /* get size of userspace buffer and mask_bits -> 6 bytes */
2488 if (copy_from_user(&qinfo, udata, 6))
2489 return -EFAULT;
2490 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
2491 if (!qinfo.udata)
2492 return -ENOMEM;
2493 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
2494 iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
2495 IPA_CMD_ASS_ARP_QUERY_INFO,
2496 sizeof(int), QETH_PROT_IPV4);
2497
2498 rc = qeth_l3_send_ipa_arp_cmd(card, iob,
2499 QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
2500 qeth_l3_arp_query_cb, (void *)&qinfo);
2501 if (rc) {
2502 tmp = rc;
2503 QETH_DBF_MESSAGE(2, "Error while querying ARP cache on %s: %s "
2504 "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
2505 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2506 if (copy_to_user(udata, qinfo.udata, 4))
2507 rc = -EFAULT;
2508 } else {
2509 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
2510 rc = -EFAULT;
2511 }
2512 kfree(qinfo.udata);
2513 return rc;
2514}
2515
2516static int qeth_l3_arp_add_entry(struct qeth_card *card,
2517 struct qeth_arp_cache_entry *entry)
2518{
2519 struct qeth_cmd_buffer *iob;
2520 char buf[16];
2521 int tmp;
2522 int rc;
2523
2524 QETH_DBF_TEXT(TRACE, 3, "arpadent");
2525
2526 /*
2527 * currently GuestLAN only supports the ARP assist function
2528 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
2529 * thus we say EOPNOTSUPP for this ARP function
2530 */
2531 if (card->info.guestlan)
2532 return -EOPNOTSUPP;
2533 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
2534 return -EOPNOTSUPP;
2535 }
2536
2537 iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
2538 IPA_CMD_ASS_ARP_ADD_ENTRY,
2539 sizeof(struct qeth_arp_cache_entry),
2540 QETH_PROT_IPV4);
2541 rc = qeth_l3_send_setassparms(card, iob,
2542 sizeof(struct qeth_arp_cache_entry),
2543 (unsigned long) entry,
2544 qeth_l3_default_setassparms_cb, NULL);
2545 if (rc) {
2546 tmp = rc;
2547 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
2548 QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s "
2549 "on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
2550 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2551 }
2552 return rc;
2553}
2554
2555static int qeth_l3_arp_remove_entry(struct qeth_card *card,
2556 struct qeth_arp_cache_entry *entry)
2557{
2558 struct qeth_cmd_buffer *iob;
2559 char buf[16] = {0, };
2560 int tmp;
2561 int rc;
2562
2563 QETH_DBF_TEXT(TRACE, 3, "arprment");
2564
2565 /*
2566 * currently GuestLAN only supports the ARP assist function
2567 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
2568 * thus we say EOPNOTSUPP for this ARP function
2569 */
2570 if (card->info.guestlan)
2571 return -EOPNOTSUPP;
2572 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
2573 return -EOPNOTSUPP;
2574 }
2575 memcpy(buf, entry, 12);
2576 iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
2577 IPA_CMD_ASS_ARP_REMOVE_ENTRY,
2578 12,
2579 QETH_PROT_IPV4);
2580 rc = qeth_l3_send_setassparms(card, iob,
2581 12, (unsigned long)buf,
2582 qeth_l3_default_setassparms_cb, NULL);
2583 if (rc) {
2584 tmp = rc;
2585 memset(buf, 0, 16);
2586 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
2587 QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s"
2588 " on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
2589 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2590 }
2591 return rc;
2592}
2593
2594static int qeth_l3_arp_flush_cache(struct qeth_card *card)
2595{
2596 int rc;
2597 int tmp;
2598
2599 QETH_DBF_TEXT(TRACE, 3, "arpflush");
2600
2601 /*
2602 * currently GuestLAN only supports the ARP assist function
2603 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
2604 * thus we say EOPNOTSUPP for this ARP function
2605 */
2606 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
2607 return -EOPNOTSUPP;
2608 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
2609 return -EOPNOTSUPP;
2610 }
2611 rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
2612 IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
2613 if (rc) {
2614 tmp = rc;
2615 QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s "
2616 "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
2617 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2618 }
2619 return rc;
2620}
2621
2622static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2623{
2624 struct qeth_card *card = dev->ml_priv;
2625 struct qeth_arp_cache_entry arp_entry;
2626 struct mii_ioctl_data *mii_data;
2627 int rc = 0;
2628
2629 if (!card)
2630 return -ENODEV;
2631
2632 if ((card->state != CARD_STATE_UP) &&
2633 (card->state != CARD_STATE_SOFTSETUP))
2634 return -ENODEV;
2635
2636 switch (cmd) {
2637 case SIOC_QETH_ARP_SET_NO_ENTRIES:
2638 if (!capable(CAP_NET_ADMIN)) {
2639 rc = -EPERM;
2640 break;
2641 }
2642 rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
2643 break;
2644 case SIOC_QETH_ARP_QUERY_INFO:
2645 if (!capable(CAP_NET_ADMIN)) {
2646 rc = -EPERM;
2647 break;
2648 }
2649 rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data);
2650 break;
2651 case SIOC_QETH_ARP_ADD_ENTRY:
2652 if (!capable(CAP_NET_ADMIN)) {
2653 rc = -EPERM;
2654 break;
2655 }
2656 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
2657 sizeof(struct qeth_arp_cache_entry)))
2658 rc = -EFAULT;
2659 else
2660 rc = qeth_l3_arp_add_entry(card, &arp_entry);
2661 break;
2662 case SIOC_QETH_ARP_REMOVE_ENTRY:
2663 if (!capable(CAP_NET_ADMIN)) {
2664 rc = -EPERM;
2665 break;
2666 }
2667 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
2668 sizeof(struct qeth_arp_cache_entry)))
2669 rc = -EFAULT;
2670 else
2671 rc = qeth_l3_arp_remove_entry(card, &arp_entry);
2672 break;
2673 case SIOC_QETH_ARP_FLUSH_CACHE:
2674 if (!capable(CAP_NET_ADMIN)) {
2675 rc = -EPERM;
2676 break;
2677 }
2678 rc = qeth_l3_arp_flush_cache(card);
2679 break;
2680 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
2681 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
2682 break;
2683 case SIOC_QETH_GET_CARD_TYPE:
2684 if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
2685 !card->info.guestlan)
2686 return 1;
2687 return 0;
2688 break;
2689 case SIOCGMIIPHY:
2690 mii_data = if_mii(rq);
2691 mii_data->phy_id = 0;
2692 break;
2693 case SIOCGMIIREG:
2694 mii_data = if_mii(rq);
2695 if (mii_data->phy_id != 0)
2696 rc = -EINVAL;
2697 else
2698 mii_data->val_out = qeth_mdio_read(dev,
2699 mii_data->phy_id,
2700 mii_data->reg_num);
2701 break;
2702 default:
2703 rc = -EOPNOTSUPP;
2704 }
2705 if (rc)
2706 QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc);
2707 return rc;
2708}
2709
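/*
 * Determine the cast type (broadcast, multicast, anycast or unspecified) of
 * an outgoing skb, preferring the neighbour entry and falling back to
 * inspecting the IP header or the destination MAC address.
 */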
2710int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
2711{
2712 int cast_type = RTN_UNSPEC;
2713
2714 if (skb_dst(skb) && skb_dst(skb)->neighbour) {
2715 cast_type = skb_dst(skb)->neighbour->type;
2716 if ((cast_type == RTN_BROADCAST) ||
2717 (cast_type == RTN_MULTICAST) ||
2718 (cast_type == RTN_ANYCAST))
2719 return cast_type;
2720 else
2721 return RTN_UNSPEC;
2722 }
2723 /* try something else */
2724 if (skb->protocol == ETH_P_IPV6)
2725 return (skb_network_header(skb)[24] == 0xff) ?
2726 RTN_MULTICAST : 0;
2727 else if (skb->protocol == ETH_P_IP)
2728 return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ?
2729 RTN_MULTICAST : 0;
2730 /* ... */
2731 if (!memcmp(skb->data, skb->dev->broadcast, 6))
2732 return RTN_BROADCAST;
2733 else {
2734 u16 hdr_mac;
2735
2736 hdr_mac = *((u16 *)skb->data);
2737 /* tr multicast? */
2738 switch (card->info.link_type) {
2739 case QETH_LINK_TYPE_HSTR:
2740 case QETH_LINK_TYPE_LANE_TR:
2741 if ((hdr_mac == QETH_TR_MAC_NC) ||
2742 (hdr_mac == QETH_TR_MAC_C))
2743 return RTN_MULTICAST;
2744 break;
2745 /* eth or so multicast? */
2746 default:
2747 if ((hdr_mac == QETH_ETH_MAC_V4) ||
2748 (hdr_mac == QETH_ETH_MAC_V6))
2749 return RTN_MULTICAST;
2750 }
2751 }
2752 return cast_type;
2753}
2754
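/*
 * Fill the qeth layer-3 header for an outgoing skb: cast-type flags, an
 * optional VLAN tag and the next-hop (or final) destination address for
 * IPv4, IPv6 or passthrough frames.
 */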
2755static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2756 struct sk_buff *skb, int ipv, int cast_type)
2757{
2758 memset(hdr, 0, sizeof(struct qeth_hdr));
2759 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
2760 hdr->hdr.l3.ext_flags = 0;
2761
2762 /*
2763 * This must happen before we overwrite this location with the next hop
2764 * ip. v6 uses passthrough, v4 sets the tag in the QDIO header.
2765 */
2766 if (card->vlangrp && vlan_tx_tag_present(skb)) {
2767 if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD))
2768 hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME;
2769 else
2770 hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG;
2771 hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
2772 }
2773
2774 hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
2775 if (ipv == 4) {
2776 /* IPv4 */
2777 hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
2778 memset(hdr->hdr.l3.dest_addr, 0, 12);
2779 if ((skb_dst(skb)) && (skb_dst(skb)->neighbour)) {
2780 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
2781 *((u32 *) skb_dst(skb)->neighbour->primary_key);
2782 } else {
2783 /* fill in destination address used in ip header */
2784 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
2785 ip_hdr(skb)->daddr;
2786 }
2787 } else if (ipv == 6) {
2788 /* IPv6 */
2789 hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type);
2790 if (card->info.type == QETH_CARD_TYPE_IQD)
2791 hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU;
2792 if ((skb_dst(skb)) && (skb_dst(skb)->neighbour)) {
2793 memcpy(hdr->hdr.l3.dest_addr,
2794 skb_dst(skb)->neighbour->primary_key, 16);
2795 } else {
2796 /* fill in destination address used in ip header */
2797 memcpy(hdr->hdr.l3.dest_addr,
2798 &ipv6_hdr(skb)->daddr, 16);
2799 }
2800 } else {
2801 /* passthrough */
2802 if ((skb->dev->type == ARPHRD_IEEE802_TR) &&
2803 !memcmp(skb->data + sizeof(struct qeth_hdr) +
2804 sizeof(__u16), skb->dev->broadcast, 6)) {
2805 hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
2806 QETH_HDR_PASSTHRU;
2807 } else if (!memcmp(skb->data + sizeof(struct qeth_hdr),
2808 skb->dev->broadcast, 6)) {
2809 /* broadcast? */
2810 hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
2811 QETH_HDR_PASSTHRU;
2812 } else {
2813 hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
2814 QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
2815 QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
2816 }
2817 }
2818}
2819
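/*
 * Convert the qeth header into a TSO header: fill in the extension part
 * (MSS, header lengths) and prime the TCP/IP checksum fields the way the
 * adapter expects them for segmentation offload.
 */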
2820static void qeth_tso_fill_header(struct qeth_card *card,
2821 struct qeth_hdr *qhdr, struct sk_buff *skb)
2822{
2823 struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
2824 struct tcphdr *tcph = tcp_hdr(skb);
2825 struct iphdr *iph = ip_hdr(skb);
2826 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2827
2828 /*fix header to TSO values ...*/
2829 hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
2830 /*set values which are fix for the first approach ...*/
2831 hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
2832 hdr->ext.imb_hdr_no = 1;
2833 hdr->ext.hdr_type = 1;
2834 hdr->ext.hdr_version = 1;
2835 hdr->ext.hdr_len = 28;
2836 /*insert non-fix values */
2837 hdr->ext.mss = skb_shinfo(skb)->gso_size;
2838 hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
2839 hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
2840 sizeof(struct qeth_hdr_tso));
2841 tcph->check = 0;
2842 if (skb->protocol == ETH_P_IPV6) {
2843 ip6h->payload_len = 0;
2844 tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
2845 0, IPPROTO_TCP, 0);
2846 } else {
2847 /*OSA want us to set these values ...*/
2848 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
2849 0, IPPROTO_TCP, 0);
2850 iph->tot_len = 0;
2851 iph->check = 0;
2852 }
2853}
2854
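/*
 * Software fallback for CHECKSUM_PARTIAL skbs: compute the checksum over
 * the payload and store it at the offset requested by the stack.
 */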
2855static void qeth_tx_csum(struct sk_buff *skb)
2856{
2857 __wsum csum;
2858 int offset;
2859
2860 skb_set_transport_header(skb, skb->csum_start - skb_headroom(skb));
2861 offset = skb->csum_start - skb_headroom(skb);
2862 BUG_ON(offset >= skb_headlen(skb));
2863 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2864
2865 offset += skb->csum_offset;
2866 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2867 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
2868}
2869
2870static inline int qeth_l3_tso_elements(struct sk_buff *skb)
2871{
2872 unsigned long tcpd = (unsigned long)tcp_hdr(skb) +
2873 tcp_hdr(skb)->doff * 4;
2874 int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data);
2875 int elements = PFN_UP(tcpd + tcpd_len) - PFN_DOWN(tcpd);
2876 elements += skb_shinfo(skb)->nr_frags;
2877 return elements;
2878}
2879
2880static inline int qeth_l3_tso_check(struct sk_buff *skb)
2881{
2882 int len = ((unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4) -
2883 (unsigned long)skb->data;
2884 return (((unsigned long)skb->data & PAGE_MASK) !=
2885 (((unsigned long)skb->data + len) & PAGE_MASK));
2886}
2887
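/*
 * Main transmit path: build (or reuse) an skb with a qeth layer-3 or TSO
 * header, map it onto QDIO buffer elements and hand it to the outbound
 * queue; the frame is dropped and accounted as an error when this fails.
 */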
2888static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2889{
2890 int rc;
2891 u16 *tag;
2892 struct qeth_hdr *hdr = NULL;
2893 int elements_needed = 0;
2894 int elems;
2895 struct qeth_card *card = dev->ml_priv;
2896 struct sk_buff *new_skb = NULL;
2897 int ipv = qeth_get_ip_version(skb);
2898 int cast_type = qeth_l3_get_cast_type(card, skb);
2899 struct qeth_qdio_out_q *queue = card->qdio.out_qs
2900 [qeth_get_priority_queue(card, skb, ipv, cast_type)];
2901 int tx_bytes = skb->len;
2902 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
2903 int data_offset = -1;
2904 int nr_frags;
2905
2906 if (((card->info.type == QETH_CARD_TYPE_IQD) && (!ipv)) ||
2907 card->options.sniffer)
2908 goto tx_drop;
2909
2910 if ((card->state != CARD_STATE_UP) || !card->lan_online) {
2911 card->stats.tx_carrier_errors++;
2912 goto tx_drop;
2913 }
2914
2915 if ((cast_type == RTN_BROADCAST) &&
2916 (card->info.broadcast_capable == 0))
2917 goto tx_drop;
2918
2919 if (card->options.performance_stats) {
2920 card->perf_stats.outbound_cnt++;
2921 card->perf_stats.outbound_start_time = qeth_get_micros();
2922 }
2923
2924 if (skb_is_gso(skb))
2925 large_send = card->options.large_send;
2926 else
2927 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2928 qeth_tx_csum(skb);
2929 if (card->options.performance_stats)
2930 card->perf_stats.tx_csum++;
2931 }
2932
2933 if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
2934 (skb_shinfo(skb)->nr_frags == 0)) {
2935 new_skb = skb;
2936 data_offset = ETH_HLEN;
2937 hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
2938 if (!hdr)
2939 goto tx_drop;
2940 elements_needed++;
2941 } else {
2942 /* create a clone with writeable headroom */
2943 new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso)
2944 + VLAN_HLEN);
2945 if (!new_skb)
2946 goto tx_drop;
2947 }
2948
2949 if (card->info.type == QETH_CARD_TYPE_IQD) {
2950 if (data_offset < 0)
2951 skb_pull(new_skb, ETH_HLEN);
2952 } else {
2953 if (ipv == 4) {
2954 if (card->dev->type == ARPHRD_IEEE802_TR)
2955 skb_pull(new_skb, TR_HLEN);
2956 else
2957 skb_pull(new_skb, ETH_HLEN);
2958 }
2959
2960 if (ipv == 6 && card->vlangrp &&
2961 vlan_tx_tag_present(new_skb)) {
2962 skb_push(new_skb, VLAN_HLEN);
2963 skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
2964 skb_copy_to_linear_data_offset(new_skb, 4,
2965 new_skb->data + 8, 4);
2966 skb_copy_to_linear_data_offset(new_skb, 8,
2967 new_skb->data + 12, 4);
2968 tag = (u16 *)(new_skb->data + 12);
2969 *tag = __constant_htons(ETH_P_8021Q);
2970 *(tag + 1) = htons(vlan_tx_tag_get(new_skb));
2971 new_skb->vlan_tci = 0;
2972 }
2973 }
2974
2975 netif_stop_queue(dev);
2976
2977 /* fix hardware limitation: as long as we do not have sbal
2978 * chaining we can not send long frag lists
2979 */
2980 if (large_send == QETH_LARGE_SEND_TSO) {
2981 if (qeth_l3_tso_elements(new_skb) + 1 > 16) {
2982 if (skb_linearize(new_skb))
2983 goto tx_drop;
2984 if (card->options.performance_stats)
2985 card->perf_stats.tx_lin++;
2986 }
2987 }
2988
2989 if ((large_send == QETH_LARGE_SEND_TSO) &&
2990 (cast_type == RTN_UNSPEC)) {
2991 hdr = (struct qeth_hdr *)skb_push(new_skb,
2992 sizeof(struct qeth_hdr_tso));
2993 if (qeth_l3_tso_check(new_skb))
2994 QETH_DBF_MESSAGE(2, "tso skb misaligned\n");
2995 memset(hdr, 0, sizeof(struct qeth_hdr_tso));
2996 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
2997 qeth_tso_fill_header(card, hdr, new_skb);
2998 elements_needed++;
2999 } else {
3000 if (data_offset < 0) {
3001 hdr = (struct qeth_hdr *)skb_push(new_skb,
4a71df50 3002 sizeof(struct qeth_hdr));
3003 qeth_l3_fill_header(card, hdr, new_skb, ipv,
3004 cast_type);
3005 } else {
3006 qeth_l3_fill_header(card, hdr, new_skb, ipv,
3007 cast_type);
3008 hdr->hdr.l3.length = new_skb->len - data_offset;
3009 }
3010 }
3011
3012 elems = qeth_get_elements_no(card, (void *)hdr, new_skb,
3013 elements_needed);
3014 if (!elems) {
3015 if (data_offset >= 0)
3016 kmem_cache_free(qeth_core_header_cache, hdr);
3017 goto tx_drop;
3018 }
3019 elements_needed += elems;
3020 nr_frags = skb_shinfo(new_skb)->nr_frags;
3021
3022 if (card->info.type != QETH_CARD_TYPE_IQD)
3023 rc = qeth_do_send_packet(card, queue, new_skb, hdr,
3024 elements_needed);
3025 else
3026 rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
3027 elements_needed, data_offset, 0);
3028
3029 if (!rc) {
3030 card->stats.tx_packets++;
3031 card->stats.tx_bytes += tx_bytes;
3032 if (new_skb != skb)
3033 dev_kfree_skb_any(skb);
3034 if (card->options.performance_stats) {
3035 if (large_send != QETH_LARGE_SEND_NO) {
3036 card->perf_stats.large_send_bytes += tx_bytes;
3037 card->perf_stats.large_send_cnt++;
3038 }
3039 if (nr_frags) {
3040 card->perf_stats.sg_skbs_sent++;
3041 /* nr_frags + skb->data */
3042 card->perf_stats.sg_frags_sent += nr_frags + 1;
3043 }
3044 }
3045 rc = NETDEV_TX_OK;
3046 } else {
3047 if (data_offset >= 0)
3048 kmem_cache_free(qeth_core_header_cache, hdr);
3049
3050 if (rc == -EBUSY) {
3051 if (new_skb != skb)
3052 dev_kfree_skb_any(new_skb);
3053 return NETDEV_TX_BUSY;
3054 } else
3055 goto tx_drop;
3056 }
3057
3058 netif_wake_queue(dev);
3059 if (card->options.performance_stats)
3060 card->perf_stats.outbound_time += qeth_get_micros() -
3061 card->perf_stats.outbound_start_time;
3062 return rc;
3063
3064tx_drop:
3065 card->stats.tx_dropped++;
3066 card->stats.tx_errors++;
3067 if ((new_skb != skb) && new_skb)
3068 dev_kfree_skb_any(new_skb);
3069 dev_kfree_skb_any(skb);
3070 netif_wake_queue(dev);
3071 return NETDEV_TX_OK;
3072}
3073
3074static int qeth_l3_open(struct net_device *dev)
3075{
3076 struct qeth_card *card = dev->ml_priv;
3077
3078 QETH_DBF_TEXT(TRACE, 4, "qethopen");
3079 if (card->state != CARD_STATE_SOFTSETUP)
3080 return -ENODEV;
3081 card->data.state = CH_STATE_UP;
3082 card->state = CARD_STATE_UP;
3083 netif_start_queue(dev);
3084
3085 if (!card->lan_online && netif_carrier_ok(dev))
3086 netif_carrier_off(dev);
3087 return 0;
3088}
3089
3090static int qeth_l3_stop(struct net_device *dev)
3091{
3092 struct qeth_card *card = dev->ml_priv;
3093
3094 QETH_DBF_TEXT(TRACE, 4, "qethstop");
3095 netif_tx_disable(dev);
3096 if (card->state == CARD_STATE_UP)
3097 card->state = CARD_STATE_SOFTSETUP;
3098 return 0;
3099}
3100
3101static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev)
3102{
3103 struct qeth_card *card = dev->ml_priv;
3104
3105 return (card->options.checksum_type == HW_CHECKSUMMING);
3106}
3107
3108static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data)
3109{
3110 struct qeth_card *card = dev->ml_priv;
3111 enum qeth_checksum_types csum_type;
3112
3113 if (data)
3114 csum_type = HW_CHECKSUMMING;
3115 else
3116 csum_type = SW_CHECKSUMMING;
3117
3118 return qeth_l3_set_rx_csum(card, csum_type);
3119}
3120
3121static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data)
3122{
3123 struct qeth_card *card = dev->ml_priv;
3124 int rc = 0;
3125
3126 if (data) {
3127 rc = qeth_l3_set_large_send(card, QETH_LARGE_SEND_TSO);
3128 } else {
3129 dev->features &= ~NETIF_F_TSO;
3130 card->options.large_send = QETH_LARGE_SEND_NO;
3131 }
3132 return rc;
3133}
3134
3135static const struct ethtool_ops qeth_l3_ethtool_ops = {
3136 .get_link = ethtool_op_get_link,
3137 .get_tx_csum = ethtool_op_get_tx_csum,
3138 .set_tx_csum = ethtool_op_set_tx_hw_csum,
3139 .get_rx_csum = qeth_l3_ethtool_get_rx_csum,
3140 .set_rx_csum = qeth_l3_ethtool_set_rx_csum,
3141 .get_sg = ethtool_op_get_sg,
3142 .set_sg = ethtool_op_set_sg,
3143 .get_tso = ethtool_op_get_tso,
3144 .set_tso = qeth_l3_ethtool_set_tso,
3145 .get_strings = qeth_core_get_strings,
3146 .get_ethtool_stats = qeth_core_get_ethtool_stats,
3147 .get_sset_count = qeth_core_get_sset_count,
3148 .get_drvinfo = qeth_core_get_drvinfo,
3149 .get_settings = qeth_core_ethtool_get_settings,
3150};
3151
3152/*
3153 * We need NOARP for IPv4 but we want neighbor solicitation for IPv6. Setting
3154 * NOARP on the netdevice is no option because it also turns off neighbor
3155 * solicitation. For IPv4 we install a neighbor_setup function instead: we
3156 * don't want ARP resolution, but we do want the hard header, so that packet
3157 * sockets (e.g. tcpdump) keep working.
3158 */
3159static int qeth_l3_neigh_setup_noarp(struct neighbour *n)
3160{
3161 n->nud_state = NUD_NOARP;
3162 memcpy(n->ha, "FAKELL", 6);
3163 n->output = n->ops->connected_output;
3164 return 0;
3165}
3166
3167static int
3168qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np)
3169{
3170 if (np->tbl->family == AF_INET)
3171 np->neigh_setup = qeth_l3_neigh_setup_noarp;
3172
3173 return 0;
3174}
3175
3176static const struct net_device_ops qeth_l3_netdev_ops = {
3177 .ndo_open = qeth_l3_open,
3178 .ndo_stop = qeth_l3_stop,
3179 .ndo_get_stats = qeth_get_stats,
3180 .ndo_start_xmit = qeth_l3_hard_start_xmit,
3181 .ndo_validate_addr = eth_validate_addr,
3182 .ndo_set_multicast_list = qeth_l3_set_multicast_list,
3183 .ndo_do_ioctl = qeth_l3_do_ioctl,
3184 .ndo_change_mtu = qeth_change_mtu,
3185 .ndo_vlan_rx_register = qeth_l3_vlan_rx_register,
3186 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
3187 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
3188 .ndo_tx_timeout = qeth_tx_timeout,
3189};
3190
3191static const struct net_device_ops qeth_l3_osa_netdev_ops = {
3192 .ndo_open = qeth_l3_open,
3193 .ndo_stop = qeth_l3_stop,
3194 .ndo_get_stats = qeth_get_stats,
3195 .ndo_start_xmit = qeth_l3_hard_start_xmit,
3196 .ndo_validate_addr = eth_validate_addr,
3197 .ndo_set_multicast_list = qeth_l3_set_multicast_list,
3198 .ndo_do_ioctl = qeth_l3_do_ioctl,
3199 .ndo_change_mtu = qeth_change_mtu,
3200 .ndo_vlan_rx_register = qeth_l3_vlan_rx_register,
3201 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
3202 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
3203 .ndo_tx_timeout = qeth_tx_timeout,
3204 .ndo_neigh_setup = qeth_l3_neigh_setup,
3205};
3206
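/*
 * Allocate and register the net_device that matches the card type
 * (token ring, ethernet or HiperSockets) and wire up its netdev_ops,
 * ethtool_ops and feature flags.
 */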
3207static int qeth_l3_setup_netdev(struct qeth_card *card)
3208{
3209 if (card->info.type == QETH_CARD_TYPE_OSAE) {
3210 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
3211 (card->info.link_type == QETH_LINK_TYPE_HSTR)) {
3212#ifdef CONFIG_TR
3213 card->dev = alloc_trdev(0);
3214#endif
3215 if (!card->dev)
3216 return -ENODEV;
3217 card->dev->netdev_ops = &qeth_l3_netdev_ops;
3218 } else {
3219 card->dev = alloc_etherdev(0);
3220 if (!card->dev)
3221 return -ENODEV;
3222 card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
3223
3224 /*IPv6 address autoconfiguration stuff*/
3225 qeth_l3_get_unique_id(card);
3226 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
3227 card->dev->dev_id = card->info.unique_id &
3228 0xffff;
3229 }
3230 } else if (card->info.type == QETH_CARD_TYPE_IQD) {
3231 card->dev = alloc_netdev(0, "hsi%d", ether_setup);
3232 if (!card->dev)
3233 return -ENODEV;
3234 card->dev->flags |= IFF_NOARP;
3235 card->dev->netdev_ops = &qeth_l3_netdev_ops;
3236 qeth_l3_iqd_read_initial_mac(card);
3237 } else
3238 return -ENODEV;
3239
3240 card->dev->ml_priv = card;
3241 card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
3242 card->dev->mtu = card->info.initial_mtu;
3243 SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops);
3244 card->dev->features |= NETIF_F_HW_VLAN_TX |
3245 NETIF_F_HW_VLAN_RX |
3246 NETIF_F_HW_VLAN_FILTER;
3247 card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
3248 card->dev->gso_max_size = 15 * PAGE_SIZE;
3249
3250 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
3251 return register_netdev(card->dev);
3252}
3253
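/*
 * QDIO input handler: on an activate-check condition schedule recovery,
 * otherwise process every newly filled inbound buffer and give it back
 * to the hardware.
 */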
3254static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
3255 unsigned int qdio_err, unsigned int queue, int first_element,
3256 int count, unsigned long card_ptr)
3257{
3258 struct net_device *net_dev;
3259 struct qeth_card *card;
3260 struct qeth_qdio_buffer *buffer;
3261 int index;
3262 int i;
3263
3264 card = (struct qeth_card *) card_ptr;
3265 net_dev = card->dev;
3266 if (card->options.performance_stats) {
3267 card->perf_stats.inbound_cnt++;
3268 card->perf_stats.inbound_start_time = qeth_get_micros();
3269 }
3270 if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
3271 QETH_DBF_TEXT(TRACE, 1, "qdinchk");
3272 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
3273 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X",
3274 first_element, count);
3275 QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
3276 qeth_schedule_recovery(card);
3277 return;
3278 }
3279 for (i = first_element; i < (first_element + count); ++i) {
3280 index = i % QDIO_MAX_BUFFERS_PER_Q;
3281 buffer = &card->qdio.in_q->bufs[index];
3282 if (!(qdio_err &&
3283 qeth_check_qdio_errors(card, buffer->buffer,
3284 qdio_err, "qinerr")))
3285 qeth_l3_process_inbound_buffer(card, buffer, index);
3286 /* clear buffer and give back to hardware */
3287 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
3288 qeth_queue_input_buffer(card, index);
3289 }
3290 if (card->options.performance_stats)
3291 card->perf_stats.inbound_time += qeth_get_micros() -
3292 card->perf_stats.inbound_start_time;
3293}
3294
3295static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
3296{
3297 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3298
3299 qeth_l3_create_device_attributes(&gdev->dev);
3300 card->options.layer2 = 0;
3301 card->discipline.input_handler = (qdio_handler_t *)
3302 qeth_l3_qdio_input_handler;
3303 card->discipline.output_handler = (qdio_handler_t *)
3304 qeth_qdio_output_handler;
3305 card->discipline.recover = qeth_l3_recover;
3306 return 0;
3307}
3308
3309static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
3310{
3311 struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
3312
3313 qeth_set_allowed_threads(card, 0, 1);
3314 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
3315
3316 if (cgdev->state == CCWGROUP_ONLINE) {
3317 card->use_hard_stop = 1;
3318 qeth_l3_set_offline(cgdev);
3319 }
3320
3321 if (card->dev) {
3322 unregister_netdev(card->dev);
3323 card->dev = NULL;
3324 }
3325
3326 qeth_l3_remove_device_attributes(&cgdev->dev);
3327 qeth_l3_clear_ip_list(card, 0, 0);
3328 qeth_l3_clear_ipato_list(card);
3329 return;
3330}
3331
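/*
 * Bring the card online: hard setup, netdev creation if needed, STARTLAN,
 * IP assist and routing setup, QDIO queue initialization, and finally
 * reopening of the interface when coming back from recovery.
 */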
3332static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3333{
3334 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3335 int rc = 0;
3336 enum qeth_card_states recover_flag;
3337
3338 BUG_ON(!card);
3339 QETH_DBF_TEXT(SETUP, 2, "setonlin");
3340 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
3341
3342 recover_flag = card->state;
3343 rc = qeth_core_hardsetup_card(card);
3344 if (rc) {
3345 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
3346 rc = -ENODEV;
3347 goto out_remove;
3348 }
3349
3350 qeth_l3_query_ipassists(card, QETH_PROT_IPV4);
3351
3352 if (!card->dev && qeth_l3_setup_netdev(card)) {
3353 rc = -ENODEV;
3354 goto out_remove;
3355 }
3356
3357 card->state = CARD_STATE_HARDSETUP;
3358 qeth_print_status_message(card);
3359
3360 /* softsetup */
3361 QETH_DBF_TEXT(SETUP, 2, "softsetp");
3362
3363 rc = qeth_send_startlan(card);
3364 if (rc) {
3365 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3366 if (rc == 0xe080) {
3367 dev_warn(&card->gdev->dev,
3368 "The LAN is offline\n");
3369 card->lan_online = 0;
3370 return 0;
3371 }
3372 rc = -ENODEV;
3373 goto out_remove;
3374 } else
3375 card->lan_online = 1;
3376
3377 rc = qeth_l3_setadapter_parms(card);
3378 if (rc)
3379 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
3380 if (!card->options.sniffer) {
3381 rc = qeth_l3_start_ipassists(card);
3382 if (rc)
3383 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
3384 qeth_l3_set_large_send(card, card->options.large_send);
3385 rc = qeth_l3_setrouting_v4(card);
3386 if (rc)
3387 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
3388 rc = qeth_l3_setrouting_v6(card);
3389 if (rc)
3390 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
3391 }
3392 netif_tx_disable(card->dev);
3393
3394 rc = qeth_init_qdio_queues(card);
3395 if (rc) {
3396 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
3397 rc = -ENODEV;
3398 goto out_remove;
3399 }
3400 card->state = CARD_STATE_SOFTSETUP;
3401 netif_carrier_on(card->dev);
3402
3403 qeth_set_allowed_threads(card, 0xffffffff, 0);
3404 qeth_l3_set_ip_addr_list(card);
3405 if (recover_flag == CARD_STATE_RECOVER) {
3406 if (recovery_mode)
4a71df50 3407 qeth_l3_open(card->dev);
3408 else {
3409 rtnl_lock();
3410 dev_open(card->dev);
3411 rtnl_unlock();
3412 }
3413 qeth_l3_set_multicast_list(card->dev);
3414 }
3415 /* let user_space know that device is online */
3416 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
3417 return 0;
3418out_remove:
3419 card->use_hard_stop = 1;
3420 qeth_l3_stop_card(card, 0);
3421 ccw_device_set_offline(CARD_DDEV(card));
3422 ccw_device_set_offline(CARD_WDEV(card));
3423 ccw_device_set_offline(CARD_RDEV(card));
3424 if (recover_flag == CARD_STATE_RECOVER)
3425 card->state = CARD_STATE_RECOVER;
3426 else
3427 card->state = CARD_STATE_DOWN;
3428 return rc;
3429}
3430
3431static int qeth_l3_set_online(struct ccwgroup_device *gdev)
3432{
3433 return __qeth_l3_set_online(gdev, 0);
3434}
3435
3436static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
3437 int recovery_mode)
3438{
3439 struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
3440 int rc = 0, rc2 = 0, rc3 = 0;
3441 enum qeth_card_states recover_flag;
3442
3443 QETH_DBF_TEXT(SETUP, 3, "setoffl");
3444 QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
3445
3446 if (card->dev && netif_carrier_ok(card->dev))
3447 netif_carrier_off(card->dev);
3448 recover_flag = card->state;
3449 qeth_l3_stop_card(card, recovery_mode);
3450 rc = ccw_device_set_offline(CARD_DDEV(card));
3451 rc2 = ccw_device_set_offline(CARD_WDEV(card));
3452 rc3 = ccw_device_set_offline(CARD_RDEV(card));
3453 if (!rc)
3454 rc = (rc2) ? rc2 : rc3;
3455 if (rc)
3456 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3457 if (recover_flag == CARD_STATE_UP)
3458 card->state = CARD_STATE_RECOVER;
3459 /* let user_space know that device is offline */
3460 kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
3461 return 0;
3462}
3463
3464static int qeth_l3_set_offline(struct ccwgroup_device *cgdev)
3465{
3466 return __qeth_l3_set_offline(cgdev, 0);
3467}
3468
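/*
 * Recovery thread: take the card offline with a hard stop and bring it
 * back online; on failure close the interface and warn the user.
 */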
3469static int qeth_l3_recover(void *ptr)
3470{
3471 struct qeth_card *card;
3472 int rc = 0;
3473
3474 card = (struct qeth_card *) ptr;
3475 QETH_DBF_TEXT(TRACE, 2, "recover1");
3476 QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
3477 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
3478 return 0;
3479 QETH_DBF_TEXT(TRACE, 2, "recover2");
3480 dev_warn(&card->gdev->dev,
3481 "A recovery process has been started for the device\n");
3482 card->use_hard_stop = 1;
3483 __qeth_l3_set_offline(card->gdev, 1);
3484 rc = __qeth_l3_set_online(card->gdev, 1);
3485 /* don't run another scheduled recovery */
3486 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
3487 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
3488 if (!rc)
3489 dev_info(&card->gdev->dev,
3490 "Device successfully recovered!\n");
3491 else {
3492 rtnl_lock();
3493 dev_close(card->dev);
3494 rtnl_unlock();
3495 dev_warn(&card->gdev->dev, "The qeth device driver "
3496 "failed to recover an error on the device\n");
3497 }
3498 return 0;
3499}
3500
3501static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
3502{
3503 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3504 qeth_l3_clear_ip_list(card, 0, 0);
3505 qeth_qdio_clear_card(card, 0);
3506 qeth_clear_qdio_buffers(card);
3507}
3508
3509static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
3510{
3511 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3512
3513 if (card->dev)
3514 netif_device_detach(card->dev);
3515 qeth_set_allowed_threads(card, 0, 1);
3516 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
3517 if (gdev->state == CCWGROUP_OFFLINE)
3518 return 0;
3519 if (card->state == CARD_STATE_UP) {
3520 card->use_hard_stop = 1;
3521 __qeth_l3_set_offline(card->gdev, 1);
3522 } else
3523 __qeth_l3_set_offline(card->gdev, 0);
3524 return 0;
3525}
3526
3527static int qeth_l3_pm_resume(struct ccwgroup_device *gdev)
3528{
3529 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3530 int rc = 0;
3531
3532 if (gdev->state == CCWGROUP_OFFLINE)
3533 goto out;
3534
3535 if (card->state == CARD_STATE_RECOVER) {
3536 rc = __qeth_l3_set_online(card->gdev, 1);
3537 if (rc) {
3538 rtnl_lock();
3539 dev_close(card->dev);
3540 rtnl_unlock();
3541 }
3542 } else
3543 rc = __qeth_l3_set_online(card->gdev, 0);
3544out:
3545 qeth_set_allowed_threads(card, 0xffffffff, 0);
3546 if (card->dev)
3547 netif_device_attach(card->dev);
3548 if (rc)
3549 dev_warn(&card->gdev->dev, "The qeth device driver "
3550 "failed to recover an error on the device\n");
3551 return rc;
3552}
3553
3554struct ccwgroup_driver qeth_l3_ccwgroup_driver = {
3555 .probe = qeth_l3_probe_device,
3556 .remove = qeth_l3_remove_device,
3557 .set_online = qeth_l3_set_online,
3558 .set_offline = qeth_l3_set_offline,
3559 .shutdown = qeth_l3_shutdown,
3560 .freeze = qeth_l3_pm_suspend,
3561 .thaw = qeth_l3_pm_resume,
3562 .restore = qeth_l3_pm_resume,
3563};
3564EXPORT_SYMBOL_GPL(qeth_l3_ccwgroup_driver);
3565
3566static int qeth_l3_ip_event(struct notifier_block *this,
3567 unsigned long event, void *ptr)
3568{
3569 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
3570 struct net_device *dev = (struct net_device *)ifa->ifa_dev->dev;
3571 struct qeth_ipaddr *addr;
3572 struct qeth_card *card;
3573
3574 if (dev_net(dev) != &init_net)
3575 return NOTIFY_DONE;
3576
3577 QETH_DBF_TEXT(TRACE, 3, "ipevent");
3578 card = qeth_l3_get_card_from_dev(dev);
3579 if (!card)
3580 return NOTIFY_DONE;
3581
3582 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
3583 if (addr != NULL) {
3584 addr->u.a4.addr = ifa->ifa_address;
3585 addr->u.a4.mask = ifa->ifa_mask;
3586 addr->type = QETH_IP_TYPE_NORMAL;
3587 } else
3588 goto out;
3589
3590 switch (event) {
3591 case NETDEV_UP:
3592 if (!qeth_l3_add_ip(card, addr))
3593 kfree(addr);
3594 break;
3595 case NETDEV_DOWN:
3596 if (!qeth_l3_delete_ip(card, addr))
3597 kfree(addr);
3598 break;
3599 default:
3600 break;
3601 }
3602 qeth_l3_set_ip_addr_list(card);
3603out:
3604 return NOTIFY_DONE;
3605}
3606
3607static struct notifier_block qeth_l3_ip_notifier = {
3608 qeth_l3_ip_event,
3609 NULL,
3610};
3611
3612#ifdef CONFIG_QETH_IPV6
3613/**
3614 * IPv6 event handler
3615 */
3616static int qeth_l3_ip6_event(struct notifier_block *this,
3617 unsigned long event, void *ptr)
3618{
3619 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
3620 struct net_device *dev = (struct net_device *)ifa->idev->dev;
3621 struct qeth_ipaddr *addr;
3622 struct qeth_card *card;
3623
3624 QETH_DBF_TEXT(TRACE, 3, "ip6event");
3625
3626 card = qeth_l3_get_card_from_dev(dev);
3627 if (!card)
3628 return NOTIFY_DONE;
3629 if (!qeth_is_supported(card, IPA_IPV6))
3630 return NOTIFY_DONE;
3631
3632 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
3633 if (addr != NULL) {
3634 memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
3635 addr->u.a6.pfxlen = ifa->prefix_len;
3636 addr->type = QETH_IP_TYPE_NORMAL;
3637 } else
3638 goto out;
3639
3640 switch (event) {
3641 case NETDEV_UP:
3642 if (!qeth_l3_add_ip(card, addr))
3643 kfree(addr);
3644 break;
3645 case NETDEV_DOWN:
3646 if (!qeth_l3_delete_ip(card, addr))
3647 kfree(addr);
3648 break;
3649 default:
3650 break;
3651 }
3652 qeth_l3_set_ip_addr_list(card);
3653out:
3654 return NOTIFY_DONE;
3655}
3656
3657static struct notifier_block qeth_l3_ip6_notifier = {
3658 qeth_l3_ip6_event,
3659 NULL,
3660};
3661#endif
3662
3663static int qeth_l3_register_notifiers(void)
3664{
3665 int rc;
3666
3667 QETH_DBF_TEXT(TRACE, 5, "regnotif");
3668 rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
3669 if (rc)
3670 return rc;
3671#ifdef CONFIG_QETH_IPV6
3672 rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier);
3673 if (rc) {
3674 unregister_inetaddr_notifier(&qeth_l3_ip_notifier);
3675 return rc;
3676 }
3677#else
3678 pr_warning("There is no IPv6 support for the layer 3 discipline\n");
3679#endif
3680 return 0;
3681}
3682
3683static void qeth_l3_unregister_notifiers(void)
3684{
3685
3686 QETH_DBF_TEXT(TRACE, 5, "unregnot");
3687 BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
3688#ifdef CONFIG_QETH_IPV6
3689 BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
3690#endif /* QETH_IPV6 */
3691}
3692
3693static int __init qeth_l3_init(void)
3694{
3695 int rc = 0;
3696
3697 pr_info("register layer 3 discipline\n");
3698 rc = qeth_l3_register_notifiers();
3699 return rc;
3700}
3701
3702static void __exit qeth_l3_exit(void)
3703{
3704 qeth_l3_unregister_notifiers();
3705 pr_info("unregister layer 3 discipline\n");
3706}
3707
3708module_init(qeth_l3_init);
3709module_exit(qeth_l3_exit);
3710MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
3711MODULE_DESCRIPTION("qeth layer 3 discipline");
3712MODULE_LICENSE("GPL");