1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright IBM Corp. 2007, 2009
4 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
5 * Frank Pavlic <fpavlic@de.ibm.com>,
6 * Thomas Spatzier <tspat@de.ibm.com>,
7 * Frank Blaschka <frank.blaschka@de.ibm.com>
8 */
9
10 #define KMSG_COMPONENT "qeth"
11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15 #include <linux/bitops.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/kernel.h>
19 #include <linux/etherdevice.h>
20 #include <linux/ip.h>
21 #include <linux/in.h>
22 #include <linux/ipv6.h>
23 #include <linux/inetdevice.h>
24 #include <linux/igmp.h>
25 #include <linux/slab.h>
26 #include <linux/if_ether.h>
27 #include <linux/if_vlan.h>
28 #include <linux/skbuff.h>
29
30 #include <net/ip.h>
31 #include <net/arp.h>
32 #include <net/route.h>
33 #include <net/ipv6.h>
34 #include <net/ip6_route.h>
35 #include <net/ip6_fib.h>
36 #include <net/ip6_checksum.h>
37 #include <net/iucv/af_iucv.h>
38 #include <linux/hashtable.h>
39
40 #include "qeth_l3.h"
41
42
43 static int qeth_l3_set_offline(struct ccwgroup_device *);
44 static int qeth_l3_stop(struct net_device *);
45 static void qeth_l3_set_rx_mode(struct net_device *dev);
46 static int qeth_l3_register_addr_entry(struct qeth_card *,
47 struct qeth_ipaddr *);
48 static int qeth_l3_deregister_addr_entry(struct qeth_card *,
49 struct qeth_ipaddr *);
50
51 static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
52 {
53 sprintf(buf, "%pI4", addr);
54 }
55
56 static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
57 {
58 sprintf(buf, "%pI6", addr);
59 }
60
61 void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
62 char *buf)
63 {
64 if (proto == QETH_PROT_IPV4)
65 qeth_l3_ipaddr4_to_string(addr, buf);
66 else if (proto == QETH_PROT_IPV6)
67 qeth_l3_ipaddr6_to_string(addr, buf);
68 }
69
70 static struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions prot)
71 {
72 struct qeth_ipaddr *addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
73
74 if (addr)
75 qeth_l3_init_ipaddr(addr, QETH_IP_TYPE_NORMAL, prot);
76 return addr;
77 }
78
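/* Look up an address by its IP only (ignoring type and other
 * attributes), in either the multicast or the unicast hash table.
 */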
79 static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
80 struct qeth_ipaddr *query)
81 {
82 u64 key = qeth_l3_ipaddr_hash(query);
83 struct qeth_ipaddr *addr;
84
85 if (query->is_multicast) {
86 hash_for_each_possible(card->ip_mc_htable, addr, hnode, key)
87 if (qeth_l3_addr_match_ip(addr, query))
88 return addr;
89 } else {
90 hash_for_each_possible(card->ip_htable, addr, hnode, key)
91 if (qeth_l3_addr_match_ip(addr, query))
92 return addr;
93 }
94 return NULL;
95 }
96
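/* Expand each address byte into eight array entries holding one bit
 * each, most significant bit first, so that prefixes can be compared
 * with a plain memcmp().
 */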
97 static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
98 {
99 int i, j;
100 u8 octet;
101
102 for (i = 0; i < len; ++i) {
103 octet = addr[i];
104 for (j = 7; j >= 0; --j) {
105 bits[i*8 + j] = octet & 1;
106 octet >>= 1;
107 }
108 }
109 }
110
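/* Check whether a NORMAL address falls into one of the configured
 * IP-address-takeover (IPATO) ranges, honoring the per-protocol
 * invert flags.
 */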
111 static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
112 struct qeth_ipaddr *addr)
113 {
114 struct qeth_ipato_entry *ipatoe;
115 u8 addr_bits[128] = {0, };
116 u8 ipatoe_bits[128] = {0, };
117 int rc = 0;
118
119 if (!card->ipato.enabled)
120 return false;
121 if (addr->type != QETH_IP_TYPE_NORMAL)
122 return false;
123
124 qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
125 (addr->proto == QETH_PROT_IPV4) ? 4 : 16);
126 list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
127 if (addr->proto != ipatoe->proto)
128 continue;
129 qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
130 (ipatoe->proto == QETH_PROT_IPV4) ?
131 4 : 16);
132 if (addr->proto == QETH_PROT_IPV4)
133 rc = !memcmp(addr_bits, ipatoe_bits,
134 min(32, ipatoe->mask_bits));
135 else
136 rc = !memcmp(addr_bits, ipatoe_bits,
137 min(128, ipatoe->mask_bits));
138 if (rc)
139 break;
140 }
141 /* invert? */
142 if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
143 rc = !rc;
144 else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
145 rc = !rc;
146
147 return rc;
148 }
149
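/* Drop one reference on a registered address and, once it is unused,
 * deregister it from the card and free it. Expected to be called with
 * the ip_lock held.
 */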
150 static int qeth_l3_delete_ip(struct qeth_card *card,
151 struct qeth_ipaddr *tmp_addr)
152 {
153 int rc = 0;
154 struct qeth_ipaddr *addr;
155
156 if (tmp_addr->type == QETH_IP_TYPE_RXIP)
157 QETH_CARD_TEXT(card, 2, "delrxip");
158 else if (tmp_addr->type == QETH_IP_TYPE_VIPA)
159 QETH_CARD_TEXT(card, 2, "delvipa");
160 else
161 QETH_CARD_TEXT(card, 2, "delip");
162
163 if (tmp_addr->proto == QETH_PROT_IPV4)
164 QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
165 else {
166 QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
167 QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
168 }
169
170 addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
171 if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
172 return -ENOENT;
173
174 addr->ref_counter--;
175 if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
176 return rc;
177 if (addr->in_progress)
178 return -EINPROGRESS;
179
180 if (qeth_card_hw_is_reachable(card))
181 rc = qeth_l3_deregister_addr_entry(card, addr);
182
183 hash_del(&addr->hnode);
184 kfree(addr);
185
186 return rc;
187 }
188
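/* Add an address to the unicast hash table and, if the card is
 * reachable, register it with the hardware. For an already known
 * NORMAL address only the reference count is bumped. Expected to be
 * called with the ip_lock held (it is temporarily dropped for IPv4
 * registration, see below).
 */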
189 static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
190 {
191 int rc = 0;
192 struct qeth_ipaddr *addr;
193 char buf[40];
194
195 if (tmp_addr->type == QETH_IP_TYPE_RXIP)
196 QETH_CARD_TEXT(card, 2, "addrxip");
197 else if (tmp_addr->type == QETH_IP_TYPE_VIPA)
198 QETH_CARD_TEXT(card, 2, "addvipa");
199 else
200 QETH_CARD_TEXT(card, 2, "addip");
201
202 if (tmp_addr->proto == QETH_PROT_IPV4)
203 QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
204 else {
205 QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
206 QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
207 }
208
209 addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
210 if (addr) {
211 if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
212 return -EADDRINUSE;
213 if (qeth_l3_addr_match_all(addr, tmp_addr)) {
214 addr->ref_counter++;
215 return 0;
216 }
217 qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
218 buf);
219 dev_warn(&card->gdev->dev,
220 "Registering IP address %s failed\n", buf);
221 return -EADDRINUSE;
222 } else {
223 addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
224 if (!addr)
225 return -ENOMEM;
226
227 memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
228 addr->ref_counter = 1;
229
230 if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
231 QETH_CARD_TEXT(card, 2, "tkovaddr");
232 addr->ipato = 1;
233 }
234 hash_add(card->ip_htable, &addr->hnode,
235 qeth_l3_ipaddr_hash(addr));
236
237 if (!qeth_card_hw_is_reachable(card)) {
238 addr->disp_flag = QETH_DISP_ADDR_ADD;
239 return 0;
240 }
241
242 /* qeth_l3_register_addr_entry can sleep when we add an
243 * IPv4 address, because the SETIP IPA command triggers ARP
244 * processing for IPv4 addresses.
245 * Thus we must drop the spinlock, and use the in_progress
246 * flag as protection to indicate that a hardware operation
247 * on this IPv4 address is still pending.
248 */
249 if (addr->proto == QETH_PROT_IPV4) {
250 addr->in_progress = 1;
251 spin_unlock_bh(&card->ip_lock);
252 rc = qeth_l3_register_addr_entry(card, addr);
253 spin_lock_bh(&card->ip_lock);
254 addr->in_progress = 0;
255 } else
256 rc = qeth_l3_register_addr_entry(card, addr);
257
258 if (!rc || (rc == IPA_RC_DUPLICATE_IP_ADDRESS) ||
259 (rc == IPA_RC_LAN_OFFLINE)) {
260 addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
261 if (addr->ref_counter < 1) {
262 qeth_l3_deregister_addr_entry(card, addr);
263 hash_del(&addr->hnode);
264 kfree(addr);
265 }
266 } else {
267 hash_del(&addr->hnode);
268 kfree(addr);
269 }
270 }
271 return rc;
272 }
273
274 static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
275 {
276 struct qeth_ipaddr *addr;
277 struct hlist_node *tmp;
278 int i;
279
280 QETH_CARD_TEXT(card, 4, "clearip");
281
282 spin_lock_bh(&card->ip_lock);
283
284 hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
285 if (!recover) {
286 hash_del(&addr->hnode);
287 kfree(addr);
288 continue;
289 }
290 addr->disp_flag = QETH_DISP_ADDR_ADD;
291 }
292
293 spin_unlock_bh(&card->ip_lock);
294
295 spin_lock_bh(&card->mclock);
296
297 hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
298 hash_del(&addr->hnode);
299 kfree(addr);
300 }
301
302 spin_unlock_bh(&card->mclock);
303
304
305 }
306 static void qeth_l3_recover_ip(struct qeth_card *card)
307 {
308 struct qeth_ipaddr *addr;
309 struct hlist_node *tmp;
310 int i;
311 int rc;
312
313 QETH_CARD_TEXT(card, 4, "recovrip");
314
315 spin_lock_bh(&card->ip_lock);
316
317 hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
318 if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
319 if (addr->proto == QETH_PROT_IPV4) {
320 addr->in_progress = 1;
321 spin_unlock_bh(&card->ip_lock);
322 rc = qeth_l3_register_addr_entry(card, addr);
323 spin_lock_bh(&card->ip_lock);
324 addr->in_progress = 0;
325 } else
326 rc = qeth_l3_register_addr_entry(card, addr);
327
328 if (!rc) {
329 addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
330 if (addr->ref_counter < 1)
331 qeth_l3_delete_ip(card, addr);
332 } else {
333 hash_del(&addr->hnode);
334 kfree(addr);
335 }
336 }
337 }
338
339 spin_unlock_bh(&card->ip_lock);
340
341 }
342
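/* Issue a SETIPM or DELIPM IPA command to register or deregister a
 * multicast address (MAC plus IP) with the card.
 */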
343 static int qeth_l3_send_setdelmc(struct qeth_card *card,
344 struct qeth_ipaddr *addr, int ipacmd)
345 {
346 int rc;
347 struct qeth_cmd_buffer *iob;
348 struct qeth_ipa_cmd *cmd;
349
350 QETH_CARD_TEXT(card, 4, "setdelmc");
351
352 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
353 if (!iob)
354 return -ENOMEM;
355 cmd = __ipa_cmd(iob);
356 ether_addr_copy(cmd->data.setdelipm.mac, addr->mac);
357 if (addr->proto == QETH_PROT_IPV6)
358 memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
359 sizeof(struct in6_addr));
360 else
361 memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4);
362
363 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
364
365 return rc;
366 }
367
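/* Build a 16-byte binary netmask with 'len' leading one-bits, used to
 * convert an IPv6 prefix length into the mask format expected by
 * SETIP/DELIP.
 */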
368 static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len)
369 {
370 int i, j;
371 for (i = 0; i < 16; i++) {
372 j = (len) - (i * 8);
373 if (j >= 8)
374 netmask[i] = 0xff;
375 else if (j > 0)
376 netmask[i] = (u8)(0xFF00 >> j);
377 else
378 netmask[i] = 0;
379 }
380 }
381
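/* Select the SETIP/DELIP command flags for an address, depending on
 * whether it is a VIPA, an RXIP (takeover) address, or a normal
 * address that is subject to takeover.
 */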
382 static u32 qeth_l3_get_setdelip_flags(struct qeth_ipaddr *addr, bool set)
383 {
384 switch (addr->type) {
385 case QETH_IP_TYPE_RXIP:
386 return (set) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0;
387 case QETH_IP_TYPE_VIPA:
388 return (set) ? QETH_IPA_SETIP_VIPA_FLAG :
389 QETH_IPA_DELIP_VIPA_FLAG;
390 default:
391 return (set && addr->ipato) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0;
392 }
393 }
394
395 static int qeth_l3_send_setdelip(struct qeth_card *card,
396 struct qeth_ipaddr *addr,
397 enum qeth_ipa_cmds ipacmd)
398 {
399 struct qeth_cmd_buffer *iob;
400 struct qeth_ipa_cmd *cmd;
401 __u8 netmask[16];
402 u32 flags;
403
404 QETH_CARD_TEXT(card, 4, "setdelip");
405
406 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
407 if (!iob)
408 return -ENOMEM;
409 cmd = __ipa_cmd(iob);
410
411 flags = qeth_l3_get_setdelip_flags(addr, ipacmd == IPA_CMD_SETIP);
412 QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
413
414 if (addr->proto == QETH_PROT_IPV6) {
415 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
416 sizeof(struct in6_addr));
417 qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen);
418 memcpy(cmd->data.setdelip6.mask, netmask,
419 sizeof(struct in6_addr));
420 cmd->data.setdelip6.flags = flags;
421 } else {
422 memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
423 memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
424 cmd->data.setdelip4.flags = flags;
425 }
426
427 return qeth_send_ipa_cmd(card, iob, NULL, NULL);
428 }
429
430 static int qeth_l3_send_setrouting(struct qeth_card *card,
431 enum qeth_routing_types type, enum qeth_prot_versions prot)
432 {
433 int rc;
434 struct qeth_ipa_cmd *cmd;
435 struct qeth_cmd_buffer *iob;
436
437 QETH_CARD_TEXT(card, 4, "setroutg");
438 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
439 if (!iob)
440 return -ENOMEM;
441 cmd = __ipa_cmd(iob);
442 cmd->data.setrtg.type = (type);
443 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
444
445 return rc;
446 }
447
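/* Validate the requested routing type against the card type; on an
 * invalid combination reset it to NO_ROUTER and return -EINVAL.
 */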
448 static int qeth_l3_correct_routing_type(struct qeth_card *card,
449 enum qeth_routing_types *type, enum qeth_prot_versions prot)
450 {
451 if (card->info.type == QETH_CARD_TYPE_IQD) {
452 switch (*type) {
453 case NO_ROUTER:
454 case PRIMARY_CONNECTOR:
455 case SECONDARY_CONNECTOR:
456 case MULTICAST_ROUTER:
457 return 0;
458 default:
459 goto out_inval;
460 }
461 } else {
462 switch (*type) {
463 case NO_ROUTER:
464 case PRIMARY_ROUTER:
465 case SECONDARY_ROUTER:
466 return 0;
467 case MULTICAST_ROUTER:
468 if (qeth_is_ipafunc_supported(card, prot,
469 IPA_OSA_MC_ROUTER))
470 return 0;
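/* not supported: fall through to the invalid case */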
471 default:
472 goto out_inval;
473 }
474 }
475 out_inval:
476 *type = NO_ROUTER;
477 return -EINVAL;
478 }
479
480 int qeth_l3_setrouting_v4(struct qeth_card *card)
481 {
482 int rc;
483
484 QETH_CARD_TEXT(card, 3, "setrtg4");
485
486 rc = qeth_l3_correct_routing_type(card, &card->options.route4.type,
487 QETH_PROT_IPV4);
488 if (rc)
489 return rc;
490
491 rc = qeth_l3_send_setrouting(card, card->options.route4.type,
492 QETH_PROT_IPV4);
493 if (rc) {
494 card->options.route4.type = NO_ROUTER;
495 QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
496 " on %s. Type set to 'no router'.\n", rc,
497 QETH_CARD_IFNAME(card));
498 }
499 return rc;
500 }
501
502 int qeth_l3_setrouting_v6(struct qeth_card *card)
503 {
504 int rc = 0;
505
506 QETH_CARD_TEXT(card, 3, "setrtg6");
507
508 if (!qeth_is_supported(card, IPA_IPV6))
509 return 0;
510 rc = qeth_l3_correct_routing_type(card, &card->options.route6.type,
511 QETH_PROT_IPV6);
512 if (rc)
513 return rc;
514
515 rc = qeth_l3_send_setrouting(card, card->options.route6.type,
516 QETH_PROT_IPV6);
517 if (rc) {
518 card->options.route6.type = NO_ROUTER;
519 QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
520 " on %s. Type set to 'no router'.\n", rc,
521 QETH_CARD_IFNAME(card));
522 }
523 return rc;
524 }
525
526 /*
527 * IP address takeover related functions
528 */
529
530 /**
531 * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs.
532 *
533 * Caller must hold ip_lock.
534 */
535 void qeth_l3_update_ipato(struct qeth_card *card)
536 {
537 struct qeth_ipaddr *addr;
538 unsigned int i;
539
540 hash_for_each(card->ip_htable, i, addr, hnode) {
541 if (addr->type != QETH_IP_TYPE_NORMAL)
542 continue;
543 addr->ipato = qeth_l3_is_addr_covered_by_ipato(card, addr);
544 }
545 }
546
547 static void qeth_l3_clear_ipato_list(struct qeth_card *card)
548 {
549 struct qeth_ipato_entry *ipatoe, *tmp;
550
551 spin_lock_bh(&card->ip_lock);
552
553 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
554 list_del(&ipatoe->entry);
555 kfree(ipatoe);
556 }
557
558 qeth_l3_update_ipato(card);
559 spin_unlock_bh(&card->ip_lock);
560 }
561
562 int qeth_l3_add_ipato_entry(struct qeth_card *card,
563 struct qeth_ipato_entry *new)
564 {
565 struct qeth_ipato_entry *ipatoe;
566 int rc = 0;
567
568 QETH_CARD_TEXT(card, 2, "addipato");
569
570 spin_lock_bh(&card->ip_lock);
571
572 list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
573 if (ipatoe->proto != new->proto)
574 continue;
575 if (!memcmp(ipatoe->addr, new->addr,
576 (ipatoe->proto == QETH_PROT_IPV4) ? 4 : 16) &&
577 (ipatoe->mask_bits == new->mask_bits)) {
578 rc = -EEXIST;
579 break;
580 }
581 }
582
583 if (!rc) {
584 list_add_tail(&new->entry, &card->ipato.entries);
585 qeth_l3_update_ipato(card);
586 }
587
588 spin_unlock_bh(&card->ip_lock);
589
590 return rc;
591 }
592
593 int qeth_l3_del_ipato_entry(struct qeth_card *card,
594 enum qeth_prot_versions proto, u8 *addr,
595 int mask_bits)
596 {
597 struct qeth_ipato_entry *ipatoe, *tmp;
598 int rc = -ENOENT;
599
600 QETH_CARD_TEXT(card, 2, "delipato");
601
602 spin_lock_bh(&card->ip_lock);
603
604 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
605 if (ipatoe->proto != proto)
606 continue;
607 if (!memcmp(ipatoe->addr, addr,
608 (proto == QETH_PROT_IPV4) ? 4 : 16) &&
609 (ipatoe->mask_bits == mask_bits)) {
610 list_del(&ipatoe->entry);
611 qeth_l3_update_ipato(card);
612 kfree(ipatoe);
613 rc = 0;
614 }
615 }
616
617 spin_unlock_bh(&card->ip_lock);
618 return rc;
619 }
620
621 int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
622 enum qeth_ip_types type,
623 enum qeth_prot_versions proto)
624 {
625 struct qeth_ipaddr addr;
626 int rc;
627
628 qeth_l3_init_ipaddr(&addr, type, proto);
629 if (proto == QETH_PROT_IPV4)
630 memcpy(&addr.u.a4.addr, ip, 4);
631 else
632 memcpy(&addr.u.a6.addr, ip, 16);
633
634 spin_lock_bh(&card->ip_lock);
635 rc = add ? qeth_l3_add_ip(card, &addr) : qeth_l3_delete_ip(card, &addr);
636 spin_unlock_bh(&card->ip_lock);
637 return rc;
638 }
639
640 int qeth_l3_modify_hsuid(struct qeth_card *card, bool add)
641 {
642 struct qeth_ipaddr addr;
643 int rc, i;
644
645 qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
646 addr.u.a6.addr.s6_addr[0] = 0xfe;
647 addr.u.a6.addr.s6_addr[1] = 0x80;
648 for (i = 0; i < 8; i++)
649 addr.u.a6.addr.s6_addr[8+i] = card->options.hsuid[i];
650
651 spin_lock_bh(&card->ip_lock);
652 rc = add ? qeth_l3_add_ip(card, &addr) : qeth_l3_delete_ip(card, &addr);
653 spin_unlock_bh(&card->ip_lock);
654 return rc;
655 }
656
657 static int qeth_l3_register_addr_entry(struct qeth_card *card,
658 struct qeth_ipaddr *addr)
659 {
660 char buf[50];
661 int rc = 0;
662 int cnt = 3;
663
664 if (card->options.sniffer)
665 return 0;
666
667 if (addr->proto == QETH_PROT_IPV4) {
668 QETH_CARD_TEXT(card, 2, "setaddr4");
669 QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
670 } else if (addr->proto == QETH_PROT_IPV6) {
671 QETH_CARD_TEXT(card, 2, "setaddr6");
672 QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
673 QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
674 } else {
675 QETH_CARD_TEXT(card, 2, "setaddr?");
676 QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
677 }
678 do {
679 if (addr->is_multicast)
680 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_SETIPM);
681 else
682 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP);
683 if (rc)
684 QETH_CARD_TEXT(card, 2, "failed");
685 } while ((--cnt > 0) && rc);
686 if (rc) {
687 QETH_CARD_TEXT(card, 2, "FAILED");
688 qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
689 dev_warn(&card->gdev->dev,
690 "Registering IP address %s failed\n", buf);
691 }
692 return rc;
693 }
694
695 static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
696 struct qeth_ipaddr *addr)
697 {
698 int rc = 0;
699
700 if (card->options.sniffer)
701 return 0;
702
703 if (addr->proto == QETH_PROT_IPV4) {
704 QETH_CARD_TEXT(card, 2, "deladdr4");
705 QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
706 } else if (addr->proto == QETH_PROT_IPV6) {
707 QETH_CARD_TEXT(card, 2, "deladdr6");
708 QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
709 QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
710 } else {
711 QETH_CARD_TEXT(card, 2, "deladdr?");
712 QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
713 }
714 if (addr->is_multicast)
715 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM);
716 else
717 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP);
718 if (rc)
719 QETH_CARD_TEXT(card, 2, "failed");
720
721 return rc;
722 }
723
724 static int qeth_l3_setadapter_parms(struct qeth_card *card)
725 {
726 int rc = 0;
727
728 QETH_DBF_TEXT(SETUP, 2, "setadprm");
729
730 if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) {
731 rc = qeth_setadpparms_change_macaddr(card);
732 if (rc)
733 dev_warn(&card->gdev->dev, "Reading the adapter MAC"
734 " address failed\n");
735 }
736
737 return rc;
738 }
739
740 static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
741 {
742 int rc;
743
744 QETH_CARD_TEXT(card, 3, "ipaarp");
745
746 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
747 dev_info(&card->gdev->dev,
748 "ARP processing not supported on %s!\n",
749 QETH_CARD_IFNAME(card));
750 return 0;
751 }
752 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
753 IPA_CMD_ASS_START, 0);
754 if (rc) {
755 dev_warn(&card->gdev->dev,
756 "Starting ARP processing support for %s failed\n",
757 QETH_CARD_IFNAME(card));
758 }
759 return rc;
760 }
761
762 static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
763 {
764 int rc;
765
766 QETH_CARD_TEXT(card, 3, "stsrcmac");
767
768 if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
769 dev_info(&card->gdev->dev,
770 "Inbound source MAC-address not supported on %s\n",
771 QETH_CARD_IFNAME(card));
772 return -EOPNOTSUPP;
773 }
774
775 rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
776 IPA_CMD_ASS_START, 0);
777 if (rc)
778 dev_warn(&card->gdev->dev,
779 "Starting source MAC-address support for %s failed\n",
780 QETH_CARD_IFNAME(card));
781 return rc;
782 }
783
784 static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
785 {
786 int rc = 0;
787
788 QETH_CARD_TEXT(card, 3, "strtvlan");
789
790 if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
791 dev_info(&card->gdev->dev,
792 "VLAN not supported on %s\n", QETH_CARD_IFNAME(card));
793 return -EOPNOTSUPP;
794 }
795
796 rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
797 IPA_CMD_ASS_START, 0);
798 if (rc) {
799 dev_warn(&card->gdev->dev,
800 "Starting VLAN support for %s failed\n",
801 QETH_CARD_IFNAME(card));
802 } else {
803 dev_info(&card->gdev->dev, "VLAN enabled\n");
804 }
805 return rc;
806 }
807
808 static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
809 {
810 int rc;
811
812 QETH_CARD_TEXT(card, 3, "stmcast");
813
814 if (!qeth_is_supported(card, IPA_MULTICASTING)) {
815 dev_info(&card->gdev->dev,
816 "Multicast not supported on %s\n",
817 QETH_CARD_IFNAME(card));
818 return -EOPNOTSUPP;
819 }
820
821 rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
822 IPA_CMD_ASS_START, 0);
823 if (rc) {
824 dev_warn(&card->gdev->dev,
825 "Starting multicast support for %s failed\n",
826 QETH_CARD_IFNAME(card));
827 } else {
828 dev_info(&card->gdev->dev, "Multicast enabled\n");
829 card->dev->flags |= IFF_MULTICAST;
830 }
831 return rc;
832 }
833
834 static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
835 {
836 int rc;
837
838 QETH_CARD_TEXT(card, 3, "softipv6");
839
840 if (card->info.type == QETH_CARD_TYPE_IQD)
841 goto out;
842
843 rc = qeth_send_simple_setassparms(card, IPA_IPV6,
844 IPA_CMD_ASS_START, 3);
845 if (rc) {
846 dev_err(&card->gdev->dev,
847 "Activating IPv6 support for %s failed\n",
848 QETH_CARD_IFNAME(card));
849 return rc;
850 }
851 rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6,
852 IPA_CMD_ASS_START, 0);
853 if (rc) {
854 dev_err(&card->gdev->dev,
855 "Activating IPv6 support for %s failed\n",
856 QETH_CARD_IFNAME(card));
857 return rc;
858 }
859 rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU,
860 IPA_CMD_ASS_START, 0);
861 if (rc) {
862 dev_warn(&card->gdev->dev,
863 "Enabling the passthrough mode for %s failed\n",
864 QETH_CARD_IFNAME(card));
865 return rc;
866 }
867 out:
868 dev_info(&card->gdev->dev, "IPV6 enabled\n");
869 return 0;
870 }
871
872 static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
873 {
874 QETH_CARD_TEXT(card, 3, "strtipv6");
875
876 if (!qeth_is_supported(card, IPA_IPV6)) {
877 dev_info(&card->gdev->dev,
878 "IPv6 not supported on %s\n", QETH_CARD_IFNAME(card));
879 return 0;
880 }
881 return qeth_l3_softsetup_ipv6(card);
882 }
883
884 static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
885 {
886 int rc;
887
888 QETH_CARD_TEXT(card, 3, "stbrdcst");
889 card->info.broadcast_capable = 0;
890 if (!qeth_is_supported(card, IPA_FILTERING)) {
891 dev_info(&card->gdev->dev,
892 "Broadcast not supported on %s\n",
893 QETH_CARD_IFNAME(card));
894 rc = -EOPNOTSUPP;
895 goto out;
896 }
897 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
898 IPA_CMD_ASS_START, 0);
899 if (rc) {
900 dev_warn(&card->gdev->dev, "Enabling broadcast filtering for "
901 "%s failed\n", QETH_CARD_IFNAME(card));
902 goto out;
903 }
904
905 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
906 IPA_CMD_ASS_CONFIGURE, 1);
907 if (rc) {
908 dev_warn(&card->gdev->dev,
909 "Setting up broadcast filtering for %s failed\n",
910 QETH_CARD_IFNAME(card));
911 goto out;
912 }
913 card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
914 dev_info(&card->gdev->dev, "Broadcast enabled\n");
915 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
916 IPA_CMD_ASS_ENABLE, 1);
917 if (rc) {
918 dev_warn(&card->gdev->dev, "Setting up broadcast echo "
919 "filtering for %s failed\n", QETH_CARD_IFNAME(card));
920 goto out;
921 }
922 card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
923 out:
924 if (card->info.broadcast_capable)
925 card->dev->flags |= IFF_BROADCAST;
926 else
927 card->dev->flags &= ~IFF_BROADCAST;
928 return rc;
929 }
930
931 static int qeth_l3_start_ipassists(struct qeth_card *card)
932 {
933 QETH_CARD_TEXT(card, 3, "strtipas");
934
935 if (qeth_set_access_ctrl_online(card, 0))
936 return -EIO;
937 qeth_l3_start_ipa_arp_processing(card); /* go on*/
938 qeth_l3_start_ipa_source_mac(card); /* go on*/
939 qeth_l3_start_ipa_vlan(card); /* go on*/
940 qeth_l3_start_ipa_multicast(card); /* go on*/
941 qeth_l3_start_ipa_ipv6(card); /* go on*/
942 qeth_l3_start_ipa_broadcast(card); /* go on*/
943 return 0;
944 }
945
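/* Callback for IPA_CMD_CREATE_ADDR: take the card-generated unique ID
 * as the device address, or fall back to a random address on error.
 */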
946 static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
947 struct qeth_reply *reply, unsigned long data)
948 {
949 struct qeth_ipa_cmd *cmd;
950
951 cmd = (struct qeth_ipa_cmd *) data;
952 if (cmd->hdr.return_code == 0)
953 ether_addr_copy(card->dev->dev_addr,
954 cmd->data.create_destroy_addr.unique_id);
955 else
956 eth_random_addr(card->dev->dev_addr);
957
958 return 0;
959 }
960
961 static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
962 {
963 int rc = 0;
964 struct qeth_cmd_buffer *iob;
965 struct qeth_ipa_cmd *cmd;
966
967 QETH_DBF_TEXT(SETUP, 2, "hsrmac");
968
969 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
970 QETH_PROT_IPV6);
971 if (!iob)
972 return -ENOMEM;
973 cmd = __ipa_cmd(iob);
974 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
975 card->info.unique_id;
976
977 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb,
978 NULL);
979 return rc;
980 }
981
982 static int qeth_l3_get_unique_id_cb(struct qeth_card *card,
983 struct qeth_reply *reply, unsigned long data)
984 {
985 struct qeth_ipa_cmd *cmd;
986
987 cmd = (struct qeth_ipa_cmd *) data;
988 if (cmd->hdr.return_code == 0)
989 card->info.unique_id = *((__u16 *)
990 &cmd->data.create_destroy_addr.unique_id[6]);
991 else {
992 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
993 UNIQUE_ID_NOT_BY_CARD;
994 dev_warn(&card->gdev->dev, "The network adapter failed to "
995 "generate a unique ID\n");
996 }
997 return 0;
998 }
999
1000 static int qeth_l3_get_unique_id(struct qeth_card *card)
1001 {
1002 int rc = 0;
1003 struct qeth_cmd_buffer *iob;
1004 struct qeth_ipa_cmd *cmd;
1005
1006 QETH_DBF_TEXT(SETUP, 2, "guniqeid");
1007
1008 if (!qeth_is_supported(card, IPA_IPV6)) {
1009 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
1010 UNIQUE_ID_NOT_BY_CARD;
1011 return 0;
1012 }
1013
1014 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
1015 QETH_PROT_IPV6);
1016 if (!iob)
1017 return -ENOMEM;
1018 cmd = __ipa_cmd(iob);
1019 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
1020 card->info.unique_id;
1021
1022 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL);
1023 return rc;
1024 }
1025
1026 static int
1027 qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
1028 unsigned long data)
1029 {
1030 struct qeth_ipa_cmd *cmd;
1031 __u16 rc;
1032
1033 QETH_DBF_TEXT(SETUP, 2, "diastrcb");
1034
1035 cmd = (struct qeth_ipa_cmd *)data;
1036 rc = cmd->hdr.return_code;
1037 if (rc)
1038 QETH_CARD_TEXT_(card, 2, "dxter%x", rc);
1039 switch (cmd->data.diagass.action) {
1040 case QETH_DIAGS_CMD_TRACE_QUERY:
1041 break;
1042 case QETH_DIAGS_CMD_TRACE_DISABLE:
1043 switch (rc) {
1044 case 0:
1045 case IPA_RC_INVALID_SUBCMD:
1046 card->info.promisc_mode = SET_PROMISC_MODE_OFF;
1047 dev_info(&card->gdev->dev, "The HiperSockets network "
1048 "traffic analyzer is deactivated\n");
1049 break;
1050 default:
1051 break;
1052 }
1053 break;
1054 case QETH_DIAGS_CMD_TRACE_ENABLE:
1055 switch (rc) {
1056 case 0:
1057 card->info.promisc_mode = SET_PROMISC_MODE_ON;
1058 dev_info(&card->gdev->dev, "The HiperSockets network "
1059 "traffic analyzer is activated\n");
1060 break;
1061 case IPA_RC_HARDWARE_AUTH_ERROR:
1062 dev_warn(&card->gdev->dev, "The device is not "
1063 "authorized to run as a HiperSockets network "
1064 "traffic analyzer\n");
1065 break;
1066 case IPA_RC_TRACE_ALREADY_ACTIVE:
1067 dev_warn(&card->gdev->dev, "A HiperSockets "
1068 "network traffic analyzer is already "
1069 "active in the HiperSockets LAN\n");
1070 break;
1071 default:
1072 break;
1073 }
1074 break;
1075 default:
1076 QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n",
1077 cmd->data.diagass.action, QETH_CARD_IFNAME(card));
1078 }
1079
1080 return 0;
1081 }
1082
1083 static int
1084 qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
1085 {
1086 struct qeth_cmd_buffer *iob;
1087 struct qeth_ipa_cmd *cmd;
1088
1089 QETH_DBF_TEXT(SETUP, 2, "diagtrac");
1090
1091 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
1092 if (!iob)
1093 return -ENOMEM;
1094 cmd = __ipa_cmd(iob);
1095 cmd->data.diagass.subcmd_len = 16;
1096 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
1097 cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET;
1098 cmd->data.diagass.action = diags_cmd;
1099 return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
1100 }
1101
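/* Walk the IPv4 multicast list of the given in_device (caller holds
 * the RCU read lock) and make sure each group is present in the
 * multicast hash table, marking new entries for registration.
 */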
1102 static void
1103 qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
1104 {
1105 struct ip_mc_list *im4;
1106 struct qeth_ipaddr *tmp, *ipm;
1107
1108 QETH_CARD_TEXT(card, 4, "addmc");
1109
1110 tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
1111 if (!tmp)
1112 return;
1113
1114 for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
1115 im4 = rcu_dereference(im4->next_rcu)) {
1116 ip_eth_mc_map(im4->multiaddr, tmp->mac);
1117 tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
1118 tmp->is_multicast = 1;
1119
1120 ipm = qeth_l3_find_addr_by_ip(card, tmp);
1121 if (ipm) {
1122 /* for mcast, by-IP match means full match */
1123 ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
1124 } else {
1125 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
1126 if (!ipm)
1127 continue;
1128 ether_addr_copy(ipm->mac, tmp->mac);
1129 ipm->u.a4.addr = be32_to_cpu(im4->multiaddr);
1130 ipm->is_multicast = 1;
1131 ipm->disp_flag = QETH_DISP_ADDR_ADD;
1132 hash_add(card->ip_mc_htable,
1133 &ipm->hnode, qeth_l3_ipaddr_hash(ipm));
1134 }
1135 }
1136
1137 kfree(tmp);
1138 }
1139
1140 /* called with rcu_read_lock */
1141 static void qeth_l3_add_vlan_mc(struct qeth_card *card)
1142 {
1143 struct in_device *in_dev;
1144 u16 vid;
1145
1146 QETH_CARD_TEXT(card, 4, "addmcvl");
1147
1148 if (!qeth_is_supported(card, IPA_FULL_VLAN))
1149 return;
1150
1151 for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
1152 struct net_device *netdev;
1153
1154 netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
1155 vid);
1156 if (netdev == NULL ||
1157 !(netdev->flags & IFF_UP))
1158 continue;
1159 in_dev = __in_dev_get_rcu(netdev);
1160 if (!in_dev)
1161 continue;
1162 qeth_l3_add_mc_to_hash(card, in_dev);
1163 }
1164 }
1165
1166 static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
1167 {
1168 struct in_device *in4_dev;
1169
1170 QETH_CARD_TEXT(card, 4, "chkmcv4");
1171
1172 rcu_read_lock();
1173 in4_dev = __in_dev_get_rcu(card->dev);
1174 if (in4_dev == NULL)
1175 goto unlock;
1176 qeth_l3_add_mc_to_hash(card, in4_dev);
1177 qeth_l3_add_vlan_mc(card);
1178 unlock:
1179 rcu_read_unlock();
1180 }
1181
1182 static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
1183 struct inet6_dev *in6_dev)
1184 {
1185 struct qeth_ipaddr *ipm;
1186 struct ifmcaddr6 *im6;
1187 struct qeth_ipaddr *tmp;
1188
1189 QETH_CARD_TEXT(card, 4, "addmc6");
1190
1191 tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
1192 if (!tmp)
1193 return;
1194
1195 for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
1196 ipv6_eth_mc_map(&im6->mca_addr, tmp->mac);
1197 memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr,
1198 sizeof(struct in6_addr));
1199 tmp->is_multicast = 1;
1200
1201 ipm = qeth_l3_find_addr_by_ip(card, tmp);
1202 if (ipm) {
1203 /* for mcast, by-IP match means full match */
1204 ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
1205 continue;
1206 }
1207
1208 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
1209 if (!ipm)
1210 continue;
1211
1212 ether_addr_copy(ipm->mac, tmp->mac);
1213 memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
1214 sizeof(struct in6_addr));
1215 ipm->is_multicast = 1;
1216 ipm->disp_flag = QETH_DISP_ADDR_ADD;
1217 hash_add(card->ip_mc_htable,
1218 &ipm->hnode, qeth_l3_ipaddr_hash(ipm));
1219
1220 }
1221 kfree(tmp);
1222 }
1223
1224 /* called with rcu_read_lock */
1225 static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
1226 {
1227 struct inet6_dev *in_dev;
1228 u16 vid;
1229
1230 QETH_CARD_TEXT(card, 4, "admc6vl");
1231
1232 if (!qeth_is_supported(card, IPA_FULL_VLAN))
1233 return;
1234
1235 for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
1236 struct net_device *netdev;
1237
1238 netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
1239 vid);
1240 if (netdev == NULL ||
1241 !(netdev->flags & IFF_UP))
1242 continue;
1243 in_dev = in6_dev_get(netdev);
1244 if (!in_dev)
1245 continue;
1246 read_lock_bh(&in_dev->lock);
1247 qeth_l3_add_mc6_to_hash(card, in_dev);
1248 read_unlock_bh(&in_dev->lock);
1249 in6_dev_put(in_dev);
1250 }
1251 }
1252
1253 static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
1254 {
1255 struct inet6_dev *in6_dev;
1256
1257 QETH_CARD_TEXT(card, 4, "chkmcv6");
1258
1259 if (!qeth_is_supported(card, IPA_IPV6))
1260 return;
1261 in6_dev = in6_dev_get(card->dev);
1262 if (!in6_dev)
1263 return;
1264
1265 rcu_read_lock();
1266 read_lock_bh(&in6_dev->lock);
1267 qeth_l3_add_mc6_to_hash(card, in6_dev);
1268 qeth_l3_add_vlan_mc6(card);
1269 read_unlock_bh(&in6_dev->lock);
1270 rcu_read_unlock();
1271 in6_dev_put(in6_dev);
1272 }
1273
1274 static int qeth_l3_vlan_rx_add_vid(struct net_device *dev,
1275 __be16 proto, u16 vid)
1276 {
1277 struct qeth_card *card = dev->ml_priv;
1278
1279 set_bit(vid, card->active_vlans);
1280 return 0;
1281 }
1282
1283 static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
1284 __be16 proto, u16 vid)
1285 {
1286 struct qeth_card *card = dev->ml_priv;
1287
1288 QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
1289
1290 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
1291 QETH_CARD_TEXT(card, 3, "kidREC");
1292 return 0;
1293 }
1294 clear_bit(vid, card->active_vlans);
1295 qeth_l3_set_rx_mode(dev);
1296 return 0;
1297 }
1298
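/* Rebuild the link-layer information of an inbound layer-3 frame:
 * fake an Ethernet header, pick up the VLAN tag carried in the qeth
 * header and record the checksum offload result.
 */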
1299 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
1300 struct qeth_hdr *hdr)
1301 {
1302 if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
1303 u16 prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
1304 ETH_P_IP;
1305 unsigned char tg_addr[ETH_ALEN];
1306
1307 skb_reset_network_header(skb);
1308 switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
1309 case QETH_CAST_MULTICAST:
1310 if (prot == ETH_P_IP)
1311 ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
1312 else
1313 ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
1314
1315 card->stats.multicast++;
1316 break;
1317 case QETH_CAST_BROADCAST:
1318 ether_addr_copy(tg_addr, card->dev->broadcast);
1319 card->stats.multicast++;
1320 break;
1321 default:
1322 if (card->options.sniffer)
1323 skb->pkt_type = PACKET_OTHERHOST;
1324 ether_addr_copy(tg_addr, card->dev->dev_addr);
1325 }
1326
1327 if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
1328 card->dev->header_ops->create(skb, card->dev, prot,
1329 tg_addr, &hdr->hdr.l3.next_hop.rx.src_mac,
1330 skb->len);
1331 else
1332 card->dev->header_ops->create(skb, card->dev, prot,
1333 tg_addr, "FAKELL", skb->len);
1334 }
1335
1336 skb->protocol = eth_type_trans(skb, card->dev);
1337
1338 /* copy VLAN tag from hdr into skb */
1339 if (!card->options.sniffer &&
1340 (hdr->hdr.l3.ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
1341 QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
1342 u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
1343 hdr->hdr.l3.vlan_id :
1344 hdr->hdr.l3.next_hop.rx.vlan_id;
1345 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
1346 }
1347
1348 qeth_rx_csum(card, skb, hdr->hdr.l3.ext_flags);
1349 }
1350
1351 static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
1352 int budget, int *done)
1353 {
1354 int work_done = 0;
1355 struct sk_buff *skb;
1356 struct qeth_hdr *hdr;
1357 unsigned int len;
1358 __u16 magic;
1359
1360 *done = 0;
1361 WARN_ON_ONCE(!budget);
1362 while (budget) {
1363 skb = qeth_core_get_next_skb(card,
1364 &card->qdio.in_q->bufs[card->rx.b_index],
1365 &card->rx.b_element, &card->rx.e_offset, &hdr);
1366 if (!skb) {
1367 *done = 1;
1368 break;
1369 }
1370 switch (hdr->hdr.l3.id) {
1371 case QETH_HEADER_TYPE_LAYER3:
1372 magic = *(__u16 *)skb->data;
1373 if ((card->info.type == QETH_CARD_TYPE_IQD) &&
1374 (magic == ETH_P_AF_IUCV)) {
1375 skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
1376 len = skb->len;
1377 card->dev->header_ops->create(skb, card->dev, 0,
1378 card->dev->dev_addr, "FAKELL", len);
1379 skb_reset_mac_header(skb);
1380 netif_receive_skb(skb);
1381 } else {
1382 qeth_l3_rebuild_skb(card, skb, hdr);
1383 len = skb->len;
1384 napi_gro_receive(&card->napi, skb);
1385 }
1386 break;
1387 case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
1388 skb->protocol = eth_type_trans(skb, skb->dev);
1389 len = skb->len;
1390 netif_receive_skb(skb);
1391 break;
1392 default:
1393 dev_kfree_skb_any(skb);
1394 QETH_CARD_TEXT(card, 3, "inbunkno");
1395 QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
1396 continue;
1397 }
1398 work_done++;
1399 budget--;
1400 card->stats.rx_packets++;
1401 card->stats.rx_bytes += len;
1402 }
1403 return work_done;
1404 }
1405
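/* Take the interface down and step the card back through the
 * SOFTSETUP, HARDSETUP and DOWN states, clearing the IP tables,
 * pending commands and QDIO buffers along the way.
 */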
1406 static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
1407 {
1408 QETH_DBF_TEXT(SETUP, 2, "stopcard");
1409 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1410
1411 qeth_set_allowed_threads(card, 0, 1);
1412 if (card->options.sniffer &&
1413 (card->info.promisc_mode == SET_PROMISC_MODE_ON))
1414 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
1415 if (card->read.state == CH_STATE_UP &&
1416 card->write.state == CH_STATE_UP &&
1417 (card->state == CARD_STATE_UP)) {
1418 if (recovery_mode)
1419 qeth_l3_stop(card->dev);
1420 else {
1421 rtnl_lock();
1422 dev_close(card->dev);
1423 rtnl_unlock();
1424 }
1425 card->state = CARD_STATE_SOFTSETUP;
1426 }
1427 if (card->state == CARD_STATE_SOFTSETUP) {
1428 qeth_l3_clear_ip_htable(card, 1);
1429 qeth_clear_ipacmd_list(card);
1430 card->state = CARD_STATE_HARDSETUP;
1431 }
1432 if (card->state == CARD_STATE_HARDSETUP) {
1433 qeth_qdio_clear_card(card, 0);
1434 qeth_clear_qdio_buffers(card);
1435 qeth_clear_working_pool_list(card);
1436 card->state = CARD_STATE_DOWN;
1437 }
1438 if (card->state == CARD_STATE_DOWN) {
1439 qeth_clear_cmd_buffers(&card->read);
1440 qeth_clear_cmd_buffers(&card->write);
1441 }
1442 }
1443
1444 /*
1445 * Test for and switch promiscuous mode (on or off),
1446 * either for GuestLAN or the HiperSockets sniffer.
1447 */
1448 static void
1449 qeth_l3_handle_promisc_mode(struct qeth_card *card)
1450 {
1451 struct net_device *dev = card->dev;
1452
1453 if (((dev->flags & IFF_PROMISC) &&
1454 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
1455 (!(dev->flags & IFF_PROMISC) &&
1456 (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
1457 return;
1458
1459 if (card->info.guestlan) { /* Guestlan trace */
1460 if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
1461 qeth_setadp_promisc_mode(card);
1462 } else if (card->options.sniffer && /* HiperSockets trace */
1463 qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
1464 if (dev->flags & IFF_PROMISC) {
1465 QETH_CARD_TEXT(card, 3, "+promisc");
1466 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE);
1467 } else {
1468 QETH_CARD_TEXT(card, 3, "-promisc");
1469 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
1470 }
1471 }
1472 }
1473
1474 static void qeth_l3_set_rx_mode(struct net_device *dev)
1475 {
1476 struct qeth_card *card = dev->ml_priv;
1477 struct qeth_ipaddr *addr;
1478 struct hlist_node *tmp;
1479 int i, rc;
1480
1481 QETH_CARD_TEXT(card, 3, "setmulti");
1482 if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
1483 (card->state != CARD_STATE_UP))
1484 return;
1485 if (!card->options.sniffer) {
1486 spin_lock_bh(&card->mclock);
1487
1488 qeth_l3_add_multicast_ipv4(card);
1489 qeth_l3_add_multicast_ipv6(card);
1490
1491 hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
1492 switch (addr->disp_flag) {
1493 case QETH_DISP_ADDR_DELETE:
1494 rc = qeth_l3_deregister_addr_entry(card, addr);
1495 if (!rc || rc == IPA_RC_MC_ADDR_NOT_FOUND) {
1496 hash_del(&addr->hnode);
1497 kfree(addr);
1498 }
1499 break;
1500 case QETH_DISP_ADDR_ADD:
1501 rc = qeth_l3_register_addr_entry(card, addr);
1502 if (rc && rc != IPA_RC_LAN_OFFLINE) {
1503 hash_del(&addr->hnode);
1504 kfree(addr);
1505 break;
1506 }
1507 addr->ref_counter = 1;
1508 /* fall through */
1509 default:
1510 /* for next call to set_rx_mode(): */
1511 addr->disp_flag = QETH_DISP_ADDR_DELETE;
1512 }
1513 }
1514
1515 spin_unlock_bh(&card->mclock);
1516
1517 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
1518 return;
1519 }
1520 qeth_l3_handle_promisc_mode(card);
1521 }
1522
1523 static const char *qeth_l3_arp_get_error_cause(int *rc)
1524 {
1525 switch (*rc) {
1526 case QETH_IPA_ARP_RC_FAILED:
1527 *rc = -EIO;
1528 return "operation failed";
1529 case QETH_IPA_ARP_RC_NOTSUPP:
1530 *rc = -EOPNOTSUPP;
1531 return "operation not supported";
1532 case QETH_IPA_ARP_RC_OUT_OF_RANGE:
1533 *rc = -EINVAL;
1534 return "argument out of range";
1535 case QETH_IPA_ARP_RC_Q_NOTSUPP:
1536 *rc = -EOPNOTSUPP;
1537 return "query operation not supported";
1538 case QETH_IPA_ARP_RC_Q_NO_DATA:
1539 *rc = -ENOENT;
1540 return "no query data available";
1541 default:
1542 return "unknown error";
1543 }
1544 }
1545
1546 static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
1547 {
1548 int tmp;
1549 int rc;
1550
1551 QETH_CARD_TEXT(card, 3, "arpstnoe");
1552
1553 /*
1554 * currently GuestLAN only supports the ARP assist function
1555 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
1556 * thus we say EOPNOTSUPP for this ARP function
1557 */
1558 if (card->info.guestlan)
1559 return -EOPNOTSUPP;
1560 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
1561 return -EOPNOTSUPP;
1562 }
1563 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
1564 IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
1565 no_entries);
1566 if (rc) {
1567 tmp = rc;
1568 QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on "
1569 "%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card),
1570 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
1571 }
1572 return rc;
1573 }
1574
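/* Return the size of one entry in an ARP query reply. The format
 * depends on the IP version, on whether the media-specific bytes are
 * stripped, and on reply_bits (5 indicates the HiperSockets format).
 * Returns 0 for an unknown address type.
 */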
1575 static __u32 get_arp_entry_size(struct qeth_card *card,
1576 struct qeth_arp_query_data *qdata,
1577 struct qeth_arp_entrytype *type, __u8 strip_entries)
1578 {
1579 __u32 rc;
1580 __u8 is_hsi;
1581
1582 is_hsi = qdata->reply_bits == 5;
1583 if (type->ip == QETHARP_IP_ADDR_V4) {
1584 QETH_CARD_TEXT(card, 4, "arpev4");
1585 if (strip_entries) {
1586 rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5_short) :
1587 sizeof(struct qeth_arp_qi_entry7_short);
1588 } else {
1589 rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5) :
1590 sizeof(struct qeth_arp_qi_entry7);
1591 }
1592 } else if (type->ip == QETHARP_IP_ADDR_V6) {
1593 QETH_CARD_TEXT(card, 4, "arpev6");
1594 if (strip_entries) {
1595 rc = is_hsi ?
1596 sizeof(struct qeth_arp_qi_entry5_short_ipv6) :
1597 sizeof(struct qeth_arp_qi_entry7_short_ipv6);
1598 } else {
1599 rc = is_hsi ?
1600 sizeof(struct qeth_arp_qi_entry5_ipv6) :
1601 sizeof(struct qeth_arp_qi_entry7_ipv6);
1602 }
1603 } else {
1604 QETH_CARD_TEXT(card, 4, "arpinv");
1605 rc = 0;
1606 }
1607
1608 return rc;
1609 }
1610
1611 static int arpentry_matches_prot(struct qeth_arp_entrytype *type, __u16 prot)
1612 {
1613 return (type->ip == QETHARP_IP_ADDR_V4 && prot == QETH_PROT_IPV4) ||
1614 (type->ip == QETHARP_IP_ADDR_V6 && prot == QETH_PROT_IPV6);
1615 }
1616
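/* Callback for IPA_CMD_ASS_ARP_QUERY_INFO: copy the returned ARP
 * entries into the user buffer described by reply->param. Returns 1
 * while more reply parts are outstanding, 0 when done.
 */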
1617 static int qeth_l3_arp_query_cb(struct qeth_card *card,
1618 struct qeth_reply *reply, unsigned long data)
1619 {
1620 struct qeth_ipa_cmd *cmd;
1621 struct qeth_arp_query_data *qdata;
1622 struct qeth_arp_query_info *qinfo;
1623 int i;
1624 int e;
1625 int entrybytes_done;
1626 int stripped_bytes;
1627 __u8 do_strip_entries;
1628
1629 QETH_CARD_TEXT(card, 3, "arpquecb");
1630
1631 qinfo = (struct qeth_arp_query_info *) reply->param;
1632 cmd = (struct qeth_ipa_cmd *) data;
1633 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.prot_version);
1634 if (cmd->hdr.return_code) {
1635 QETH_CARD_TEXT(card, 4, "arpcberr");
1636 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
1637 return 0;
1638 }
1639 if (cmd->data.setassparms.hdr.return_code) {
1640 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
1641 QETH_CARD_TEXT(card, 4, "setaperr");
1642 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
1643 return 0;
1644 }
1645 qdata = &cmd->data.setassparms.data.query_arp;
1646 QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries);
1647
1648 do_strip_entries = (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) > 0;
1649 stripped_bytes = do_strip_entries ? QETH_QARP_MEDIASPECIFIC_BYTES : 0;
1650 entrybytes_done = 0;
1651 for (e = 0; e < qdata->no_entries; ++e) {
1652 char *cur_entry;
1653 __u32 esize;
1654 struct qeth_arp_entrytype *etype;
1655
1656 cur_entry = &qdata->data + entrybytes_done;
1657 etype = &((struct qeth_arp_qi_entry5 *) cur_entry)->type;
1658 if (!arpentry_matches_prot(etype, cmd->hdr.prot_version)) {
1659 QETH_CARD_TEXT(card, 4, "pmis");
1660 QETH_CARD_TEXT_(card, 4, "%i", etype->ip);
1661 break;
1662 }
1663 esize = get_arp_entry_size(card, qdata, etype,
1664 do_strip_entries);
1665 QETH_CARD_TEXT_(card, 5, "esz%i", esize);
1666 if (!esize)
1667 break;
1668
1669 if ((qinfo->udata_len - qinfo->udata_offset) < esize) {
1670 QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM);
1671 cmd->hdr.return_code = IPA_RC_ENOMEM;
1672 goto out_error;
1673 }
1674
1675 memcpy(qinfo->udata + qinfo->udata_offset,
1676 &qdata->data + entrybytes_done + stripped_bytes,
1677 esize);
1678 entrybytes_done += esize + stripped_bytes;
1679 qinfo->udata_offset += esize;
1680 ++qinfo->no_entries;
1681 }
1682 /* check if all replies received ... */
1683 if (cmd->data.setassparms.hdr.seq_no <
1684 cmd->data.setassparms.hdr.number_of_replies)
1685 return 1;
1686 QETH_CARD_TEXT_(card, 4, "nove%i", qinfo->no_entries);
1687 memcpy(qinfo->udata, &qinfo->no_entries, 4);
1688 /* keep STRIP_ENTRIES flag so the user program can distinguish
1689 * stripped entries from normal ones */
1690 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
1691 qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
1692 memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2);
1693 QETH_CARD_TEXT_(card, 4, "rc%i", 0);
1694 return 0;
1695 out_error:
1696 i = 0;
1697 memcpy(qinfo->udata, &i, 4);
1698 return 0;
1699 }
1700
1701 static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card,
1702 struct qeth_cmd_buffer *iob, int len,
1703 int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
1704 unsigned long),
1705 void *reply_param)
1706 {
1707 QETH_CARD_TEXT(card, 4, "sendarp");
1708
1709 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
1710 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
1711 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
1712 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
1713 reply_cb, reply_param);
1714 }
1715
1716 static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
1717 enum qeth_prot_versions prot,
1718 struct qeth_arp_query_info *qinfo)
1719 {
1720 struct qeth_cmd_buffer *iob;
1721 struct qeth_ipa_cmd *cmd;
1722 int tmp;
1723 int rc;
1724
1725 QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot);
1726
1727 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
1728 IPA_CMD_ASS_ARP_QUERY_INFO,
1729 sizeof(struct qeth_arp_query_data)
1730 - sizeof(char),
1731 prot);
1732 if (!iob)
1733 return -ENOMEM;
1734 cmd = __ipa_cmd(iob);
1735 cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
1736 cmd->data.setassparms.data.query_arp.reply_bits = 0;
1737 cmd->data.setassparms.data.query_arp.no_entries = 0;
1738 rc = qeth_l3_send_ipa_arp_cmd(card, iob,
1739 QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
1740 qeth_l3_arp_query_cb, (void *)qinfo);
1741 if (rc) {
1742 tmp = rc;
1743 QETH_DBF_MESSAGE(2,
1744 "Error while querying ARP cache on %s: %s "
1745 "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
1746 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
1747 }
1748
1749 return rc;
1750 }
1751
1752 static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
1753 {
1754 struct qeth_arp_query_info qinfo = {0, };
1755 int rc;
1756
1757 QETH_CARD_TEXT(card, 3, "arpquery");
1758
1759 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
1760 IPA_ARP_PROCESSING)) {
1761 QETH_CARD_TEXT(card, 3, "arpqnsup");
1762 rc = -EOPNOTSUPP;
1763 goto out;
1764 }
1765 /* get size of userspace buffer and mask_bits -> 6 bytes */
1766 if (copy_from_user(&qinfo, udata, 6)) {
1767 rc = -EFAULT;
1768 goto out;
1769 }
1770 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
1771 if (!qinfo.udata) {
1772 rc = -ENOMEM;
1773 goto out;
1774 }
1775 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
1776 rc = qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV4, &qinfo);
1777 if (rc) {
1778 if (copy_to_user(udata, qinfo.udata, 4))
1779 rc = -EFAULT;
1780 goto free_and_out;
1781 }
1782 if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) {
1783 /* fails in case of GuestLAN QDIO mode */
1784 qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo);
1785 }
1786 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) {
1787 QETH_CARD_TEXT(card, 4, "qactf");
1788 rc = -EFAULT;
1789 goto free_and_out;
1790 }
1791 QETH_CARD_TEXT(card, 4, "qacts");
1792
1793 free_and_out:
1794 kfree(qinfo.udata);
1795 out:
1796 return rc;
1797 }
1798
1799 static int qeth_l3_arp_add_entry(struct qeth_card *card,
1800 struct qeth_arp_cache_entry *entry)
1801 {
1802 struct qeth_cmd_buffer *iob;
1803 char buf[16];
1804 int tmp;
1805 int rc;
1806
1807 QETH_CARD_TEXT(card, 3, "arpadent");
1808
1809 /*
1810 * currently GuestLAN only supports the ARP assist function
1811 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
1812 * thus we say EOPNOTSUPP for this ARP function
1813 */
1814 if (card->info.guestlan)
1815 return -EOPNOTSUPP;
1816 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
1817 return -EOPNOTSUPP;
1818 }
1819
1820 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
1821 IPA_CMD_ASS_ARP_ADD_ENTRY,
1822 sizeof(struct qeth_arp_cache_entry),
1823 QETH_PROT_IPV4);
1824 if (!iob)
1825 return -ENOMEM;
1826 rc = qeth_send_setassparms(card, iob,
1827 sizeof(struct qeth_arp_cache_entry),
1828 (unsigned long) entry,
1829 qeth_setassparms_cb, NULL);
1830 if (rc) {
1831 tmp = rc;
1832 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
1833 QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s "
1834 "on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
1835 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
1836 }
1837 return rc;
1838 }
1839
1840 static int qeth_l3_arp_remove_entry(struct qeth_card *card,
1841 struct qeth_arp_cache_entry *entry)
1842 {
1843 struct qeth_cmd_buffer *iob;
1844 char buf[16] = {0, };
1845 int tmp;
1846 int rc;
1847
1848 QETH_CARD_TEXT(card, 3, "arprment");
1849
1850 /*
1851 * currently GuestLAN only supports the ARP assist function
1852 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
1853 * thus we say EOPNOTSUPP for this ARP function
1854 */
1855 if (card->info.guestlan)
1856 return -EOPNOTSUPP;
1857 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
1858 return -EOPNOTSUPP;
1859 }
1860 memcpy(buf, entry, 12);
1861 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
1862 IPA_CMD_ASS_ARP_REMOVE_ENTRY,
1863 12,
1864 QETH_PROT_IPV4);
1865 if (!iob)
1866 return -ENOMEM;
1867 rc = qeth_send_setassparms(card, iob,
1868 12, (unsigned long)buf,
1869 qeth_setassparms_cb, NULL);
1870 if (rc) {
1871 tmp = rc;
1872 memset(buf, 0, 16);
1873 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
1874 QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s"
1875 " on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
1876 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
1877 }
1878 return rc;
1879 }
1880
1881 static int qeth_l3_arp_flush_cache(struct qeth_card *card)
1882 {
1883 int rc;
1884 int tmp;
1885
1886 QETH_CARD_TEXT(card, 3, "arpflush");
1887
1888 /*
1889 * currently GuestLAN only supports the ARP assist function
1890 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
1891 * thus we say EOPNOTSUPP for this ARP function
1892 */
1893 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
1894 return -EOPNOTSUPP;
1895 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
1896 return -EOPNOTSUPP;
1897 }
1898 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
1899 IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
1900 if (rc) {
1901 tmp = rc;
1902 QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s "
1903 "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
1904 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
1905 }
1906 return rc;
1907 }
1908
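/* Dispatcher for the private SIOC_QETH_ARP_* ioctls. Every sub-command
 * requires CAP_NET_ADMIN; unknown commands are rejected with -EOPNOTSUPP.
 */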
1909 static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1910 {
1911 struct qeth_card *card = dev->ml_priv;
1912 struct qeth_arp_cache_entry arp_entry;
1913 int rc = 0;
1914
1915 switch (cmd) {
1916 case SIOC_QETH_ARP_SET_NO_ENTRIES:
1917 if (!capable(CAP_NET_ADMIN)) {
1918 rc = -EPERM;
1919 break;
1920 }
1921 rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
1922 break;
1923 case SIOC_QETH_ARP_QUERY_INFO:
1924 if (!capable(CAP_NET_ADMIN)) {
1925 rc = -EPERM;
1926 break;
1927 }
1928 rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data);
1929 break;
1930 case SIOC_QETH_ARP_ADD_ENTRY:
1931 if (!capable(CAP_NET_ADMIN)) {
1932 rc = -EPERM;
1933 break;
1934 }
1935 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
1936 sizeof(struct qeth_arp_cache_entry)))
1937 rc = -EFAULT;
1938 else
1939 rc = qeth_l3_arp_add_entry(card, &arp_entry);
1940 break;
1941 case SIOC_QETH_ARP_REMOVE_ENTRY:
1942 if (!capable(CAP_NET_ADMIN)) {
1943 rc = -EPERM;
1944 break;
1945 }
1946 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
1947 sizeof(struct qeth_arp_cache_entry)))
1948 rc = -EFAULT;
1949 else
1950 rc = qeth_l3_arp_remove_entry(card, &arp_entry);
1951 break;
1952 case SIOC_QETH_ARP_FLUSH_CACHE:
1953 if (!capable(CAP_NET_ADMIN)) {
1954 rc = -EPERM;
1955 break;
1956 }
1957 rc = qeth_l3_arp_flush_cache(card);
1958 break;
1959 default:
1960 rc = -EOPNOTSUPP;
1961 }
1962 return rc;
1963 }
1964
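/* Determine the RTN_* cast type of an outgoing skb: prefer the neighbour
 * entry of the attached dst, then fall back to the destination IP address
 * and finally to the destination MAC address.
 */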
1965 static int qeth_l3_get_cast_type(struct sk_buff *skb)
1966 {
1967 struct neighbour *n = NULL;
1968 struct dst_entry *dst;
1969
1970 rcu_read_lock();
1971 dst = skb_dst(skb);
1972 if (dst)
1973 n = dst_neigh_lookup_skb(dst, skb);
1974 if (n) {
1975 int cast_type = n->type;
1976
1977 rcu_read_unlock();
1978 neigh_release(n);
1979 if ((cast_type == RTN_BROADCAST) ||
1980 (cast_type == RTN_MULTICAST) ||
1981 (cast_type == RTN_ANYCAST))
1982 return cast_type;
1983 return RTN_UNICAST;
1984 }
1985 rcu_read_unlock();
1986
1987 /* no neighbour (e.g. AF_PACKET), fall back to the target's IP address ... */
1988 if (be16_to_cpu(skb->protocol) == ETH_P_IPV6)
1989 return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
1990 RTN_MULTICAST : RTN_UNICAST;
1991 else if (be16_to_cpu(skb->protocol) == ETH_P_IP)
1992 return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
1993 RTN_MULTICAST : RTN_UNICAST;
1994
1995 /* ... and MAC address */
1996 if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast))
1997 return RTN_BROADCAST;
1998 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
1999 return RTN_MULTICAST;
2000
2001 /* default to unicast */
2002 return RTN_UNICAST;
2003 }
2004
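/* Build a unicast IPv6 layer-3 header for AF_IUCV traffic: the next-hop
 * address is a link-local style address that carries the destination user ID
 * from the IUCV transport header in its low eight bytes.
 */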
2005 static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb,
2006 unsigned int data_len)
2007 {
2008 char daddr[16];
2009 struct af_iucv_trans_hdr *iucv_hdr;
2010
2011 memset(hdr, 0, sizeof(struct qeth_hdr));
2012 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
2013 hdr->hdr.l3.length = data_len;
2014 hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
2015
2016 iucv_hdr = (struct af_iucv_trans_hdr *)(skb_mac_header(skb) + ETH_HLEN);
2017 memset(daddr, 0, sizeof(daddr));
2018 daddr[0] = 0xfe;
2019 daddr[1] = 0x80;
2020 memcpy(&daddr[8], iucv_hdr->destUserID, 8);
2021 memcpy(hdr->hdr.l3.next_hop.ipv6_addr, daddr, 16);
2022 }
2023
2024 static u8 qeth_l3_cast_type_to_flag(int cast_type)
2025 {
2026 if (cast_type == RTN_MULTICAST)
2027 return QETH_CAST_MULTICAST;
2028 if (cast_type == RTN_ANYCAST)
2029 return QETH_CAST_ANYCAST;
2030 if (cast_type == RTN_BROADCAST)
2031 return QETH_CAST_BROADCAST;
2032 return QETH_CAST_UNICAST;
2033 }
2034
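/* Fill the layer-3 TX header: propagate VLAN tag information, request TX
 * checksum offload for CHECKSUM_PARTIAL skbs, encode the cast type and set
 * the next-hop address from the attached route (or the destination address).
 */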
2035 static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2036 struct sk_buff *skb, int ipv, int cast_type,
2037 unsigned int data_len)
2038 {
2039 memset(hdr, 0, sizeof(struct qeth_hdr));
2040 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
2041 hdr->hdr.l3.length = data_len;
2042
2043 /*
2044 * Set the VLAN info now, before this location is overwritten with the
2045 * next-hop IP: v6 uses passthrough, v4 sets the tag in the QDIO header.
2046 */
2047 if (skb_vlan_tag_present(skb)) {
2048 if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD))
2049 hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME;
2050 else
2051 hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG;
2052 hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
2053 }
2054
2055 if (!skb_is_gso(skb) && skb->ip_summed == CHECKSUM_PARTIAL) {
2056 qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
2057 if (card->options.performance_stats)
2058 card->perf_stats.tx_csum++;
2059 }
2060
2061 /* OSA only: */
2062 if (!ipv) {
2063 hdr->hdr.l3.flags = QETH_HDR_PASSTHRU;
2064 if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
2065 skb->dev->broadcast))
2066 hdr->hdr.l3.flags |= QETH_CAST_BROADCAST;
2067 else
2068 hdr->hdr.l3.flags |= (cast_type == RTN_MULTICAST) ?
2069 QETH_CAST_MULTICAST : QETH_CAST_UNICAST;
2070 return;
2071 }
2072
2073 hdr->hdr.l3.flags = qeth_l3_cast_type_to_flag(cast_type);
2074 rcu_read_lock();
2075 if (ipv == 4) {
2076 struct rtable *rt = skb_rtable(skb);
2077
2078 *((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
2079 rt_nexthop(rt, ip_hdr(skb)->daddr) :
2080 ip_hdr(skb)->daddr;
2081 } else {
2082 /* IPv6 */
2083 const struct rt6_info *rt = skb_rt6_info(skb);
2084 const struct in6_addr *next_hop;
2085
2086 if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
2087 next_hop = &rt->rt6i_gateway;
2088 else
2089 next_hop = &ipv6_hdr(skb)->daddr;
2090 memcpy(hdr->hdr.l3.next_hop.ipv6_addr, next_hop, 16);
2091
2092 hdr->hdr.l3.flags |= QETH_HDR_IPV6;
2093 if (card->info.type != QETH_CARD_TYPE_IQD)
2094 hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
2095 }
2096 rcu_read_unlock();
2097 }
2098
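/* Convert the layer-3 header into its TSO variant: fill the TSO extension
 * (MSS, header and payload lengths), prime the TCP pseudo-header checksum
 * and zero the IP length/checksum fields for large send offload.
 */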
2099 static void qeth_tso_fill_header(struct qeth_card *card,
2100 struct qeth_hdr *qhdr, struct sk_buff *skb)
2101 {
2102 struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
2103 struct tcphdr *tcph = tcp_hdr(skb);
2104 struct iphdr *iph = ip_hdr(skb);
2105 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2106
2107 /* fix header to TSO values ... */
2108 hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
2109 /* set values which are fixed for the first approach ... */
2110 hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
2111 hdr->ext.imb_hdr_no = 1;
2112 hdr->ext.hdr_type = 1;
2113 hdr->ext.hdr_version = 1;
2114 hdr->ext.hdr_len = 28;
2115 /* insert the non-fixed values */
2116 hdr->ext.mss = skb_shinfo(skb)->gso_size;
2117 hdr->ext.dg_hdr_len = (__u16)(ip_hdrlen(skb) + tcp_hdrlen(skb));
2118 hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
2119 sizeof(struct qeth_hdr_tso));
2120 tcph->check = 0;
2121 if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) {
2122 ip6h->payload_len = 0;
2123 tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
2124 0, IPPROTO_TCP, 0);
2125 } else {
2126 /* OSA wants us to set these values ... */
2127 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
2128 0, IPPROTO_TCP, 0);
2129 iph->tot_len = 0;
2130 iph->check = 0;
2131 }
2132 }
2133
2134 /**
2135 * qeth_l3_get_elements_no_tso() - find number of SBALEs for skb data for tso
2136 * @card: qeth card structure, to check max. elems.
2137 * @skb: SKB address
2138 * @extra_elems: extra elems needed, to check against max.
2139 *
2140 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
2141 * skb data, including linear part and fragments, but excluding TCP header.
2142 * (Exclusion of TCP header distinguishes it from qeth_get_elements_no().)
2143 * Checks if the result plus extra_elems fits under the limit for the card.
2144 * Returns 0 if it does not.
2145 * Note: extra_elems is not included in the returned result.
2146 */
2147 static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
2148 struct sk_buff *skb, int extra_elems)
2149 {
2150 addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
2151 addr_t end = (addr_t)skb->data + skb_headlen(skb);
2152 int elements = qeth_get_elements_for_frags(skb);
2153
2154 if (start != end)
2155 elements += qeth_get_elements_for_range(start, end);
2156
2157 if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
2158 QETH_DBF_MESSAGE(2,
2159 "Invalid size of TSO IP packet (Number=%d / Length=%d). Discarded.\n",
2160 elements + extra_elems, skb->len);
2161 return 0;
2162 }
2163 return elements;
2164 }
2165
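/* Fast TX path (IQD, and non-GSO IPv4 on OSA): strip the Ethernet header,
 * reuse its room for the HW header where possible, fill the layer-3 (or
 * AF_IUCV) header and queue the skb. On -EBUSY the Ethernet header is
 * restored so that the unmodified frame can be retransmitted.
 */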
2166 static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
2167 struct qeth_qdio_out_q *queue, int ipv,
2168 int cast_type)
2169 {
2170 const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
2171 unsigned int frame_len, elements;
2172 unsigned char eth_hdr[ETH_HLEN];
2173 struct qeth_hdr *hdr = NULL;
2174 unsigned int hd_len = 0;
2175 int push_len, rc;
2176 bool is_sg;
2177
2178 /* re-use the L2 header area for the HW header: */
2179 rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
2180 if (rc)
2181 return rc;
2182 skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN);
2183 skb_pull(skb, ETH_HLEN);
2184 frame_len = skb->len;
2185
2186 push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, 0,
2187 &elements);
2188 if (push_len < 0)
2189 return push_len;
2190 if (!push_len) {
2191 /* hdr was added discontiguous from skb->data */
2192 hd_len = hw_hdr_len;
2193 }
2194
2195 if (skb->protocol == htons(ETH_P_AF_IUCV))
2196 qeth_l3_fill_af_iucv_hdr(hdr, skb, frame_len);
2197 else
2198 qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len);
2199
2200 is_sg = skb_is_nonlinear(skb);
2201 if (IS_IQD(card)) {
2202 rc = qeth_do_send_packet_fast(queue, skb, hdr, 0, hd_len);
2203 } else {
2204 /* TODO: drop skb_orphan() once TX completion is fast enough */
2205 skb_orphan(skb);
2206 rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len,
2207 elements);
2208 }
2209
2210 if (!rc) {
2211 if (card->options.performance_stats) {
2212 card->perf_stats.buf_elements_sent += elements;
2213 if (is_sg)
2214 card->perf_stats.sg_skbs_sent++;
2215 }
2216 } else {
2217 if (!push_len)
2218 kmem_cache_free(qeth_core_header_cache, hdr);
2219 if (rc == -EBUSY) {
2220 /* roll back to ETH header */
2221 skb_pull(skb, push_len);
2222 skb_push(skb, ETH_HLEN);
2223 skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN);
2224 }
2225 }
2226 return rc;
2227 }
2228
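/* Generic TX path: copy the skb with headroom for a (TSO) header, splice the
 * VLAN tag into the L2 header for non-IPv4 frames, linearize if the fragment
 * list would exceed the buffer element limit, then build the qeth header and
 * send. The original skb is freed on success, the copy on error.
 */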
2229 static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
2230 struct qeth_qdio_out_q *queue, int ipv, int cast_type)
2231 {
2232 int elements, len, rc;
2233 __be16 *tag;
2234 struct qeth_hdr *hdr = NULL;
2235 int hdr_elements = 0;
2236 struct sk_buff *new_skb = NULL;
2237 int tx_bytes = skb->len;
2238 unsigned int hd_len;
2239 bool use_tso, is_sg;
2240
2241 /* Ignore segment size from skb_is_gso(), 1 page is always used. */
2242 use_tso = skb_is_gso(skb) &&
2243 (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4);
2244
2245 /* create a clone with writeable headroom */
2246 new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) +
2247 VLAN_HLEN);
2248 if (!new_skb)
2249 return -ENOMEM;
2250
2251 if (ipv == 4) {
2252 skb_pull(new_skb, ETH_HLEN);
2253 } else if (skb_vlan_tag_present(new_skb)) {
2254 skb_push(new_skb, VLAN_HLEN);
2255 skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
2256 skb_copy_to_linear_data_offset(new_skb, 4,
2257 new_skb->data + 8, 4);
2258 skb_copy_to_linear_data_offset(new_skb, 8,
2259 new_skb->data + 12, 4);
2260 tag = (__be16 *)(new_skb->data + 12);
2261 *tag = cpu_to_be16(ETH_P_8021Q);
2262 *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb));
2263 }
2264
2265 /* fix hardware limitation: as long as we do not have SBAL
2266 * chaining we cannot send long fragment lists
2267 */
2268 if ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
2269 (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0))) {
2270 rc = skb_linearize(new_skb);
2271
2272 if (card->options.performance_stats) {
2273 if (rc)
2274 card->perf_stats.tx_linfail++;
2275 else
2276 card->perf_stats.tx_lin++;
2277 }
2278 if (rc)
2279 goto out;
2280 }
2281
2282 if (use_tso) {
2283 hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
2284 memset(hdr, 0, sizeof(struct qeth_hdr_tso));
2285 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
2286 new_skb->len - sizeof(struct qeth_hdr_tso));
2287 qeth_tso_fill_header(card, hdr, new_skb);
2288 hdr_elements++;
2289 } else {
2290 hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
2291 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
2292 new_skb->len - sizeof(struct qeth_hdr));
2293 }
2294
2295 elements = use_tso ?
2296 qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
2297 qeth_get_elements_no(card, new_skb, hdr_elements, 0);
2298 if (!elements) {
2299 rc = -E2BIG;
2300 goto out;
2301 }
2302 elements += hdr_elements;
2303
2304 if (use_tso) {
2305 hd_len = sizeof(struct qeth_hdr_tso) +
2306 ip_hdrlen(new_skb) + tcp_hdrlen(new_skb);
2307 len = hd_len;
2308 } else {
2309 hd_len = 0;
2310 len = sizeof(struct qeth_hdr_layer3);
2311 }
2312
2313 if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) {
2314 rc = -EINVAL;
2315 goto out;
2316 }
2317
2318 is_sg = skb_is_nonlinear(new_skb);
2319 rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, hd_len,
2320 elements);
2321 out:
2322 if (!rc) {
2323 if (new_skb != skb)
2324 dev_kfree_skb_any(skb);
2325 if (card->options.performance_stats) {
2326 card->perf_stats.buf_elements_sent += elements;
2327 if (is_sg)
2328 card->perf_stats.sg_skbs_sent++;
2329 if (use_tso) {
2330 card->perf_stats.large_send_bytes += tx_bytes;
2331 card->perf_stats.large_send_cnt++;
2332 }
2333 }
2334 } else {
2335 if (new_skb != skb)
2336 dev_kfree_skb_any(new_skb);
2337 }
2338 return rc;
2339 }
2340
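/* ndo_start_xmit handler: drop frames the card cannot transmit (sniffer
 * mode, CQ restrictions on HiperSockets, card not up, unsupported
 * broadcast), select the outbound queue and hand the skb to the offload or
 * generic TX path; -EBUSY is translated into NETDEV_TX_BUSY so the stack
 * retries the frame.
 */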
2341 static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
2342 struct net_device *dev)
2343 {
2344 int cast_type = qeth_l3_get_cast_type(skb);
2345 struct qeth_card *card = dev->ml_priv;
2346 int ipv = qeth_get_ip_version(skb);
2347 struct qeth_qdio_out_q *queue;
2348 int tx_bytes = skb->len;
2349 int rc;
2350
2351 if (IS_IQD(card)) {
2352 if (card->options.sniffer)
2353 goto tx_drop;
2354 if ((card->options.cq != QETH_CQ_ENABLED && !ipv) ||
2355 (card->options.cq == QETH_CQ_ENABLED &&
2356 skb->protocol != htons(ETH_P_AF_IUCV)))
2357 goto tx_drop;
2358 }
2359
2360 if (card->state != CARD_STATE_UP || !card->lan_online) {
2361 card->stats.tx_carrier_errors++;
2362 goto tx_drop;
2363 }
2364
2365 if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable)
2366 goto tx_drop;
2367
2368 queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
2369
2370 if (card->options.performance_stats) {
2371 card->perf_stats.outbound_cnt++;
2372 card->perf_stats.outbound_start_time = qeth_get_micros();
2373 }
2374 netif_stop_queue(dev);
2375
2376 if (IS_IQD(card) || (!skb_is_gso(skb) && ipv == 4))
2377 rc = qeth_l3_xmit_offload(card, skb, queue, ipv, cast_type);
2378 else
2379 rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type);
2380
2381 if (!rc) {
2382 card->stats.tx_packets++;
2383 card->stats.tx_bytes += tx_bytes;
2384 if (card->options.performance_stats)
2385 card->perf_stats.outbound_time += qeth_get_micros() -
2386 card->perf_stats.outbound_start_time;
2387 netif_wake_queue(dev);
2388 return NETDEV_TX_OK;
2389 } else if (rc == -EBUSY) {
2390 return NETDEV_TX_BUSY;
2391 } /* else fall through */
2392
2393 tx_drop:
2394 card->stats.tx_dropped++;
2395 card->stats.tx_errors++;
2396 dev_kfree_skb_any(skb);
2397 netif_wake_queue(dev);
2398 return NETDEV_TX_OK;
2399 }
2400
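/* Bring the interface up from SOFTSETUP state: start the TX queue, stop the
 * QDIO data-device interrupt and enable/kick NAPI for polled RX processing.
 */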
2401 static int __qeth_l3_open(struct net_device *dev)
2402 {
2403 struct qeth_card *card = dev->ml_priv;
2404 int rc = 0;
2405
2406 QETH_CARD_TEXT(card, 4, "qethopen");
2407 if (card->state == CARD_STATE_UP)
2408 return rc;
2409 if (card->state != CARD_STATE_SOFTSETUP)
2410 return -ENODEV;
2411 card->data.state = CH_STATE_UP;
2412 card->state = CARD_STATE_UP;
2413 netif_start_queue(dev);
2414
2415 if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
2416 napi_enable(&card->napi);
2417 local_bh_disable();
2418 napi_schedule(&card->napi);
2419 /* kick-start the NAPI softirq: */
2420 local_bh_enable();
2421 } else
2422 rc = -EIO;
2423 return rc;
2424 }
2425
2426 static int qeth_l3_open(struct net_device *dev)
2427 {
2428 struct qeth_card *card = dev->ml_priv;
2429
2430 QETH_CARD_TEXT(card, 5, "qethope_");
2431 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
2432 QETH_CARD_TEXT(card, 3, "openREC");
2433 return -ERESTARTSYS;
2434 }
2435 return __qeth_l3_open(dev);
2436 }
2437
2438 static int qeth_l3_stop(struct net_device *dev)
2439 {
2440 struct qeth_card *card = dev->ml_priv;
2441
2442 QETH_CARD_TEXT(card, 4, "qethstop");
2443 netif_tx_disable(dev);
2444 if (card->state == CARD_STATE_UP) {
2445 card->state = CARD_STATE_SOFTSETUP;
2446 napi_disable(&card->napi);
2447 }
2448 return 0;
2449 }
2450
2451 static const struct ethtool_ops qeth_l3_ethtool_ops = {
2452 .get_link = ethtool_op_get_link,
2453 .get_strings = qeth_core_get_strings,
2454 .get_ethtool_stats = qeth_core_get_ethtool_stats,
2455 .get_sset_count = qeth_core_get_sset_count,
2456 .get_drvinfo = qeth_core_get_drvinfo,
2457 .get_link_ksettings = qeth_core_ethtool_get_link_ksettings,
2458 };
2459
2460 /*
2461 * We need NOARP for IPv4, but we still want neighbor solicitation for
2462 * IPv6. Setting IFF_NOARP on the netdevice is not an option, since that
2463 * also turns off neighbor solicitation. Instead, we install a neighbor
2464 * setup function for IPv4: it skips ARP resolution but keeps the hard
2465 * header, so packet sockets (e.g. tcpdump) continue to work.
2466 */
2467 static int qeth_l3_neigh_setup_noarp(struct neighbour *n)
2468 {
2469 n->nud_state = NUD_NOARP;
2470 memcpy(n->ha, "FAKELL", 6);
2471 n->output = n->ops->connected_output;
2472 return 0;
2473 }
2474
2475 static int
2476 qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np)
2477 {
2478 if (np->tbl->family == AF_INET)
2479 np->neigh_setup = qeth_l3_neigh_setup_noarp;
2480
2481 return 0;
2482 }
2483
2484 static const struct net_device_ops qeth_l3_netdev_ops = {
2485 .ndo_open = qeth_l3_open,
2486 .ndo_stop = qeth_l3_stop,
2487 .ndo_get_stats = qeth_get_stats,
2488 .ndo_start_xmit = qeth_l3_hard_start_xmit,
2489 .ndo_validate_addr = eth_validate_addr,
2490 .ndo_set_rx_mode = qeth_l3_set_rx_mode,
2491 .ndo_do_ioctl = qeth_do_ioctl,
2492 .ndo_fix_features = qeth_fix_features,
2493 .ndo_set_features = qeth_set_features,
2494 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
2495 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
2496 .ndo_tx_timeout = qeth_tx_timeout,
2497 };
2498
2499 static const struct net_device_ops qeth_l3_osa_netdev_ops = {
2500 .ndo_open = qeth_l3_open,
2501 .ndo_stop = qeth_l3_stop,
2502 .ndo_get_stats = qeth_get_stats,
2503 .ndo_start_xmit = qeth_l3_hard_start_xmit,
2504 .ndo_features_check = qeth_features_check,
2505 .ndo_validate_addr = eth_validate_addr,
2506 .ndo_set_rx_mode = qeth_l3_set_rx_mode,
2507 .ndo_do_ioctl = qeth_do_ioctl,
2508 .ndo_fix_features = qeth_fix_features,
2509 .ndo_set_features = qeth_set_features,
2510 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
2511 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
2512 .ndo_tx_timeout = qeth_tx_timeout,
2513 .ndo_neigh_setup = qeth_l3_neigh_setup,
2514 };
2515
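/* One-time netdev setup: select netdev_ops and offload features per card
 * type, read the initial MAC for HiperSockets devices, set ethtool ops,
 * headroom and VLAN features, cap the GSO size to the buffer element limit
 * and register NAPI plus the net_device.
 */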
2516 static int qeth_l3_setup_netdev(struct qeth_card *card)
2517 {
2518 int rc;
2519
2520 if (qeth_netdev_is_registered(card->dev))
2521 return 0;
2522
2523 if (card->info.type == QETH_CARD_TYPE_OSD ||
2524 card->info.type == QETH_CARD_TYPE_OSX) {
2525 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
2526 (card->info.link_type == QETH_LINK_TYPE_HSTR)) {
2527 pr_info("qeth_l3: ignoring TR device\n");
2528 return -ENODEV;
2529 }
2530
2531 card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
2532
2533 /*IPv6 address autoconfiguration stuff*/
2534 qeth_l3_get_unique_id(card);
2535 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
2536 card->dev->dev_id = card->info.unique_id & 0xffff;
2537
2538 if (!card->info.guestlan) {
2539 card->dev->features |= NETIF_F_SG;
2540 card->dev->hw_features |= NETIF_F_TSO |
2541 NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
2542 card->dev->vlan_features |= NETIF_F_TSO |
2543 NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
2544 }
2545
2546 if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) {
2547 card->dev->hw_features |= NETIF_F_IPV6_CSUM;
2548 card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
2549 }
2550 } else if (card->info.type == QETH_CARD_TYPE_IQD) {
2551 card->dev->flags |= IFF_NOARP;
2552 card->dev->netdev_ops = &qeth_l3_netdev_ops;
2553
2554 rc = qeth_l3_iqd_read_initial_mac(card);
2555 if (rc)
2556 goto out;
2557
2558 if (card->options.hsuid[0])
2559 memcpy(card->dev->perm_addr, card->options.hsuid, 9);
2560 } else
2561 return -ENODEV;
2562
2563 card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
2564 card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN;
2565 card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
2566 NETIF_F_HW_VLAN_CTAG_RX |
2567 NETIF_F_HW_VLAN_CTAG_FILTER;
2568
2569 netif_keep_dst(card->dev);
2570 if (card->dev->hw_features & NETIF_F_TSO)
2571 netif_set_gso_max_size(card->dev,
2572 PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));
2573
2574 netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
2575 rc = register_netdev(card->dev);
2576 out:
2577 if (rc)
2578 card->dev->netdev_ops = NULL;
2579 return rc;
2580 }
2581
2582 static const struct device_type qeth_l3_devtype = {
2583 .name = "qeth_layer3",
2584 .groups = qeth_l3_attr_groups,
2585 };
2586
2587 static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
2588 {
2589 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
2590 int rc;
2591
2592 hash_init(card->ip_htable);
2593
2594 if (gdev->dev.type == &qeth_generic_devtype) {
2595 rc = qeth_l3_create_device_attributes(&gdev->dev);
2596 if (rc)
2597 return rc;
2598 }
2599
2600 hash_init(card->ip_mc_htable);
2601 card->options.layer2 = 0;
2602 card->info.hwtrap = 0;
2603 return 0;
2604 }
2605
2606 static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
2607 {
2608 struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
2609
2610 if (cgdev->dev.type == &qeth_generic_devtype)
2611 qeth_l3_remove_device_attributes(&cgdev->dev);
2612
2613 qeth_set_allowed_threads(card, 0, 1);
2614 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
2615
2616 if (cgdev->state == CCWGROUP_ONLINE)
2617 qeth_l3_set_offline(cgdev);
2618
2619 cancel_work_sync(&card->close_dev_work);
2620 if (qeth_netdev_is_registered(card->dev))
2621 unregister_netdev(card->dev);
2622 qeth_l3_clear_ip_htable(card, 0);
2623 qeth_l3_clear_ipato_list(card);
2624 }
2625
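/* Bring the ccwgroup device online: hard-set-up the card, set up the netdev,
 * arm the HW trap if supported, run the layer-3 soft setup (adapter parms,
 * IP assists, routing), initialize the QDIO queues, re-register the stored
 * IP addresses and re-open the interface when the card is recovering.
 */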
2626 static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
2627 {
2628 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
2629 int rc = 0;
2630 enum qeth_card_states recover_flag;
2631
2632 mutex_lock(&card->discipline_mutex);
2633 mutex_lock(&card->conf_mutex);
2634 QETH_DBF_TEXT(SETUP, 2, "setonlin");
2635 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
2636
2637 recover_flag = card->state;
2638 rc = qeth_core_hardsetup_card(card);
2639 if (rc) {
2640 QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
2641 rc = -ENODEV;
2642 goto out_remove;
2643 }
2644
2645 rc = qeth_l3_setup_netdev(card);
2646 if (rc)
2647 goto out_remove;
2648
2649 if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
2650 if (card->info.hwtrap &&
2651 qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
2652 card->info.hwtrap = 0;
2653 } else
2654 card->info.hwtrap = 0;
2655
2656 card->state = CARD_STATE_HARDSETUP;
2657 qeth_print_status_message(card);
2658
2659 /* softsetup */
2660 QETH_DBF_TEXT(SETUP, 2, "softsetp");
2661
2662 rc = qeth_l3_setadapter_parms(card);
2663 if (rc)
2664 QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
2665 if (!card->options.sniffer) {
2666 rc = qeth_l3_start_ipassists(card);
2667 if (rc) {
2668 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
2669 goto out_remove;
2670 }
2671 rc = qeth_l3_setrouting_v4(card);
2672 if (rc)
2673 QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc);
2674 rc = qeth_l3_setrouting_v6(card);
2675 if (rc)
2676 QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc);
2677 }
2678 netif_tx_disable(card->dev);
2679
2680 rc = qeth_init_qdio_queues(card);
2681 if (rc) {
2682 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
2683 rc = -ENODEV;
2684 goto out_remove;
2685 }
2686 card->state = CARD_STATE_SOFTSETUP;
2687
2688 qeth_set_allowed_threads(card, 0xffffffff, 0);
2689 qeth_l3_recover_ip(card);
2690 if (card->lan_online)
2691 netif_carrier_on(card->dev);
2692 else
2693 netif_carrier_off(card->dev);
2694
2695 qeth_enable_hw_features(card->dev);
2696 if (recover_flag == CARD_STATE_RECOVER) {
2697 rtnl_lock();
2698 if (recovery_mode) {
2699 __qeth_l3_open(card->dev);
2700 qeth_l3_set_rx_mode(card->dev);
2701 } else {
2702 dev_open(card->dev);
2703 }
2704 rtnl_unlock();
2705 }
2706 qeth_trace_features(card);
2707 /* let user_space know that device is online */
2708 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
2709 mutex_unlock(&card->conf_mutex);
2710 mutex_unlock(&card->discipline_mutex);
2711 return 0;
2712 out_remove:
2713 qeth_l3_stop_card(card, 0);
2714 ccw_device_set_offline(CARD_DDEV(card));
2715 ccw_device_set_offline(CARD_WDEV(card));
2716 ccw_device_set_offline(CARD_RDEV(card));
2717 qdio_free(CARD_DDEV(card));
2718 if (recover_flag == CARD_STATE_RECOVER)
2719 card->state = CARD_STATE_RECOVER;
2720 else
2721 card->state = CARD_STATE_DOWN;
2722 mutex_unlock(&card->conf_mutex);
2723 mutex_unlock(&card->discipline_mutex);
2724 return rc;
2725 }
2726
2727 static int qeth_l3_set_online(struct ccwgroup_device *gdev)
2728 {
2729 return __qeth_l3_set_online(gdev, 0);
2730 }
2731
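/* Take the ccwgroup device offline: disarm the HW trap where appropriate,
 * stop the card, notify netdev listeners when the CQ is enabled, set the
 * ccw devices offline and free the QDIO memory; a card that was UP is left
 * in recovery state.
 */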
2732 static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
2733 int recovery_mode)
2734 {
2735 struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
2736 int rc = 0, rc2 = 0, rc3 = 0;
2737 enum qeth_card_states recover_flag;
2738
2739 mutex_lock(&card->discipline_mutex);
2740 mutex_lock(&card->conf_mutex);
2741 QETH_DBF_TEXT(SETUP, 3, "setoffl");
2742 QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
2743
2744 netif_carrier_off(card->dev);
2745 recover_flag = card->state;
2746 if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
2747 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
2748 card->info.hwtrap = 1;
2749 }
2750 qeth_l3_stop_card(card, recovery_mode);
2751 if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) {
2752 rtnl_lock();
2753 call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
2754 rtnl_unlock();
2755 }
2756 rc = ccw_device_set_offline(CARD_DDEV(card));
2757 rc2 = ccw_device_set_offline(CARD_WDEV(card));
2758 rc3 = ccw_device_set_offline(CARD_RDEV(card));
2759 if (!rc)
2760 rc = (rc2) ? rc2 : rc3;
2761 if (rc)
2762 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
2763 qdio_free(CARD_DDEV(card));
2764 if (recover_flag == CARD_STATE_UP)
2765 card->state = CARD_STATE_RECOVER;
2766 /* let user_space know that device is offline */
2767 kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
2768 mutex_unlock(&card->conf_mutex);
2769 mutex_unlock(&card->discipline_mutex);
2770 return 0;
2771 }
2772
2773 static int qeth_l3_set_offline(struct ccwgroup_device *cgdev)
2774 {
2775 return __qeth_l3_set_offline(cgdev, 0);
2776 }
2777
2778 static int qeth_l3_recover(void *ptr)
2779 {
2780 struct qeth_card *card;
2781 int rc = 0;
2782
2783 card = (struct qeth_card *) ptr;
2784 QETH_CARD_TEXT(card, 2, "recover1");
2785 QETH_CARD_HEX(card, 2, &card, sizeof(void *));
2786 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
2787 return 0;
2788 QETH_CARD_TEXT(card, 2, "recover2");
2789 dev_warn(&card->gdev->dev,
2790 "A recovery process has been started for the device\n");
2791 qeth_set_recovery_task(card);
2792 __qeth_l3_set_offline(card->gdev, 1);
2793 rc = __qeth_l3_set_online(card->gdev, 1);
2794 if (!rc)
2795 dev_info(&card->gdev->dev,
2796 "Device successfully recovered!\n");
2797 else {
2798 qeth_close_dev(card);
2799 dev_warn(&card->gdev->dev, "The qeth device driver "
2800 "failed to recover an error on the device\n");
2801 }
2802 qeth_clear_recovery_task(card);
2803 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
2804 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
2805 return 0;
2806 }
2807
2808 static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
2809 {
2810 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
2811
2812 netif_device_detach(card->dev);
2813 qeth_set_allowed_threads(card, 0, 1);
2814 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
2815 if (gdev->state == CCWGROUP_OFFLINE)
2816 return 0;
2817 if (card->state == CARD_STATE_UP) {
2818 if (card->info.hwtrap)
2819 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
2820 __qeth_l3_set_offline(card->gdev, 1);
2821 } else
2822 __qeth_l3_set_offline(card->gdev, 0);
2823 return 0;
2824 }
2825
2826 static int qeth_l3_pm_resume(struct ccwgroup_device *gdev)
2827 {
2828 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
2829 int rc = 0;
2830
2831 if (gdev->state == CCWGROUP_OFFLINE)
2832 goto out;
2833
2834 if (card->state == CARD_STATE_RECOVER) {
2835 rc = __qeth_l3_set_online(card->gdev, 1);
2836 if (rc) {
2837 rtnl_lock();
2838 dev_close(card->dev);
2839 rtnl_unlock();
2840 }
2841 } else
2842 rc = __qeth_l3_set_online(card->gdev, 0);
2843 out:
2844 qeth_set_allowed_threads(card, 0xffffffff, 0);
2845 netif_device_attach(card->dev);
2846 if (rc)
2847 dev_warn(&card->gdev->dev, "The qeth device driver "
2848 "failed to recover an error on the device\n");
2849 return rc;
2850 }
2851
2852 /* Returns zero if the command is successfully "consumed" */
2853 static int qeth_l3_control_event(struct qeth_card *card,
2854 struct qeth_ipa_cmd *cmd)
2855 {
2856 return 1;
2857 }
2858
2859 struct qeth_discipline qeth_l3_discipline = {
2860 .devtype = &qeth_l3_devtype,
2861 .process_rx_buffer = qeth_l3_process_inbound_buffer,
2862 .recover = qeth_l3_recover,
2863 .setup = qeth_l3_probe_device,
2864 .remove = qeth_l3_remove_device,
2865 .set_online = qeth_l3_set_online,
2866 .set_offline = qeth_l3_set_offline,
2867 .freeze = qeth_l3_pm_suspend,
2868 .thaw = qeth_l3_pm_resume,
2869 .restore = qeth_l3_pm_resume,
2870 .do_ioctl = qeth_l3_do_ioctl,
2871 .control_event_handler = qeth_l3_control_event,
2872 };
2873 EXPORT_SYMBOL_GPL(qeth_l3_discipline);
2874
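/* Common handler for the inetaddr/inet6addr notifiers: mirror IP address
 * add/remove events on our net_devices into the card's IP table.
 */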
2875 static int qeth_l3_handle_ip_event(struct qeth_card *card,
2876 struct qeth_ipaddr *addr,
2877 unsigned long event)
2878 {
2879 switch (event) {
2880 case NETDEV_UP:
2881 spin_lock_bh(&card->ip_lock);
2882 qeth_l3_add_ip(card, addr);
2883 spin_unlock_bh(&card->ip_lock);
2884 return NOTIFY_OK;
2885 case NETDEV_DOWN:
2886 spin_lock_bh(&card->ip_lock);
2887 qeth_l3_delete_ip(card, addr);
2888 spin_unlock_bh(&card->ip_lock);
2889 return NOTIFY_OK;
2890 default:
2891 return NOTIFY_DONE;
2892 }
2893 }
2894
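/* Map a net_device (or the real device behind a VLAN device) to its qeth L3
 * card; returns NULL for devices not driven by this discipline.
 */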
2895 static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
2896 {
2897 if (is_vlan_dev(dev))
2898 dev = vlan_dev_real_dev(dev);
2899 if (dev->netdev_ops == &qeth_l3_osa_netdev_ops ||
2900 dev->netdev_ops == &qeth_l3_netdev_ops)
2901 return (struct qeth_card *) dev->ml_priv;
2902 return NULL;
2903 }
2904
2905 static int qeth_l3_ip_event(struct notifier_block *this,
2906 unsigned long event, void *ptr)
2907 {
2908
2909 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
2910 struct net_device *dev = ifa->ifa_dev->dev;
2911 struct qeth_ipaddr addr;
2912 struct qeth_card *card;
2913
2914 if (dev_net(dev) != &init_net)
2915 return NOTIFY_DONE;
2916
2917 card = qeth_l3_get_card_from_dev(dev);
2918 if (!card)
2919 return NOTIFY_DONE;
2920 QETH_CARD_TEXT(card, 3, "ipevent");
2921
2922 qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
2923 addr.u.a4.addr = be32_to_cpu(ifa->ifa_address);
2924 addr.u.a4.mask = be32_to_cpu(ifa->ifa_mask);
2925
2926 return qeth_l3_handle_ip_event(card, &addr, event);
2927 }
2928
2929 static struct notifier_block qeth_l3_ip_notifier = {
2930 qeth_l3_ip_event,
2931 NULL,
2932 };
2933
2934 static int qeth_l3_ip6_event(struct notifier_block *this,
2935 unsigned long event, void *ptr)
2936 {
2937 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
2938 struct net_device *dev = ifa->idev->dev;
2939 struct qeth_ipaddr addr;
2940 struct qeth_card *card;
2941
2942 card = qeth_l3_get_card_from_dev(dev);
2943 if (!card)
2944 return NOTIFY_DONE;
2945 QETH_CARD_TEXT(card, 3, "ip6event");
2946 if (!qeth_is_supported(card, IPA_IPV6))
2947 return NOTIFY_DONE;
2948
2949 qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
2950 addr.u.a6.addr = ifa->addr;
2951 addr.u.a6.pfxlen = ifa->prefix_len;
2952
2953 return qeth_l3_handle_ip_event(card, &addr, event);
2954 }
2955
2956 static struct notifier_block qeth_l3_ip6_notifier = {
2957 qeth_l3_ip6_event,
2958 NULL,
2959 };
2960
2961 static int qeth_l3_register_notifiers(void)
2962 {
2963 int rc;
2964
2965 QETH_DBF_TEXT(SETUP, 5, "regnotif");
2966 rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
2967 if (rc)
2968 return rc;
2969 rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier);
2970 if (rc) {
2971 unregister_inetaddr_notifier(&qeth_l3_ip_notifier);
2972 return rc;
2973 }
2974 return 0;
2975 }
2976
2977 static void qeth_l3_unregister_notifiers(void)
2978 {
2979 QETH_DBF_TEXT(SETUP, 5, "unregnot");
2980 WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
2981 WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
2982 }
2983
2984 static int __init qeth_l3_init(void)
2985 {
2986 pr_info("register layer 3 discipline\n");
2987 return qeth_l3_register_notifiers();
2988 }
2989
2990 static void __exit qeth_l3_exit(void)
2991 {
2992 qeth_l3_unregister_notifiers();
2993 pr_info("unregister layer 3 discipline\n");
2994 }
2995
2996 module_init(qeth_l3_init);
2997 module_exit(qeth_l3_exit);
2998 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
2999 MODULE_DESCRIPTION("qeth layer 3 discipline");
3000 MODULE_LICENSE("GPL");
3001