// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *	      Frank Pavlic <fpavlic@de.ibm.com>,
 *	      Thomas Spatzier <tspat@de.ibm.com>,
 *	      Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_fib.h>
#include <net/ip6_checksum.h>
#include <net/iucv/af_iucv.h>
#include <linux/hashtable.h>

#include "qeth_l3.h"


static int qeth_l3_set_offline(struct ccwgroup_device *);
static int qeth_l3_stop(struct net_device *);
static void qeth_l3_set_rx_mode(struct net_device *dev);
static int qeth_l3_register_addr_entry(struct qeth_card *,
		struct qeth_ipaddr *);
static int qeth_l3_deregister_addr_entry(struct qeth_card *,
		struct qeth_ipaddr *);

static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
52 {
53 sprintf(buf, "%pI4", addr);
54 }
55
static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
57 {
58 sprintf(buf, "%pI6", addr);
59 }
60
void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
62 char *buf)
63 {
64 if (proto == QETH_PROT_IPV4)
65 qeth_l3_ipaddr4_to_string(addr, buf);
66 else if (proto == QETH_PROT_IPV6)
67 qeth_l3_ipaddr6_to_string(addr, buf);
68 }
69
static struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions prot)
71 {
72 struct qeth_ipaddr *addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
73
74 if (addr)
75 qeth_l3_init_ipaddr(addr, QETH_IP_TYPE_NORMAL, prot);
76 return addr;
77 }
78
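/* Look up an address by IP only. Unicast and multicast addresses are kept
 * in separate hash tables, chosen by the query's is_multicast flag.
 */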
static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
80 struct qeth_ipaddr *query)
81 {
82 u64 key = qeth_l3_ipaddr_hash(query);
83 struct qeth_ipaddr *addr;
84
85 if (query->is_multicast) {
86 hash_for_each_possible(card->ip_mc_htable, addr, hnode, key)
87 if (qeth_l3_addr_match_ip(addr, query))
88 return addr;
89 } else {
90 hash_for_each_possible(card->ip_htable, addr, hnode, key)
91 if (qeth_l3_addr_match_ip(addr, query))
92 return addr;
93 }
94 return NULL;
95 }
96
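/* Expand an address into one byte per bit (most significant bit first),
 * so that prefixes of arbitrary length can be compared with memcmp().
 */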
static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
98 {
99 int i, j;
100 u8 octet;
101
102 for (i = 0; i < len; ++i) {
103 octet = addr[i];
104 for (j = 7; j >= 0; --j) {
105 bits[i*8 + j] = octet & 1;
106 octet >>= 1;
107 }
108 }
109 }
110
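/* Check whether a NORMAL address matches one of the configured IPATO
 * (IP address takeover) entries. The per-protocol invert flags turn the
 * entry list into an exclusion list.
 */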
static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
112 struct qeth_ipaddr *addr)
113 {
114 struct qeth_ipato_entry *ipatoe;
115 u8 addr_bits[128] = {0, };
116 u8 ipatoe_bits[128] = {0, };
117 int rc = 0;
118
119 if (!card->ipato.enabled)
120 return false;
121 if (addr->type != QETH_IP_TYPE_NORMAL)
122 return false;
123
124 qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
125 (addr->proto == QETH_PROT_IPV4)? 4:16);
126 list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
127 if (addr->proto != ipatoe->proto)
128 continue;
129 qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
130 (ipatoe->proto == QETH_PROT_IPV4) ?
131 4 : 16);
132 if (addr->proto == QETH_PROT_IPV4)
133 rc = !memcmp(addr_bits, ipatoe_bits,
134 min(32, ipatoe->mask_bits));
135 else
136 rc = !memcmp(addr_bits, ipatoe_bits,
137 min(128, ipatoe->mask_bits));
138 if (rc)
139 break;
140 }
141 /* invert? */
142 if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
143 rc = !rc;
144 else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
145 rc = !rc;
146
147 return rc;
148 }
149
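/* Remove an address from the unicast table and, if the card is reachable,
 * deregister it from the hardware. The caller must hold ip_lock.
 */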
static int qeth_l3_delete_ip(struct qeth_card *card,
151 struct qeth_ipaddr *tmp_addr)
152 {
153 int rc = 0;
154 struct qeth_ipaddr *addr;
155
156 if (tmp_addr->type == QETH_IP_TYPE_RXIP)
157 QETH_CARD_TEXT(card, 2, "delrxip");
158 else if (tmp_addr->type == QETH_IP_TYPE_VIPA)
159 QETH_CARD_TEXT(card, 2, "delvipa");
160 else
161 QETH_CARD_TEXT(card, 2, "delip");
162
163 if (tmp_addr->proto == QETH_PROT_IPV4)
164 QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
165 else {
166 QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
167 QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
168 }
169
170 addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
171 if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
172 return -ENOENT;
173
174 addr->ref_counter--;
175 if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
176 return rc;
177 if (addr->in_progress)
178 return -EINPROGRESS;
179
180 if (qeth_card_hw_is_reachable(card))
181 rc = qeth_l3_deregister_addr_entry(card, addr);
182
183 hash_del(&addr->hnode);
184 kfree(addr);
185
186 return rc;
187 }
188
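/* Add an address to the unicast table and register it with the hardware.
 * The caller must hold ip_lock; for IPv4 the lock is dropped around the
 * SETIP command, with in_progress marking the entry as busy.
 */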
static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
190 {
191 int rc = 0;
192 struct qeth_ipaddr *addr;
193 char buf[40];
194
195 if (tmp_addr->type == QETH_IP_TYPE_RXIP)
196 QETH_CARD_TEXT(card, 2, "addrxip");
197 else if (tmp_addr->type == QETH_IP_TYPE_VIPA)
198 QETH_CARD_TEXT(card, 2, "addvipa");
199 else
200 QETH_CARD_TEXT(card, 2, "addip");
201
202 if (tmp_addr->proto == QETH_PROT_IPV4)
203 QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
204 else {
205 QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
206 QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
207 }
208
209 addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
210 if (addr) {
211 if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
212 return -EADDRINUSE;
213 if (qeth_l3_addr_match_all(addr, tmp_addr)) {
214 addr->ref_counter++;
215 return 0;
216 }
217 qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
218 buf);
219 dev_warn(&card->gdev->dev,
220 "Registering IP address %s failed\n", buf);
221 return -EADDRINUSE;
222 } else {
223 addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
224 if (!addr)
225 return -ENOMEM;
226
227 memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
228 addr->ref_counter = 1;
229
230 if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
231 QETH_CARD_TEXT(card, 2, "tkovaddr");
232 addr->ipato = 1;
233 }
234 hash_add(card->ip_htable, &addr->hnode,
235 qeth_l3_ipaddr_hash(addr));
236
237 if (!qeth_card_hw_is_reachable(card)) {
238 addr->disp_flag = QETH_DISP_ADDR_ADD;
239 return 0;
240 }
241
/* qeth_l3_register_addr_entry() can sleep when adding an
 * IPv4 address, because the SETIP IPA command triggers ARP
 * processing for IPv4. So drop the spinlock around the call
 * and use the in_progress flag to indicate that a hardware
 * operation for this IPv4 address is still pending.
 */
249 if (addr->proto == QETH_PROT_IPV4) {
250 addr->in_progress = 1;
251 spin_unlock_bh(&card->ip_lock);
252 rc = qeth_l3_register_addr_entry(card, addr);
253 spin_lock_bh(&card->ip_lock);
254 addr->in_progress = 0;
255 } else
256 rc = qeth_l3_register_addr_entry(card, addr);
257
258 if (!rc || (rc == IPA_RC_DUPLICATE_IP_ADDRESS) ||
259 (rc == IPA_RC_LAN_OFFLINE)) {
260 addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
261 if (addr->ref_counter < 1) {
262 qeth_l3_deregister_addr_entry(card, addr);
263 hash_del(&addr->hnode);
264 kfree(addr);
265 }
266 } else {
267 hash_del(&addr->hnode);
268 kfree(addr);
269 }
270 }
271 return rc;
272 }
273
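/* Drop all multicast addresses, and either free the unicast addresses as
 * well or (in recovery mode) just mark them for re-registration.
 */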
static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
275 {
276 struct qeth_ipaddr *addr;
277 struct hlist_node *tmp;
278 int i;
279
280 QETH_CARD_TEXT(card, 4, "clearip");
281
282 if (recover && card->options.sniffer)
283 return;
284
285 spin_lock_bh(&card->ip_lock);
286
287 hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
288 if (!recover) {
289 hash_del(&addr->hnode);
290 kfree(addr);
291 continue;
292 }
293 addr->disp_flag = QETH_DISP_ADDR_ADD;
294 }
295
296 spin_unlock_bh(&card->ip_lock);
297
298 spin_lock_bh(&card->mclock);
299
300 hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
301 hash_del(&addr->hnode);
302 kfree(addr);
303 }
304
305 spin_unlock_bh(&card->mclock);
306
307
308 }
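/* Re-register all unicast addresses that were flagged QETH_DISP_ADDR_ADD
 * while the card was unreachable or in recovery.
 */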
static void qeth_l3_recover_ip(struct qeth_card *card)
310 {
311 struct qeth_ipaddr *addr;
312 struct hlist_node *tmp;
313 int i;
314 int rc;
315
316 QETH_CARD_TEXT(card, 4, "recovrip");
317
318 spin_lock_bh(&card->ip_lock);
319
320 hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
321 if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
322 if (addr->proto == QETH_PROT_IPV4) {
323 addr->in_progress = 1;
324 spin_unlock_bh(&card->ip_lock);
325 rc = qeth_l3_register_addr_entry(card, addr);
326 spin_lock_bh(&card->ip_lock);
327 addr->in_progress = 0;
328 } else
329 rc = qeth_l3_register_addr_entry(card, addr);
330
331 if (!rc) {
332 addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
333 if (addr->ref_counter < 1)
334 qeth_l3_delete_ip(card, addr);
335 } else {
336 hash_del(&addr->hnode);
337 kfree(addr);
338 }
339 }
340 }
341
342 spin_unlock_bh(&card->ip_lock);
343
344 }
345
static int qeth_l3_send_setdelmc(struct qeth_card *card,
347 struct qeth_ipaddr *addr, int ipacmd)
348 {
349 int rc;
350 struct qeth_cmd_buffer *iob;
351 struct qeth_ipa_cmd *cmd;
352
353 QETH_CARD_TEXT(card, 4, "setdelmc");
354
355 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
356 if (!iob)
357 return -ENOMEM;
358 cmd = __ipa_cmd(iob);
359 ether_addr_copy(cmd->data.setdelipm.mac, addr->mac);
360 if (addr->proto == QETH_PROT_IPV6)
361 memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
362 sizeof(struct in6_addr));
363 else
364 memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4);
365
366 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
367
368 return rc;
369 }
370
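/* Build a binary netmask from an IPv6 prefix length. */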
static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len)
372 {
373 int i, j;
374 for (i = 0; i < 16; i++) {
375 j = (len) - (i * 8);
376 if (j >= 8)
377 netmask[i] = 0xff;
378 else if (j > 0)
379 netmask[i] = (u8)(0xFF00 >> j);
380 else
381 netmask[i] = 0;
382 }
383 }
384
static u32 qeth_l3_get_setdelip_flags(struct qeth_ipaddr *addr, bool set)
386 {
387 switch (addr->type) {
388 case QETH_IP_TYPE_RXIP:
389 return (set) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0;
390 case QETH_IP_TYPE_VIPA:
391 return (set) ? QETH_IPA_SETIP_VIPA_FLAG :
392 QETH_IPA_DELIP_VIPA_FLAG;
393 default:
394 return (set && addr->ipato) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0;
395 }
396 }
397
static int qeth_l3_send_setdelip(struct qeth_card *card,
399 struct qeth_ipaddr *addr,
400 enum qeth_ipa_cmds ipacmd)
401 {
402 struct qeth_cmd_buffer *iob;
403 struct qeth_ipa_cmd *cmd;
404 __u8 netmask[16];
405 u32 flags;
406
407 QETH_CARD_TEXT(card, 4, "setdelip");
408
409 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
410 if (!iob)
411 return -ENOMEM;
412 cmd = __ipa_cmd(iob);
413
414 flags = qeth_l3_get_setdelip_flags(addr, ipacmd == IPA_CMD_SETIP);
415 QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
416
417 if (addr->proto == QETH_PROT_IPV6) {
418 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
419 sizeof(struct in6_addr));
420 qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen);
421 memcpy(cmd->data.setdelip6.mask, netmask,
422 sizeof(struct in6_addr));
423 cmd->data.setdelip6.flags = flags;
424 } else {
425 memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
426 memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
427 cmd->data.setdelip4.flags = flags;
428 }
429
430 return qeth_send_ipa_cmd(card, iob, NULL, NULL);
431 }
432
static int qeth_l3_send_setrouting(struct qeth_card *card,
434 enum qeth_routing_types type, enum qeth_prot_versions prot)
435 {
436 int rc;
437 struct qeth_ipa_cmd *cmd;
438 struct qeth_cmd_buffer *iob;
439
440 QETH_CARD_TEXT(card, 4, "setroutg");
441 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
442 if (!iob)
443 return -ENOMEM;
444 cmd = __ipa_cmd(iob);
445 cmd->data.setrtg.type = (type);
446 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
447
448 return rc;
449 }
450
static int qeth_l3_correct_routing_type(struct qeth_card *card,
452 enum qeth_routing_types *type, enum qeth_prot_versions prot)
453 {
454 if (card->info.type == QETH_CARD_TYPE_IQD) {
455 switch (*type) {
456 case NO_ROUTER:
457 case PRIMARY_CONNECTOR:
458 case SECONDARY_CONNECTOR:
459 case MULTICAST_ROUTER:
460 return 0;
461 default:
462 goto out_inval;
463 }
464 } else {
465 switch (*type) {
466 case NO_ROUTER:
467 case PRIMARY_ROUTER:
468 case SECONDARY_ROUTER:
469 return 0;
470 case MULTICAST_ROUTER:
471 if (qeth_is_ipafunc_supported(card, prot,
472 IPA_OSA_MC_ROUTER))
473 return 0;
474 default:
475 goto out_inval;
476 }
477 }
478 out_inval:
479 *type = NO_ROUTER;
480 return -EINVAL;
481 }
482
int qeth_l3_setrouting_v4(struct qeth_card *card)
484 {
485 int rc;
486
487 QETH_CARD_TEXT(card, 3, "setrtg4");
488
489 rc = qeth_l3_correct_routing_type(card, &card->options.route4.type,
490 QETH_PROT_IPV4);
491 if (rc)
492 return rc;
493
494 rc = qeth_l3_send_setrouting(card, card->options.route4.type,
495 QETH_PROT_IPV4);
496 if (rc) {
497 card->options.route4.type = NO_ROUTER;
498 QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
499 " on %s. Type set to 'no router'.\n", rc,
500 QETH_CARD_IFNAME(card));
501 }
502 return rc;
503 }
504
int qeth_l3_setrouting_v6(struct qeth_card *card)
506 {
507 int rc = 0;
508
509 QETH_CARD_TEXT(card, 3, "setrtg6");
510
511 if (!qeth_is_supported(card, IPA_IPV6))
512 return 0;
513 rc = qeth_l3_correct_routing_type(card, &card->options.route6.type,
514 QETH_PROT_IPV6);
515 if (rc)
516 return rc;
517
518 rc = qeth_l3_send_setrouting(card, card->options.route6.type,
519 QETH_PROT_IPV6);
520 if (rc) {
521 card->options.route6.type = NO_ROUTER;
522 QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
523 " on %s. Type set to 'no router'.\n", rc,
524 QETH_CARD_IFNAME(card));
525 }
526 return rc;
527 }
528
529 /*
530 * IP address takeover related functions
531 */
532
533 /**
534 * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs.
535 *
536 * Caller must hold ip_lock.
537 */
void qeth_l3_update_ipato(struct qeth_card *card)
539 {
540 struct qeth_ipaddr *addr;
541 unsigned int i;
542
543 hash_for_each(card->ip_htable, i, addr, hnode) {
544 if (addr->type != QETH_IP_TYPE_NORMAL)
545 continue;
546 addr->ipato = qeth_l3_is_addr_covered_by_ipato(card, addr);
547 }
548 }
549
static void qeth_l3_clear_ipato_list(struct qeth_card *card)
551 {
552 struct qeth_ipato_entry *ipatoe, *tmp;
553
554 spin_lock_bh(&card->ip_lock);
555
556 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
557 list_del(&ipatoe->entry);
558 kfree(ipatoe);
559 }
560
561 qeth_l3_update_ipato(card);
562 spin_unlock_bh(&card->ip_lock);
563 }
564
int qeth_l3_add_ipato_entry(struct qeth_card *card,
566 struct qeth_ipato_entry *new)
567 {
568 struct qeth_ipato_entry *ipatoe;
569 int rc = 0;
570
571 QETH_CARD_TEXT(card, 2, "addipato");
572
573 spin_lock_bh(&card->ip_lock);
574
575 list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
576 if (ipatoe->proto != new->proto)
577 continue;
578 if (!memcmp(ipatoe->addr, new->addr,
579 (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
580 (ipatoe->mask_bits == new->mask_bits)) {
581 rc = -EEXIST;
582 break;
583 }
584 }
585
586 if (!rc) {
587 list_add_tail(&new->entry, &card->ipato.entries);
588 qeth_l3_update_ipato(card);
589 }
590
591 spin_unlock_bh(&card->ip_lock);
592
593 return rc;
594 }
595
int qeth_l3_del_ipato_entry(struct qeth_card *card,
597 enum qeth_prot_versions proto, u8 *addr,
598 int mask_bits)
599 {
600 struct qeth_ipato_entry *ipatoe, *tmp;
601 int rc = -ENOENT;
602
603 QETH_CARD_TEXT(card, 2, "delipato");
604
605 spin_lock_bh(&card->ip_lock);
606
607 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
608 if (ipatoe->proto != proto)
609 continue;
610 if (!memcmp(ipatoe->addr, addr,
611 (proto == QETH_PROT_IPV4)? 4:16) &&
612 (ipatoe->mask_bits == mask_bits)) {
613 list_del(&ipatoe->entry);
614 qeth_l3_update_ipato(card);
615 kfree(ipatoe);
616 rc = 0;
617 }
618 }
619
620 spin_unlock_bh(&card->ip_lock);
621 return rc;
622 }
623
int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
625 enum qeth_ip_types type,
626 enum qeth_prot_versions proto)
627 {
628 struct qeth_ipaddr addr;
629 int rc;
630
631 qeth_l3_init_ipaddr(&addr, type, proto);
632 if (proto == QETH_PROT_IPV4)
633 memcpy(&addr.u.a4.addr, ip, 4);
634 else
635 memcpy(&addr.u.a6.addr, ip, 16);
636
637 spin_lock_bh(&card->ip_lock);
638 rc = add ? qeth_l3_add_ip(card, &addr) : qeth_l3_delete_ip(card, &addr);
639 spin_unlock_bh(&card->ip_lock);
640 return rc;
641 }
642
int qeth_l3_modify_hsuid(struct qeth_card *card, bool add)
644 {
645 struct qeth_ipaddr addr;
646 int rc, i;
647
648 qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
649 addr.u.a6.addr.s6_addr[0] = 0xfe;
650 addr.u.a6.addr.s6_addr[1] = 0x80;
651 for (i = 0; i < 8; i++)
652 addr.u.a6.addr.s6_addr[8+i] = card->options.hsuid[i];
653
654 spin_lock_bh(&card->ip_lock);
655 rc = add ? qeth_l3_add_ip(card, &addr) : qeth_l3_delete_ip(card, &addr);
656 spin_unlock_bh(&card->ip_lock);
657 return rc;
658 }
659
static int qeth_l3_register_addr_entry(struct qeth_card *card,
661 struct qeth_ipaddr *addr)
662 {
663 char buf[50];
664 int rc = 0;
665 int cnt = 3;
666
667
668 if (addr->proto == QETH_PROT_IPV4) {
669 QETH_CARD_TEXT(card, 2, "setaddr4");
670 QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
671 } else if (addr->proto == QETH_PROT_IPV6) {
672 QETH_CARD_TEXT(card, 2, "setaddr6");
673 QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
674 QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
675 } else {
676 QETH_CARD_TEXT(card, 2, "setaddr?");
677 QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
678 }
679 do {
680 if (addr->is_multicast)
681 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_SETIPM);
682 else
683 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP);
684 if (rc)
685 QETH_CARD_TEXT(card, 2, "failed");
686 } while ((--cnt > 0) && rc);
687 if (rc) {
688 QETH_CARD_TEXT(card, 2, "FAILED");
689 qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
690 dev_warn(&card->gdev->dev,
691 "Registering IP address %s failed\n", buf);
692 }
693 return rc;
694 }
695
static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
697 struct qeth_ipaddr *addr)
698 {
699 int rc = 0;
700
701 if (addr->proto == QETH_PROT_IPV4) {
702 QETH_CARD_TEXT(card, 2, "deladdr4");
703 QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
704 } else if (addr->proto == QETH_PROT_IPV6) {
705 QETH_CARD_TEXT(card, 2, "deladdr6");
706 QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
707 QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
708 } else {
709 QETH_CARD_TEXT(card, 2, "deladdr?");
710 QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
711 }
712 if (addr->is_multicast)
713 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM);
714 else
715 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP);
716 if (rc)
717 QETH_CARD_TEXT(card, 2, "failed");
718
719 return rc;
720 }
721
static int qeth_l3_setadapter_parms(struct qeth_card *card)
723 {
724 int rc = 0;
725
726 QETH_DBF_TEXT(SETUP, 2, "setadprm");
727
728 if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) {
729 rc = qeth_setadpparms_change_macaddr(card);
730 if (rc)
731 dev_warn(&card->gdev->dev, "Reading the adapter MAC"
732 " address failed\n");
733 }
734
735 return rc;
736 }
737
static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
739 {
740 int rc;
741
742 QETH_CARD_TEXT(card, 3, "ipaarp");
743
744 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
745 dev_info(&card->gdev->dev,
746 "ARP processing not supported on %s!\n",
747 QETH_CARD_IFNAME(card));
748 return 0;
749 }
750 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
751 IPA_CMD_ASS_START, 0);
752 if (rc) {
753 dev_warn(&card->gdev->dev,
754 "Starting ARP processing support for %s failed\n",
755 QETH_CARD_IFNAME(card));
756 }
757 return rc;
758 }
759
static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
761 {
762 int rc;
763
764 QETH_CARD_TEXT(card, 3, "stsrcmac");
765
766 if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
767 dev_info(&card->gdev->dev,
768 "Inbound source MAC-address not supported on %s\n",
769 QETH_CARD_IFNAME(card));
770 return -EOPNOTSUPP;
771 }
772
773 rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
774 IPA_CMD_ASS_START, 0);
775 if (rc)
776 dev_warn(&card->gdev->dev,
777 "Starting source MAC-address support for %s failed\n",
778 QETH_CARD_IFNAME(card));
779 return rc;
780 }
781
static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
783 {
784 int rc = 0;
785
786 QETH_CARD_TEXT(card, 3, "strtvlan");
787
788 if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
789 dev_info(&card->gdev->dev,
790 "VLAN not supported on %s\n", QETH_CARD_IFNAME(card));
791 return -EOPNOTSUPP;
792 }
793
794 rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
795 IPA_CMD_ASS_START, 0);
796 if (rc) {
797 dev_warn(&card->gdev->dev,
798 "Starting VLAN support for %s failed\n",
799 QETH_CARD_IFNAME(card));
800 } else {
801 dev_info(&card->gdev->dev, "VLAN enabled\n");
802 }
803 return rc;
804 }
805
static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
807 {
808 int rc;
809
810 QETH_CARD_TEXT(card, 3, "stmcast");
811
812 if (!qeth_is_supported(card, IPA_MULTICASTING)) {
813 dev_info(&card->gdev->dev,
814 "Multicast not supported on %s\n",
815 QETH_CARD_IFNAME(card));
816 return -EOPNOTSUPP;
817 }
818
819 rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
820 IPA_CMD_ASS_START, 0);
821 if (rc) {
822 dev_warn(&card->gdev->dev,
823 "Starting multicast support for %s failed\n",
824 QETH_CARD_IFNAME(card));
825 } else {
826 dev_info(&card->gdev->dev, "Multicast enabled\n");
827 card->dev->flags |= IFF_MULTICAST;
828 }
829 return rc;
830 }
831
static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
833 {
834 int rc;
835
836 QETH_CARD_TEXT(card, 3, "softipv6");
837
838 if (card->info.type == QETH_CARD_TYPE_IQD)
839 goto out;
840
841 rc = qeth_send_simple_setassparms(card, IPA_IPV6,
842 IPA_CMD_ASS_START, 3);
843 if (rc) {
844 dev_err(&card->gdev->dev,
845 "Activating IPv6 support for %s failed\n",
846 QETH_CARD_IFNAME(card));
847 return rc;
848 }
849 rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6,
850 IPA_CMD_ASS_START, 0);
851 if (rc) {
852 dev_err(&card->gdev->dev,
853 "Activating IPv6 support for %s failed\n",
854 QETH_CARD_IFNAME(card));
855 return rc;
856 }
857 rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU,
858 IPA_CMD_ASS_START, 0);
859 if (rc) {
860 dev_warn(&card->gdev->dev,
861 "Enabling the passthrough mode for %s failed\n",
862 QETH_CARD_IFNAME(card));
863 return rc;
864 }
865 out:
866 dev_info(&card->gdev->dev, "IPV6 enabled\n");
867 return 0;
868 }
869
static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
871 {
872 QETH_CARD_TEXT(card, 3, "strtipv6");
873
874 if (!qeth_is_supported(card, IPA_IPV6)) {
875 dev_info(&card->gdev->dev,
876 "IPv6 not supported on %s\n", QETH_CARD_IFNAME(card));
877 return 0;
878 }
879 return qeth_l3_softsetup_ipv6(card);
880 }
881
static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
883 {
884 int rc;
885
886 QETH_CARD_TEXT(card, 3, "stbrdcst");
887 card->info.broadcast_capable = 0;
888 if (!qeth_is_supported(card, IPA_FILTERING)) {
889 dev_info(&card->gdev->dev,
890 "Broadcast not supported on %s\n",
891 QETH_CARD_IFNAME(card));
892 rc = -EOPNOTSUPP;
893 goto out;
894 }
895 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
896 IPA_CMD_ASS_START, 0);
897 if (rc) {
898 dev_warn(&card->gdev->dev, "Enabling broadcast filtering for "
899 "%s failed\n", QETH_CARD_IFNAME(card));
900 goto out;
901 }
902
903 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
904 IPA_CMD_ASS_CONFIGURE, 1);
905 if (rc) {
906 dev_warn(&card->gdev->dev,
907 "Setting up broadcast filtering for %s failed\n",
908 QETH_CARD_IFNAME(card));
909 goto out;
910 }
911 card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
912 dev_info(&card->gdev->dev, "Broadcast enabled\n");
913 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
914 IPA_CMD_ASS_ENABLE, 1);
915 if (rc) {
916 dev_warn(&card->gdev->dev, "Setting up broadcast echo "
917 "filtering for %s failed\n", QETH_CARD_IFNAME(card));
918 goto out;
919 }
920 card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
921 out:
922 if (card->info.broadcast_capable)
923 card->dev->flags |= IFF_BROADCAST;
924 else
925 card->dev->flags &= ~IFF_BROADCAST;
926 return rc;
927 }
928
static int qeth_l3_start_ipassists(struct qeth_card *card)
930 {
931 QETH_CARD_TEXT(card, 3, "strtipas");
932
933 if (qeth_set_access_ctrl_online(card, 0))
934 return -EIO;
935 qeth_l3_start_ipa_arp_processing(card); /* go on*/
936 qeth_l3_start_ipa_source_mac(card); /* go on*/
937 qeth_l3_start_ipa_vlan(card); /* go on*/
938 qeth_l3_start_ipa_multicast(card); /* go on*/
939 qeth_l3_start_ipa_ipv6(card); /* go on*/
940 qeth_l3_start_ipa_broadcast(card); /* go on*/
941 return 0;
942 }
943
static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
945 struct qeth_reply *reply, unsigned long data)
946 {
947 struct qeth_ipa_cmd *cmd;
948
949 cmd = (struct qeth_ipa_cmd *) data;
950 if (cmd->hdr.return_code == 0)
951 ether_addr_copy(card->dev->dev_addr,
952 cmd->data.create_destroy_addr.unique_id);
953 else
954 eth_random_addr(card->dev->dev_addr);
955
956 return 0;
957 }
958
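/* On IQD (HiperSockets) devices, derive the initial MAC address from the
 * unique id returned by IPA_CMD_CREATE_ADDR; the callback falls back to a
 * random address if the command fails.
 */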
static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
960 {
961 int rc = 0;
962 struct qeth_cmd_buffer *iob;
963 struct qeth_ipa_cmd *cmd;
964
965 QETH_DBF_TEXT(SETUP, 2, "hsrmac");
966
967 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
968 QETH_PROT_IPV6);
969 if (!iob)
970 return -ENOMEM;
971 cmd = __ipa_cmd(iob);
972 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
973 card->info.unique_id;
974
975 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb,
976 NULL);
977 return rc;
978 }
979
static int qeth_l3_get_unique_id_cb(struct qeth_card *card,
981 struct qeth_reply *reply, unsigned long data)
982 {
983 struct qeth_ipa_cmd *cmd;
984
985 cmd = (struct qeth_ipa_cmd *) data;
986 if (cmd->hdr.return_code == 0)
987 card->info.unique_id = *((__u16 *)
988 &cmd->data.create_destroy_addr.unique_id[6]);
989 else {
990 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
991 UNIQUE_ID_NOT_BY_CARD;
992 dev_warn(&card->gdev->dev, "The network adapter failed to "
993 "generate a unique ID\n");
994 }
995 return 0;
996 }
997
static int qeth_l3_get_unique_id(struct qeth_card *card)
999 {
1000 int rc = 0;
1001 struct qeth_cmd_buffer *iob;
1002 struct qeth_ipa_cmd *cmd;
1003
1004 QETH_DBF_TEXT(SETUP, 2, "guniqeid");
1005
1006 if (!qeth_is_supported(card, IPA_IPV6)) {
1007 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
1008 UNIQUE_ID_NOT_BY_CARD;
1009 return 0;
1010 }
1011
1012 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
1013 QETH_PROT_IPV6);
1014 if (!iob)
1015 return -ENOMEM;
1016 cmd = __ipa_cmd(iob);
1017 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
1018 card->info.unique_id;
1019
1020 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL);
1021 return rc;
1022 }
1023
1024 static int
qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
1026 unsigned long data)
1027 {
1028 struct qeth_ipa_cmd *cmd;
1029 __u16 rc;
1030
1031 QETH_DBF_TEXT(SETUP, 2, "diastrcb");
1032
1033 cmd = (struct qeth_ipa_cmd *)data;
1034 rc = cmd->hdr.return_code;
1035 if (rc)
1036 QETH_CARD_TEXT_(card, 2, "dxter%x", rc);
1037 switch (cmd->data.diagass.action) {
1038 case QETH_DIAGS_CMD_TRACE_QUERY:
1039 break;
1040 case QETH_DIAGS_CMD_TRACE_DISABLE:
1041 switch (rc) {
1042 case 0:
1043 case IPA_RC_INVALID_SUBCMD:
1044 card->info.promisc_mode = SET_PROMISC_MODE_OFF;
1045 dev_info(&card->gdev->dev, "The HiperSockets network "
1046 "traffic analyzer is deactivated\n");
1047 break;
1048 default:
1049 break;
1050 }
1051 break;
1052 case QETH_DIAGS_CMD_TRACE_ENABLE:
1053 switch (rc) {
1054 case 0:
1055 card->info.promisc_mode = SET_PROMISC_MODE_ON;
1056 dev_info(&card->gdev->dev, "The HiperSockets network "
1057 "traffic analyzer is activated\n");
1058 break;
1059 case IPA_RC_HARDWARE_AUTH_ERROR:
1060 dev_warn(&card->gdev->dev, "The device is not "
1061 "authorized to run as a HiperSockets network "
1062 "traffic analyzer\n");
1063 break;
1064 case IPA_RC_TRACE_ALREADY_ACTIVE:
1065 dev_warn(&card->gdev->dev, "A HiperSockets "
1066 "network traffic analyzer is already "
1067 "active in the HiperSockets LAN\n");
1068 break;
1069 default:
1070 break;
1071 }
1072 break;
1073 default:
1074 QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n",
1075 cmd->data.diagass.action, QETH_CARD_IFNAME(card));
1076 }
1077
1078 return 0;
1079 }
1080
1081 static int
qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
1083 {
1084 struct qeth_cmd_buffer *iob;
1085 struct qeth_ipa_cmd *cmd;
1086
1087 QETH_DBF_TEXT(SETUP, 2, "diagtrac");
1088
1089 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
1090 if (!iob)
1091 return -ENOMEM;
1092 cmd = __ipa_cmd(iob);
1093 cmd->data.diagass.subcmd_len = 16;
1094 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
1095 cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET;
1096 cmd->data.diagass.action = diags_cmd;
1097 return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
1098 }
1099
1100 static void
qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
1102 {
1103 struct ip_mc_list *im4;
1104 struct qeth_ipaddr *tmp, *ipm;
1105
1106 QETH_CARD_TEXT(card, 4, "addmc");
1107
1108 tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
1109 if (!tmp)
1110 return;
1111
1112 for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
1113 im4 = rcu_dereference(im4->next_rcu)) {
1114 ip_eth_mc_map(im4->multiaddr, tmp->mac);
1115 tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
1116 tmp->is_multicast = 1;
1117
1118 ipm = qeth_l3_find_addr_by_ip(card, tmp);
1119 if (ipm) {
1120 /* for mcast, by-IP match means full match */
1121 ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
1122 } else {
1123 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
1124 if (!ipm)
1125 continue;
1126 ether_addr_copy(ipm->mac, tmp->mac);
1127 ipm->u.a4.addr = be32_to_cpu(im4->multiaddr);
1128 ipm->is_multicast = 1;
1129 ipm->disp_flag = QETH_DISP_ADDR_ADD;
1130 hash_add(card->ip_mc_htable,
1131 &ipm->hnode, qeth_l3_ipaddr_hash(ipm));
1132 }
1133 }
1134
1135 kfree(tmp);
1136 }
1137
1138 /* called with rcu_read_lock */
static void qeth_l3_add_vlan_mc(struct qeth_card *card)
1140 {
1141 struct in_device *in_dev;
1142 u16 vid;
1143
1144 QETH_CARD_TEXT(card, 4, "addmcvl");
1145
1146 if (!qeth_is_supported(card, IPA_FULL_VLAN))
1147 return;
1148
1149 for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
1150 struct net_device *netdev;
1151
1152 netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
1153 vid);
1154 if (netdev == NULL ||
1155 !(netdev->flags & IFF_UP))
1156 continue;
1157 in_dev = __in_dev_get_rcu(netdev);
1158 if (!in_dev)
1159 continue;
1160 qeth_l3_add_mc_to_hash(card, in_dev);
1161 }
1162 }
1163
static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
1165 {
1166 struct in_device *in4_dev;
1167
1168 QETH_CARD_TEXT(card, 4, "chkmcv4");
1169
1170 rcu_read_lock();
1171 in4_dev = __in_dev_get_rcu(card->dev);
1172 if (in4_dev == NULL)
1173 goto unlock;
1174 qeth_l3_add_mc_to_hash(card, in4_dev);
1175 qeth_l3_add_vlan_mc(card);
1176 unlock:
1177 rcu_read_unlock();
1178 }
1179
static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
1181 struct inet6_dev *in6_dev)
1182 {
1183 struct qeth_ipaddr *ipm;
1184 struct ifmcaddr6 *im6;
1185 struct qeth_ipaddr *tmp;
1186
1187 QETH_CARD_TEXT(card, 4, "addmc6");
1188
1189 tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
1190 if (!tmp)
1191 return;
1192
1193 for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
1194 ipv6_eth_mc_map(&im6->mca_addr, tmp->mac);
1195 memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr,
1196 sizeof(struct in6_addr));
1197 tmp->is_multicast = 1;
1198
1199 ipm = qeth_l3_find_addr_by_ip(card, tmp);
1200 if (ipm) {
1201 /* for mcast, by-IP match means full match */
1202 ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
1203 continue;
1204 }
1205
1206 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
1207 if (!ipm)
1208 continue;
1209
1210 ether_addr_copy(ipm->mac, tmp->mac);
1211 memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
1212 sizeof(struct in6_addr));
1213 ipm->is_multicast = 1;
1214 ipm->disp_flag = QETH_DISP_ADDR_ADD;
1215 hash_add(card->ip_mc_htable,
1216 &ipm->hnode, qeth_l3_ipaddr_hash(ipm));
1217
1218 }
1219 kfree(tmp);
1220 }
1221
1222 /* called with rcu_read_lock */
static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
1224 {
1225 struct inet6_dev *in_dev;
1226 u16 vid;
1227
1228 QETH_CARD_TEXT(card, 4, "admc6vl");
1229
1230 if (!qeth_is_supported(card, IPA_FULL_VLAN))
1231 return;
1232
1233 for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
1234 struct net_device *netdev;
1235
1236 netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
1237 vid);
1238 if (netdev == NULL ||
1239 !(netdev->flags & IFF_UP))
1240 continue;
1241 in_dev = in6_dev_get(netdev);
1242 if (!in_dev)
1243 continue;
1244 read_lock_bh(&in_dev->lock);
1245 qeth_l3_add_mc6_to_hash(card, in_dev);
1246 read_unlock_bh(&in_dev->lock);
1247 in6_dev_put(in_dev);
1248 }
1249 }
1250
static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
1252 {
1253 struct inet6_dev *in6_dev;
1254
1255 QETH_CARD_TEXT(card, 4, "chkmcv6");
1256
1257 if (!qeth_is_supported(card, IPA_IPV6))
1258 return ;
1259 in6_dev = in6_dev_get(card->dev);
1260 if (!in6_dev)
1261 return;
1262
1263 rcu_read_lock();
1264 read_lock_bh(&in6_dev->lock);
1265 qeth_l3_add_mc6_to_hash(card, in6_dev);
1266 qeth_l3_add_vlan_mc6(card);
1267 read_unlock_bh(&in6_dev->lock);
1268 rcu_read_unlock();
1269 in6_dev_put(in6_dev);
1270 }
1271
static int qeth_l3_vlan_rx_add_vid(struct net_device *dev,
1273 __be16 proto, u16 vid)
1274 {
1275 struct qeth_card *card = dev->ml_priv;
1276
1277 set_bit(vid, card->active_vlans);
1278 return 0;
1279 }
1280
static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
1282 __be16 proto, u16 vid)
1283 {
1284 struct qeth_card *card = dev->ml_priv;
1285
1286 QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
1287
1288 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
1289 QETH_CARD_TEXT(card, 3, "kidREC");
1290 return 0;
1291 }
1292 clear_bit(vid, card->active_vlans);
1293 qeth_l3_set_rx_mode(dev);
1294 return 0;
1295 }
1296
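/* Rebuild an L2 header for a received L3 packet: pick the destination MAC
 * from the cast flags, fake the source MAC unless the HW supplied one, and
 * propagate VLAN tag and checksum information from the qeth header.
 */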
static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
1298 struct qeth_hdr *hdr)
1299 {
1300 if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
1301 u16 prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
1302 ETH_P_IP;
1303 unsigned char tg_addr[ETH_ALEN];
1304
1305 skb_reset_network_header(skb);
1306 switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
1307 case QETH_CAST_MULTICAST:
1308 if (prot == ETH_P_IP)
1309 ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
1310 else
1311 ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
1312
1313 card->stats.multicast++;
1314 break;
1315 case QETH_CAST_BROADCAST:
1316 ether_addr_copy(tg_addr, card->dev->broadcast);
1317 card->stats.multicast++;
1318 break;
1319 default:
1320 if (card->options.sniffer)
1321 skb->pkt_type = PACKET_OTHERHOST;
1322 ether_addr_copy(tg_addr, card->dev->dev_addr);
1323 }
1324
1325 if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
1326 card->dev->header_ops->create(skb, card->dev, prot,
1327 tg_addr, &hdr->hdr.l3.next_hop.rx.src_mac,
1328 skb->len);
1329 else
1330 card->dev->header_ops->create(skb, card->dev, prot,
1331 tg_addr, "FAKELL", skb->len);
1332 }
1333
1334 skb->protocol = eth_type_trans(skb, card->dev);
1335
1336 /* copy VLAN tag from hdr into skb */
1337 if (!card->options.sniffer &&
1338 (hdr->hdr.l3.ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
1339 QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
1340 u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
1341 hdr->hdr.l3.vlan_id :
1342 hdr->hdr.l3.next_hop.rx.vlan_id;
1343 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
1344 }
1345
1346 qeth_rx_csum(card, skb, hdr->hdr.l3.ext_flags);
1347 }
1348
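/* Hand received packets to the stack, within the given NAPI budget. On IQD
 * devices, AF_IUCV traffic skips the L3 rebuild and gets a faked LL header.
 */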
static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
1350 int budget, int *done)
1351 {
1352 int work_done = 0;
1353 struct sk_buff *skb;
1354 struct qeth_hdr *hdr;
1355 unsigned int len;
1356 __u16 magic;
1357
1358 *done = 0;
1359 WARN_ON_ONCE(!budget);
1360 while (budget) {
1361 skb = qeth_core_get_next_skb(card,
1362 &card->qdio.in_q->bufs[card->rx.b_index],
1363 &card->rx.b_element, &card->rx.e_offset, &hdr);
1364 if (!skb) {
1365 *done = 1;
1366 break;
1367 }
1368 switch (hdr->hdr.l3.id) {
1369 case QETH_HEADER_TYPE_LAYER3:
1370 magic = *(__u16 *)skb->data;
1371 if ((card->info.type == QETH_CARD_TYPE_IQD) &&
1372 (magic == ETH_P_AF_IUCV)) {
1373 skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
1374 len = skb->len;
1375 card->dev->header_ops->create(skb, card->dev, 0,
1376 card->dev->dev_addr, "FAKELL", len);
1377 skb_reset_mac_header(skb);
1378 netif_receive_skb(skb);
1379 } else {
1380 qeth_l3_rebuild_skb(card, skb, hdr);
1381 len = skb->len;
1382 napi_gro_receive(&card->napi, skb);
1383 }
1384 break;
1385 case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
1386 skb->protocol = eth_type_trans(skb, skb->dev);
1387 len = skb->len;
1388 netif_receive_skb(skb);
1389 break;
1390 default:
1391 dev_kfree_skb_any(skb);
1392 QETH_CARD_TEXT(card, 3, "inbunkno");
1393 QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
1394 continue;
1395 }
1396 work_done++;
1397 budget--;
1398 card->stats.rx_packets++;
1399 card->stats.rx_bytes += len;
1400 }
1401 return work_done;
1402 }
1403
static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
1405 {
1406 QETH_DBF_TEXT(SETUP, 2, "stopcard");
1407 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1408
1409 qeth_set_allowed_threads(card, 0, 1);
1410 if (card->options.sniffer &&
1411 (card->info.promisc_mode == SET_PROMISC_MODE_ON))
1412 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
1413 if (card->read.state == CH_STATE_UP &&
1414 card->write.state == CH_STATE_UP &&
1415 (card->state == CARD_STATE_UP)) {
1416 if (recovery_mode)
1417 qeth_l3_stop(card->dev);
1418 else {
1419 rtnl_lock();
1420 dev_close(card->dev);
1421 rtnl_unlock();
1422 }
1423 card->state = CARD_STATE_SOFTSETUP;
1424 }
1425 if (card->state == CARD_STATE_SOFTSETUP) {
1426 qeth_l3_clear_ip_htable(card, 1);
1427 qeth_clear_ipacmd_list(card);
1428 card->state = CARD_STATE_HARDSETUP;
1429 }
1430 if (card->state == CARD_STATE_HARDSETUP) {
1431 qeth_qdio_clear_card(card, 0);
1432 qeth_clear_qdio_buffers(card);
1433 qeth_clear_working_pool_list(card);
1434 card->state = CARD_STATE_DOWN;
1435 }
1436 if (card->state == CARD_STATE_DOWN) {
1437 qeth_clear_cmd_buffers(&card->read);
1438 qeth_clear_cmd_buffers(&card->write);
1439 }
1440 }
1441
/*
 * Test for and switch promiscuous mode (on or off),
 * either for GuestLAN or the HiperSockets sniffer.
 */
1446 static void
qeth_l3_handle_promisc_mode(struct qeth_card *card)
1448 {
1449 struct net_device *dev = card->dev;
1450
1451 if (((dev->flags & IFF_PROMISC) &&
1452 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
1453 (!(dev->flags & IFF_PROMISC) &&
1454 (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
1455 return;
1456
1457 if (card->info.guestlan) { /* Guestlan trace */
1458 if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
1459 qeth_setadp_promisc_mode(card);
1460 } else if (card->options.sniffer && /* HiperSockets trace */
1461 qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
1462 if (dev->flags & IFF_PROMISC) {
1463 QETH_CARD_TEXT(card, 3, "+promisc");
1464 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE);
1465 } else {
1466 QETH_CARD_TEXT(card, 3, "-promisc");
1467 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
1468 }
1469 }
1470 }
1471
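/* ndo_set_rx_mode handler: rebuild the multicast hash table from the
 * current IPv4/IPv6 (and VLAN) memberships, sync it with the HW, and
 * finally update the promiscuous mode setting.
 */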
static void qeth_l3_set_rx_mode(struct net_device *dev)
1473 {
1474 struct qeth_card *card = dev->ml_priv;
1475 struct qeth_ipaddr *addr;
1476 struct hlist_node *tmp;
1477 int i, rc;
1478
1479 QETH_CARD_TEXT(card, 3, "setmulti");
1480 if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
1481 (card->state != CARD_STATE_UP))
1482 return;
1483 if (!card->options.sniffer) {
1484 spin_lock_bh(&card->mclock);
1485
1486 qeth_l3_add_multicast_ipv4(card);
1487 qeth_l3_add_multicast_ipv6(card);
1488
1489 hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
1490 switch (addr->disp_flag) {
1491 case QETH_DISP_ADDR_DELETE:
1492 rc = qeth_l3_deregister_addr_entry(card, addr);
1493 if (!rc || rc == IPA_RC_MC_ADDR_NOT_FOUND) {
1494 hash_del(&addr->hnode);
1495 kfree(addr);
1496 }
1497 break;
1498 case QETH_DISP_ADDR_ADD:
1499 rc = qeth_l3_register_addr_entry(card, addr);
1500 if (rc && rc != IPA_RC_LAN_OFFLINE) {
1501 hash_del(&addr->hnode);
1502 kfree(addr);
1503 break;
1504 }
1505 addr->ref_counter = 1;
1506 /* fall through */
1507 default:
1508 /* for next call to set_rx_mode(): */
1509 addr->disp_flag = QETH_DISP_ADDR_DELETE;
1510 }
1511 }
1512
1513 spin_unlock_bh(&card->mclock);
1514
1515 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
1516 return;
1517 }
1518 qeth_l3_handle_promisc_mode(card);
1519 }
1520
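/* Map an ARP assist return code to an errno and a log message. */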
static const char *qeth_l3_arp_get_error_cause(int *rc)
1522 {
1523 switch (*rc) {
1524 case QETH_IPA_ARP_RC_FAILED:
1525 *rc = -EIO;
1526 return "operation failed";
1527 case QETH_IPA_ARP_RC_NOTSUPP:
1528 *rc = -EOPNOTSUPP;
1529 return "operation not supported";
1530 case QETH_IPA_ARP_RC_OUT_OF_RANGE:
1531 *rc = -EINVAL;
1532 return "argument out of range";
1533 case QETH_IPA_ARP_RC_Q_NOTSUPP:
1534 *rc = -EOPNOTSUPP;
1535 return "query operation not supported";
1536 case QETH_IPA_ARP_RC_Q_NO_DATA:
1537 *rc = -ENOENT;
1538 return "no query data available";
1539 default:
1540 return "unknown error";
1541 }
1542 }
1543
static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
1545 {
1546 int tmp;
1547 int rc;
1548
1549 QETH_CARD_TEXT(card, 3, "arpstnoe");
1550
1551 /*
1552 * currently GuestLAN only supports the ARP assist function
1553 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
1554 * thus we say EOPNOTSUPP for this ARP function
1555 */
1556 if (card->info.guestlan)
1557 return -EOPNOTSUPP;
1558 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
1559 return -EOPNOTSUPP;
1560 }
1561 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
1562 IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
1563 no_entries);
1564 if (rc) {
1565 tmp = rc;
1566 QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on "
1567 "%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card),
1568 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
1569 }
1570 return rc;
1571 }
1572
static __u32 get_arp_entry_size(struct qeth_card *card,
1574 struct qeth_arp_query_data *qdata,
1575 struct qeth_arp_entrytype *type, __u8 strip_entries)
1576 {
1577 __u32 rc;
1578 __u8 is_hsi;
1579
1580 is_hsi = qdata->reply_bits == 5;
1581 if (type->ip == QETHARP_IP_ADDR_V4) {
1582 QETH_CARD_TEXT(card, 4, "arpev4");
1583 if (strip_entries) {
1584 rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5_short) :
1585 sizeof(struct qeth_arp_qi_entry7_short);
1586 } else {
1587 rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5) :
1588 sizeof(struct qeth_arp_qi_entry7);
1589 }
1590 } else if (type->ip == QETHARP_IP_ADDR_V6) {
1591 QETH_CARD_TEXT(card, 4, "arpev6");
1592 if (strip_entries) {
1593 rc = is_hsi ?
1594 sizeof(struct qeth_arp_qi_entry5_short_ipv6) :
1595 sizeof(struct qeth_arp_qi_entry7_short_ipv6);
1596 } else {
1597 rc = is_hsi ?
1598 sizeof(struct qeth_arp_qi_entry5_ipv6) :
1599 sizeof(struct qeth_arp_qi_entry7_ipv6);
1600 }
1601 } else {
1602 QETH_CARD_TEXT(card, 4, "arpinv");
1603 rc = 0;
1604 }
1605
1606 return rc;
1607 }
1608
static int arpentry_matches_prot(struct qeth_arp_entrytype *type, __u16 prot)
1610 {
1611 return (type->ip == QETHARP_IP_ADDR_V4 && prot == QETH_PROT_IPV4) ||
1612 (type->ip == QETHARP_IP_ADDR_V6 && prot == QETH_PROT_IPV6);
1613 }
1614
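/* Copy the ARP entries of each reply chunk into the user buffer, optionally
 * stripping the media-specific bytes. Returns 1 while more replies are
 * outstanding.
 */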
static int qeth_l3_arp_query_cb(struct qeth_card *card,
1616 struct qeth_reply *reply, unsigned long data)
1617 {
1618 struct qeth_ipa_cmd *cmd;
1619 struct qeth_arp_query_data *qdata;
1620 struct qeth_arp_query_info *qinfo;
1621 int i;
1622 int e;
1623 int entrybytes_done;
1624 int stripped_bytes;
1625 __u8 do_strip_entries;
1626
1627 QETH_CARD_TEXT(card, 3, "arpquecb");
1628
1629 qinfo = (struct qeth_arp_query_info *) reply->param;
1630 cmd = (struct qeth_ipa_cmd *) data;
1631 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.prot_version);
1632 if (cmd->hdr.return_code) {
1633 QETH_CARD_TEXT(card, 4, "arpcberr");
1634 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
1635 return 0;
1636 }
1637 if (cmd->data.setassparms.hdr.return_code) {
1638 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
1639 QETH_CARD_TEXT(card, 4, "setaperr");
1640 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
1641 return 0;
1642 }
1643 qdata = &cmd->data.setassparms.data.query_arp;
1644 QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries);
1645
1646 do_strip_entries = (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) > 0;
1647 stripped_bytes = do_strip_entries ? QETH_QARP_MEDIASPECIFIC_BYTES : 0;
1648 entrybytes_done = 0;
1649 for (e = 0; e < qdata->no_entries; ++e) {
1650 char *cur_entry;
1651 __u32 esize;
1652 struct qeth_arp_entrytype *etype;
1653
1654 cur_entry = &qdata->data + entrybytes_done;
1655 etype = &((struct qeth_arp_qi_entry5 *) cur_entry)->type;
1656 if (!arpentry_matches_prot(etype, cmd->hdr.prot_version)) {
1657 QETH_CARD_TEXT(card, 4, "pmis");
1658 QETH_CARD_TEXT_(card, 4, "%i", etype->ip);
1659 break;
1660 }
1661 esize = get_arp_entry_size(card, qdata, etype,
1662 do_strip_entries);
1663 QETH_CARD_TEXT_(card, 5, "esz%i", esize);
1664 if (!esize)
1665 break;
1666
1667 if ((qinfo->udata_len - qinfo->udata_offset) < esize) {
1668 QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM);
1669 cmd->hdr.return_code = IPA_RC_ENOMEM;
1670 goto out_error;
1671 }
1672
1673 memcpy(qinfo->udata + qinfo->udata_offset,
1674 &qdata->data + entrybytes_done + stripped_bytes,
1675 esize);
1676 entrybytes_done += esize + stripped_bytes;
1677 qinfo->udata_offset += esize;
1678 ++qinfo->no_entries;
1679 }
1680 /* check if all replies received ... */
1681 if (cmd->data.setassparms.hdr.seq_no <
1682 cmd->data.setassparms.hdr.number_of_replies)
1683 return 1;
1684 QETH_CARD_TEXT_(card, 4, "nove%i", qinfo->no_entries);
1685 memcpy(qinfo->udata, &qinfo->no_entries, 4);
1686 /* keep STRIP_ENTRIES flag so the user program can distinguish
1687 * stripped entries from normal ones */
1688 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
1689 qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
1690 memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2);
1691 QETH_CARD_TEXT_(card, 4, "rc%i", 0);
1692 return 0;
1693 out_error:
1694 i = 0;
1695 memcpy(qinfo->udata, &i, 4);
1696 return 0;
1697 }
1698
static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card,
1700 struct qeth_cmd_buffer *iob, int len,
1701 int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
1702 unsigned long),
1703 void *reply_param)
1704 {
1705 QETH_CARD_TEXT(card, 4, "sendarp");
1706
1707 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
1708 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
1709 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
1710 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
1711 reply_cb, reply_param);
1712 }
1713
static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
1715 enum qeth_prot_versions prot,
1716 struct qeth_arp_query_info *qinfo)
1717 {
1718 struct qeth_cmd_buffer *iob;
1719 struct qeth_ipa_cmd *cmd;
1720 int tmp;
1721 int rc;
1722
1723 QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot);
1724
1725 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
1726 IPA_CMD_ASS_ARP_QUERY_INFO,
1727 sizeof(struct qeth_arp_query_data)
1728 - sizeof(char),
1729 prot);
1730 if (!iob)
1731 return -ENOMEM;
1732 cmd = __ipa_cmd(iob);
1733 cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
1734 cmd->data.setassparms.data.query_arp.reply_bits = 0;
1735 cmd->data.setassparms.data.query_arp.no_entries = 0;
1736 rc = qeth_l3_send_ipa_arp_cmd(card, iob,
1737 QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
1738 qeth_l3_arp_query_cb, (void *)qinfo);
1739 if (rc) {
1740 tmp = rc;
1741 QETH_DBF_MESSAGE(2,
1742 "Error while querying ARP cache on %s: %s "
1743 "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
1744 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
1745 }
1746
1747 return rc;
1748 }
1749
static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
1751 {
1752 struct qeth_arp_query_info qinfo = {0, };
1753 int rc;
1754
1755 QETH_CARD_TEXT(card, 3, "arpquery");
1756
1757 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
1758 IPA_ARP_PROCESSING)) {
1759 QETH_CARD_TEXT(card, 3, "arpqnsup");
1760 rc = -EOPNOTSUPP;
1761 goto out;
1762 }
1763 /* get size of userspace buffer and mask_bits -> 6 bytes */
1764 if (copy_from_user(&qinfo, udata, 6)) {
1765 rc = -EFAULT;
1766 goto out;
1767 }
1768 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
1769 if (!qinfo.udata) {
1770 rc = -ENOMEM;
1771 goto out;
1772 }
1773 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
1774 rc = qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV4, &qinfo);
1775 if (rc) {
1776 if (copy_to_user(udata, qinfo.udata, 4))
1777 rc = -EFAULT;
1778 goto free_and_out;
1779 }
1780 if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) {
1781 /* fails in case of GuestLAN QDIO mode */
1782 qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo);
1783 }
1784 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) {
1785 QETH_CARD_TEXT(card, 4, "qactf");
1786 rc = -EFAULT;
1787 goto free_and_out;
1788 }
1789 QETH_CARD_TEXT(card, 4, "qacts");
1790
1791 free_and_out:
1792 kfree(qinfo.udata);
1793 out:
1794 return rc;
1795 }
1796
static int qeth_l3_arp_add_entry(struct qeth_card *card,
1798 struct qeth_arp_cache_entry *entry)
1799 {
1800 struct qeth_cmd_buffer *iob;
1801 char buf[16];
1802 int tmp;
1803 int rc;
1804
1805 QETH_CARD_TEXT(card, 3, "arpadent");
1806
1807 /*
1808 * currently GuestLAN only supports the ARP assist function
1809 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
1810 * thus we say EOPNOTSUPP for this ARP function
1811 */
1812 if (card->info.guestlan)
1813 return -EOPNOTSUPP;
1814 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
1815 return -EOPNOTSUPP;
1816 }
1817
1818 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
1819 IPA_CMD_ASS_ARP_ADD_ENTRY,
1820 sizeof(struct qeth_arp_cache_entry),
1821 QETH_PROT_IPV4);
1822 if (!iob)
1823 return -ENOMEM;
1824 rc = qeth_send_setassparms(card, iob,
1825 sizeof(struct qeth_arp_cache_entry),
1826 (unsigned long) entry,
1827 qeth_setassparms_cb, NULL);
1828 if (rc) {
1829 tmp = rc;
1830 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
1831 QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s "
1832 "on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
1833 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
1834 }
1835 return rc;
1836 }
1837
static int qeth_l3_arp_remove_entry(struct qeth_card *card,
1839 struct qeth_arp_cache_entry *entry)
1840 {
1841 struct qeth_cmd_buffer *iob;
1842 char buf[16] = {0, };
1843 int tmp;
1844 int rc;
1845
1846 QETH_CARD_TEXT(card, 3, "arprment");
1847
1848 /*
1849 * currently GuestLAN only supports the ARP assist function
1850 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
1851 * thus we say EOPNOTSUPP for this ARP function
1852 */
1853 if (card->info.guestlan)
1854 return -EOPNOTSUPP;
1855 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
1856 return -EOPNOTSUPP;
1857 }
1858 memcpy(buf, entry, 12);
1859 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
1860 IPA_CMD_ASS_ARP_REMOVE_ENTRY,
1861 12,
1862 QETH_PROT_IPV4);
1863 if (!iob)
1864 return -ENOMEM;
1865 rc = qeth_send_setassparms(card, iob,
1866 12, (unsigned long)buf,
1867 qeth_setassparms_cb, NULL);
1868 if (rc) {
1869 tmp = rc;
1870 memset(buf, 0, 16);
1871 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
1872 QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s"
1873 " on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
1874 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
1875 }
1876 return rc;
1877 }
1878
1879 static int qeth_l3_arp_flush_cache(struct qeth_card *card)
1880 {
1881 int rc;
1882 int tmp;
1883
1884 QETH_CARD_TEXT(card, 3, "arpflush");
1885
1886 /*
1887 * currently GuestLAN only supports the ARP assist function
1888 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
1889 * thus we return -EOPNOTSUPP for this ARP function
1890 */
1891 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
1892 return -EOPNOTSUPP;
1893 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
1894 return -EOPNOTSUPP;
1895 }
1896 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
1897 IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
1898 if (rc) {
1899 tmp = rc;
1900 QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s "
1901 "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
1902 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
1903 }
1904 return rc;
1905 }
1906
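/*
 * Dispatcher for the SIOC_QETH_ARP_* private ioctls; every request requires
 * CAP_NET_ADMIN. A rough, illustrative userspace sketch (assuming the
 * SIOC_QETH_* numbers from the s390 qeth uapi header; the interface name and
 * buffer layout follow the ARP query code above):
 *
 *	struct ifreq ifr = {};
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = buf;	// first 6 bytes: reply-buffer len + mask bits
 *	ioctl(fd, SIOC_QETH_ARP_QUERY_INFO, &ifr);
 */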
1907 static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1908 {
1909 struct qeth_card *card = dev->ml_priv;
1910 struct qeth_arp_cache_entry arp_entry;
1911 int rc = 0;
1912
1913 switch (cmd) {
1914 case SIOC_QETH_ARP_SET_NO_ENTRIES:
1915 if (!capable(CAP_NET_ADMIN)) {
1916 rc = -EPERM;
1917 break;
1918 }
1919 rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
1920 break;
1921 case SIOC_QETH_ARP_QUERY_INFO:
1922 if (!capable(CAP_NET_ADMIN)) {
1923 rc = -EPERM;
1924 break;
1925 }
1926 rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data);
1927 break;
1928 case SIOC_QETH_ARP_ADD_ENTRY:
1929 if (!capable(CAP_NET_ADMIN)) {
1930 rc = -EPERM;
1931 break;
1932 }
1933 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
1934 sizeof(struct qeth_arp_cache_entry)))
1935 rc = -EFAULT;
1936 else
1937 rc = qeth_l3_arp_add_entry(card, &arp_entry);
1938 break;
1939 case SIOC_QETH_ARP_REMOVE_ENTRY:
1940 if (!capable(CAP_NET_ADMIN)) {
1941 rc = -EPERM;
1942 break;
1943 }
1944 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
1945 sizeof(struct qeth_arp_cache_entry)))
1946 rc = -EFAULT;
1947 else
1948 rc = qeth_l3_arp_remove_entry(card, &arp_entry);
1949 break;
1950 case SIOC_QETH_ARP_FLUSH_CACHE:
1951 if (!capable(CAP_NET_ADMIN)) {
1952 rc = -EPERM;
1953 break;
1954 }
1955 rc = qeth_l3_arp_flush_cache(card);
1956 break;
1957 default:
1958 rc = -EOPNOTSUPP;
1959 }
1960 return rc;
1961 }
1962
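/*
 * Classify the outgoing skb as unicast/multicast/broadcast/anycast: prefer
 * the neighbour entry's type, otherwise fall back to the destination IP
 * address, and finally to the destination MAC address.
 */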
1963 static int qeth_l3_get_cast_type(struct sk_buff *skb)
1964 {
1965 struct neighbour *n = NULL;
1966 struct dst_entry *dst;
1967
1968 rcu_read_lock();
1969 dst = skb_dst(skb);
1970 if (dst)
1971 n = dst_neigh_lookup_skb(dst, skb);
1972 if (n) {
1973 int cast_type = n->type;
1974
1975 rcu_read_unlock();
1976 neigh_release(n);
1977 if ((cast_type == RTN_BROADCAST) ||
1978 (cast_type == RTN_MULTICAST) ||
1979 (cast_type == RTN_ANYCAST))
1980 return cast_type;
1981 return RTN_UNICAST;
1982 }
1983 rcu_read_unlock();
1984
1985 /* no neighbour (e.g. AF_PACKET), fall back to the target's IP address ... */
1986 if (be16_to_cpu(skb->protocol) == ETH_P_IPV6)
1987 return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
1988 RTN_MULTICAST : RTN_UNICAST;
1989 else if (be16_to_cpu(skb->protocol) == ETH_P_IP)
1990 return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
1991 RTN_MULTICAST : RTN_UNICAST;
1992
1993 /* ... and MAC address */
1994 if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast))
1995 return RTN_BROADCAST;
1996 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
1997 return RTN_MULTICAST;
1998
1999 /* default to unicast */
2000 return RTN_UNICAST;
2001 }
2002
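/*
 * Build the L3 header for AF_IUCV traffic: it is sent as an IPv6 unicast
 * frame, with a link-local (fe80::) next-hop address whose lower eight
 * bytes carry the destination user ID from the IUCV transport header.
 */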
2003 static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb,
2004 unsigned int data_len)
2005 {
2006 char daddr[16];
2007 struct af_iucv_trans_hdr *iucv_hdr;
2008
2009 memset(hdr, 0, sizeof(struct qeth_hdr));
2010 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
2011 hdr->hdr.l3.length = data_len;
2012 hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
2013
2014 iucv_hdr = (struct af_iucv_trans_hdr *)(skb_mac_header(skb) + ETH_HLEN);
2015 memset(daddr, 0, sizeof(daddr));
2016 daddr[0] = 0xfe;
2017 daddr[1] = 0x80;
2018 memcpy(&daddr[8], iucv_hdr->destUserID, 8);
2019 memcpy(hdr->hdr.l3.next_hop.ipv6_addr, daddr, 16);
2020 }
2021
2022 static u8 qeth_l3_cast_type_to_flag(int cast_type)
2023 {
2024 if (cast_type == RTN_MULTICAST)
2025 return QETH_CAST_MULTICAST;
2026 if (cast_type == RTN_ANYCAST)
2027 return QETH_CAST_ANYCAST;
2028 if (cast_type == RTN_BROADCAST)
2029 return QETH_CAST_BROADCAST;
2030 return QETH_CAST_UNICAST;
2031 }
2032
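/*
 * Build the regular L3 hardware header: cast-type flags, VLAN information,
 * checksum-offload flags and the next-hop address (the route's gateway if
 * one is set, otherwise the destination address from the IP header).
 * Non-IP traffic on OSA devices is marked as pass-through instead.
 */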
2033 static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2034 struct sk_buff *skb, int ipv, int cast_type,
2035 unsigned int data_len)
2036 {
2037 memset(hdr, 0, sizeof(struct qeth_hdr));
2038 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
2039 hdr->hdr.l3.length = data_len;
2040
2041 /*
2042 * Fill in the VLAN info before this location is overwritten with the
2043 * next-hop IP: v6 uses passthrough, v4 sets the tag in the QDIO header.
2044 */
2045 if (skb_vlan_tag_present(skb)) {
2046 if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD))
2047 hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME;
2048 else
2049 hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG;
2050 hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
2051 }
2052
2053 if (!skb_is_gso(skb) && skb->ip_summed == CHECKSUM_PARTIAL) {
2054 qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
2055 if (card->options.performance_stats)
2056 card->perf_stats.tx_csum++;
2057 }
2058
2059 /* OSA only: */
2060 if (!ipv) {
2061 hdr->hdr.l3.flags = QETH_HDR_PASSTHRU;
2062 if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
2063 skb->dev->broadcast))
2064 hdr->hdr.l3.flags |= QETH_CAST_BROADCAST;
2065 else
2066 hdr->hdr.l3.flags |= (cast_type == RTN_MULTICAST) ?
2067 QETH_CAST_MULTICAST : QETH_CAST_UNICAST;
2068 return;
2069 }
2070
2071 hdr->hdr.l3.flags = qeth_l3_cast_type_to_flag(cast_type);
2072 rcu_read_lock();
2073 if (ipv == 4) {
2074 struct rtable *rt = skb_rtable(skb);
2075
2076 *((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
2077 rt_nexthop(rt, ip_hdr(skb)->daddr) :
2078 ip_hdr(skb)->daddr;
2079 } else {
2080 /* IPv6 */
2081 const struct rt6_info *rt = skb_rt6_info(skb);
2082 const struct in6_addr *next_hop;
2083
2084 if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
2085 next_hop = &rt->rt6i_gateway;
2086 else
2087 next_hop = &ipv6_hdr(skb)->daddr;
2088 memcpy(hdr->hdr.l3.next_hop.ipv6_addr, next_hop, 16);
2089
2090 hdr->hdr.l3.flags |= QETH_HDR_IPV6;
2091 if (card->info.type != QETH_CARD_TYPE_IQD)
2092 hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
2093 }
2094 rcu_read_unlock();
2095 }
2096
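/*
 * Convert the already filled L3 header into a TSO header: append the TSO
 * extension with MSS, datagram header length and payload length, and
 * pre-seed the TCP/IP checksum and length fields the way the adapter
 * apparently expects them for large send (see the comments below).
 */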
2097 static void qeth_tso_fill_header(struct qeth_card *card,
2098 struct qeth_hdr *qhdr, struct sk_buff *skb)
2099 {
2100 struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
2101 struct tcphdr *tcph = tcp_hdr(skb);
2102 struct iphdr *iph = ip_hdr(skb);
2103 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2104
2105 /* switch the header to TSO values ... */
2106 hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
2107 /* set the values that are fixed for this approach ... */
2108 hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
2109 hdr->ext.imb_hdr_no = 1;
2110 hdr->ext.hdr_type = 1;
2111 hdr->ext.hdr_version = 1;
2112 hdr->ext.hdr_len = 28;
2113 /* insert the non-fixed values */
2114 hdr->ext.mss = skb_shinfo(skb)->gso_size;
2115 hdr->ext.dg_hdr_len = (__u16)(ip_hdrlen(skb) + tcp_hdrlen(skb));
2116 hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
2117 sizeof(struct qeth_hdr_tso));
2118 tcph->check = 0;
2119 if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) {
2120 ip6h->payload_len = 0;
2121 tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
2122 0, IPPROTO_TCP, 0);
2123 } else {
2124 /* OSA wants us to set these values ... */
2125 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
2126 0, IPPROTO_TCP, 0);
2127 iph->tot_len = 0;
2128 iph->check = 0;
2129 }
2130 }
2131
2132 /**
2133 * qeth_l3_get_elements_no_tso() - find number of SBALEs for skb data for tso
2134 * @card: qeth card structure, to check max. elems.
2135 * @skb: SKB address
2136 * @extra_elems: extra elems needed, to check against max.
2137 *
2138 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
2139 * skb data, including linear part and fragments, but excluding TCP header.
2140 * (Exclusion of TCP header distinguishes it from qeth_get_elements_no().)
2141 * Checks if the result plus extra_elems fits under the limit for the card.
2142 * Returns 0 if it does not.
2143 * Note: extra_elems is not included in the returned result.
2144 */
2145 static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
2146 struct sk_buff *skb, int extra_elems)
2147 {
2148 addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
2149 addr_t end = (addr_t)skb->data + skb_headlen(skb);
2150 int elements = qeth_get_elements_for_frags(skb);
2151
2152 if (start != end)
2153 elements += qeth_get_elements_for_range(start, end);
2154
2155 if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
2156 QETH_DBF_MESSAGE(2,
2157 "Invalid size of TSO IP packet (Number=%d / Length=%d). Discarded.\n",
2158 elements + extra_elems, skb->len);
2159 return 0;
2160 }
2161 return elements;
2162 }
2163
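/*
 * TX path for IQD devices and non-GSO IPv4 on OSA: strip the Ethernet
 * header, reuse its headroom for the qeth header (falling back to a
 * separately allocated header if it cannot be pushed contiguously), fill
 * the header and hand the skb to the QDIO output queue. On -EBUSY the
 * Ethernet header is restored so the stack can requeue the skb.
 */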
2164 static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
2165 struct qeth_qdio_out_q *queue, int ipv,
2166 int cast_type)
2167 {
2168 const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
2169 unsigned int frame_len, elements;
2170 unsigned char eth_hdr[ETH_HLEN];
2171 struct qeth_hdr *hdr = NULL;
2172 unsigned int hd_len = 0;
2173 int push_len, rc;
2174 bool is_sg;
2175
2176 /* re-use the L2 header area for the HW header: */
2177 rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
2178 if (rc)
2179 return rc;
2180 skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN);
2181 skb_pull(skb, ETH_HLEN);
2182 frame_len = skb->len;
2183
2184 push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, 0,
2185 &elements);
2186 if (push_len < 0)
2187 return push_len;
2188 if (!push_len) {
2189 /* hdr was added discontiguous from skb->data */
2190 hd_len = hw_hdr_len;
2191 }
2192
2193 if (skb->protocol == htons(ETH_P_AF_IUCV))
2194 qeth_l3_fill_af_iucv_hdr(hdr, skb, frame_len);
2195 else
2196 qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len);
2197
2198 is_sg = skb_is_nonlinear(skb);
2199 if (IS_IQD(card)) {
2200 rc = qeth_do_send_packet_fast(queue, skb, hdr, 0, hd_len);
2201 } else {
2202 /* TODO: drop skb_orphan() once TX completion is fast enough */
2203 skb_orphan(skb);
2204 rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len,
2205 elements);
2206 }
2207
2208 if (!rc) {
2209 if (card->options.performance_stats) {
2210 card->perf_stats.buf_elements_sent += elements;
2211 if (is_sg)
2212 card->perf_stats.sg_skbs_sent++;
2213 }
2214 } else {
2215 if (!push_len)
2216 kmem_cache_free(qeth_core_header_cache, hdr);
2217 if (rc == -EBUSY) {
2218 /* roll back to ETH header */
2219 skb_pull(skb, push_len);
2220 skb_push(skb, ETH_HLEN);
2221 skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN);
2222 }
2223 }
2224 return rc;
2225 }
2226
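/*
 * Generic TX path: work on a headroom-extended copy of the skb, re-insert
 * the VLAN tag manually for tagged non-IPv4 frames, linearize the skb if
 * its fragment list would exceed the buffer-element limit, and prepend
 * either a plain L3 header or a TSO header for GSO TCPv4 traffic.
 */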
2227 static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
2228 struct qeth_qdio_out_q *queue, int ipv, int cast_type)
2229 {
2230 int elements, len, rc;
2231 __be16 *tag;
2232 struct qeth_hdr *hdr = NULL;
2233 int hdr_elements = 0;
2234 struct sk_buff *new_skb = NULL;
2235 int tx_bytes = skb->len;
2236 unsigned int hd_len;
2237 bool use_tso, is_sg;
2238
2239 /* Ignore segment size from skb_is_gso(), 1 page is always used. */
2240 use_tso = skb_is_gso(skb) &&
2241 (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4);
2242
2243 /* create a clone with writeable headroom */
2244 new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) +
2245 VLAN_HLEN);
2246 if (!new_skb)
2247 return -ENOMEM;
2248
2249 if (ipv == 4) {
2250 skb_pull(new_skb, ETH_HLEN);
2251 } else if (skb_vlan_tag_present(new_skb)) {
2252 skb_push(new_skb, VLAN_HLEN);
2253 skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
2254 skb_copy_to_linear_data_offset(new_skb, 4,
2255 new_skb->data + 8, 4);
2256 skb_copy_to_linear_data_offset(new_skb, 8,
2257 new_skb->data + 12, 4);
2258 tag = (__be16 *)(new_skb->data + 12);
2259 *tag = cpu_to_be16(ETH_P_8021Q);
2260 *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb));
2261 }
2262
2263 /* work around a hardware limitation: as long as we do not have SBAL
2264 * chaining, we cannot send long frag lists
2265 */
2266 if ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
2267 (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0))) {
2268 rc = skb_linearize(new_skb);
2269
2270 if (card->options.performance_stats) {
2271 if (rc)
2272 card->perf_stats.tx_linfail++;
2273 else
2274 card->perf_stats.tx_lin++;
2275 }
2276 if (rc)
2277 goto out;
2278 }
2279
2280 if (use_tso) {
2281 hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
2282 memset(hdr, 0, sizeof(struct qeth_hdr_tso));
2283 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
2284 new_skb->len - sizeof(struct qeth_hdr_tso));
2285 qeth_tso_fill_header(card, hdr, new_skb);
2286 hdr_elements++;
2287 } else {
2288 hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
2289 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
2290 new_skb->len - sizeof(struct qeth_hdr));
2291 }
2292
2293 elements = use_tso ?
2294 qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
2295 qeth_get_elements_no(card, new_skb, hdr_elements, 0);
2296 if (!elements) {
2297 rc = -E2BIG;
2298 goto out;
2299 }
2300 elements += hdr_elements;
2301
2302 if (use_tso) {
2303 hd_len = sizeof(struct qeth_hdr_tso) +
2304 ip_hdrlen(new_skb) + tcp_hdrlen(new_skb);
2305 len = hd_len;
2306 } else {
2307 hd_len = 0;
2308 len = sizeof(struct qeth_hdr_layer3);
2309 }
2310
2311 if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) {
2312 rc = -EINVAL;
2313 goto out;
2314 }
2315
2316 is_sg = skb_is_nonlinear(new_skb);
2317 rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, hd_len,
2318 elements);
2319 out:
2320 if (!rc) {
2321 if (new_skb != skb)
2322 dev_kfree_skb_any(skb);
2323 if (card->options.performance_stats) {
2324 card->perf_stats.buf_elements_sent += elements;
2325 if (is_sg)
2326 card->perf_stats.sg_skbs_sent++;
2327 if (use_tso) {
2328 card->perf_stats.large_send_bytes += tx_bytes;
2329 card->perf_stats.large_send_cnt++;
2330 }
2331 }
2332 } else {
2333 if (new_skb != skb)
2334 dev_kfree_skb_any(new_skb);
2335 }
2336 return rc;
2337 }
2338
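/*
 * .ndo_start_xmit: drop frames the card cannot handle (sniffer mode,
 * CQ restrictions on IQD, card not up, broadcast without broadcast
 * capability), pick the output queue, and dispatch to the offload path
 * (IQD, or non-GSO IPv4 on OSA) or the generic path above.
 */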
2339 static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
2340 struct net_device *dev)
2341 {
2342 int cast_type = qeth_l3_get_cast_type(skb);
2343 struct qeth_card *card = dev->ml_priv;
2344 int ipv = qeth_get_ip_version(skb);
2345 struct qeth_qdio_out_q *queue;
2346 int tx_bytes = skb->len;
2347 int rc;
2348
2349 if (IS_IQD(card)) {
2350 if (card->options.sniffer)
2351 goto tx_drop;
2352 if ((card->options.cq != QETH_CQ_ENABLED && !ipv) ||
2353 (card->options.cq == QETH_CQ_ENABLED &&
2354 skb->protocol != htons(ETH_P_AF_IUCV)))
2355 goto tx_drop;
2356 }
2357
2358 if (card->state != CARD_STATE_UP || !card->lan_online) {
2359 card->stats.tx_carrier_errors++;
2360 goto tx_drop;
2361 }
2362
2363 if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable)
2364 goto tx_drop;
2365
2366 queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
2367
2368 if (card->options.performance_stats) {
2369 card->perf_stats.outbound_cnt++;
2370 card->perf_stats.outbound_start_time = qeth_get_micros();
2371 }
2372 netif_stop_queue(dev);
2373
2374 if (IS_IQD(card) || (!skb_is_gso(skb) && ipv == 4))
2375 rc = qeth_l3_xmit_offload(card, skb, queue, ipv, cast_type);
2376 else
2377 rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type);
2378
2379 if (!rc) {
2380 card->stats.tx_packets++;
2381 card->stats.tx_bytes += tx_bytes;
2382 if (card->options.performance_stats)
2383 card->perf_stats.outbound_time += qeth_get_micros() -
2384 card->perf_stats.outbound_start_time;
2385 netif_wake_queue(dev);
2386 return NETDEV_TX_OK;
2387 } else if (rc == -EBUSY) {
2388 return NETDEV_TX_BUSY;
2389 } /* else fall through */
2390
2391 tx_drop:
2392 card->stats.tx_dropped++;
2393 card->stats.tx_errors++;
2394 dev_kfree_skb_any(skb);
2395 netif_wake_queue(dev);
2396 return NETDEV_TX_OK;
2397 }
2398
2399 static int __qeth_l3_open(struct net_device *dev)
2400 {
2401 struct qeth_card *card = dev->ml_priv;
2402 int rc = 0;
2403
2404 QETH_CARD_TEXT(card, 4, "qethopen");
2405 if (card->state == CARD_STATE_UP)
2406 return rc;
2407 if (card->state != CARD_STATE_SOFTSETUP)
2408 return -ENODEV;
2409 card->data.state = CH_STATE_UP;
2410 card->state = CARD_STATE_UP;
2411 netif_start_queue(dev);
2412
2413 if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
2414 napi_enable(&card->napi);
2415 napi_schedule(&card->napi);
2416 } else
2417 rc = -EIO;
2418 return rc;
2419 }
2420
2421 static int qeth_l3_open(struct net_device *dev)
2422 {
2423 struct qeth_card *card = dev->ml_priv;
2424
2425 QETH_CARD_TEXT(card, 5, "qethope_");
2426 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
2427 QETH_CARD_TEXT(card, 3, "openREC");
2428 return -ERESTARTSYS;
2429 }
2430 return __qeth_l3_open(dev);
2431 }
2432
2433 static int qeth_l3_stop(struct net_device *dev)
2434 {
2435 struct qeth_card *card = dev->ml_priv;
2436
2437 QETH_CARD_TEXT(card, 4, "qethstop");
2438 netif_tx_disable(dev);
2439 if (card->state == CARD_STATE_UP) {
2440 card->state = CARD_STATE_SOFTSETUP;
2441 napi_disable(&card->napi);
2442 }
2443 return 0;
2444 }
2445
2446 static const struct ethtool_ops qeth_l3_ethtool_ops = {
2447 .get_link = ethtool_op_get_link,
2448 .get_strings = qeth_core_get_strings,
2449 .get_ethtool_stats = qeth_core_get_ethtool_stats,
2450 .get_sset_count = qeth_core_get_sset_count,
2451 .get_drvinfo = qeth_core_get_drvinfo,
2452 .get_link_ksettings = qeth_core_ethtool_get_link_ksettings,
2453 };
2454
2455 /*
2456 * We need NOARP for IPv4, but we still want neighbor solicitation for IPv6.
2457 * Setting NOARP on the netdevice is not an option, because it would also turn
2458 * off neighbor solicitation. Instead we install a neighbor_setup function for
2459 * IPv4: it disables ARP resolution but keeps the hard header, so packet
2460 * sockets (e.g. tcpdump) still work.
2461 */
2462 static int qeth_l3_neigh_setup_noarp(struct neighbour *n)
2463 {
2464 n->nud_state = NUD_NOARP;
2465 memcpy(n->ha, "FAKELL", 6);
2466 n->output = n->ops->connected_output;
2467 return 0;
2468 }
2469
2470 static int
2471 qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np)
2472 {
2473 if (np->tbl->family == AF_INET)
2474 np->neigh_setup = qeth_l3_neigh_setup_noarp;
2475
2476 return 0;
2477 }
2478
2479 static const struct net_device_ops qeth_l3_netdev_ops = {
2480 .ndo_open = qeth_l3_open,
2481 .ndo_stop = qeth_l3_stop,
2482 .ndo_get_stats = qeth_get_stats,
2483 .ndo_start_xmit = qeth_l3_hard_start_xmit,
2484 .ndo_validate_addr = eth_validate_addr,
2485 .ndo_set_rx_mode = qeth_l3_set_rx_mode,
2486 .ndo_do_ioctl = qeth_do_ioctl,
2487 .ndo_fix_features = qeth_fix_features,
2488 .ndo_set_features = qeth_set_features,
2489 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
2490 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
2491 .ndo_tx_timeout = qeth_tx_timeout,
2492 };
2493
2494 static const struct net_device_ops qeth_l3_osa_netdev_ops = {
2495 .ndo_open = qeth_l3_open,
2496 .ndo_stop = qeth_l3_stop,
2497 .ndo_get_stats = qeth_get_stats,
2498 .ndo_start_xmit = qeth_l3_hard_start_xmit,
2499 .ndo_features_check = qeth_features_check,
2500 .ndo_validate_addr = eth_validate_addr,
2501 .ndo_set_rx_mode = qeth_l3_set_rx_mode,
2502 .ndo_do_ioctl = qeth_do_ioctl,
2503 .ndo_fix_features = qeth_fix_features,
2504 .ndo_set_features = qeth_set_features,
2505 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
2506 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
2507 .ndo_tx_timeout = qeth_tx_timeout,
2508 .ndo_neigh_setup = qeth_l3_neigh_setup,
2509 };
2510
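/*
 * One-time netdev setup: OSD/OSX devices get the OSA netdev ops plus
 * TSO/checksum offload features (unless running as a guest LAN), IQD
 * devices run with IFF_NOARP and read their initial MAC from the card.
 * Common setup adds VLAN offload features, keeps the dst entry for the
 * next-hop lookup on transmit, and caps the GSO size to what fits into
 * one QDIO buffer.
 */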
2511 static int qeth_l3_setup_netdev(struct qeth_card *card)
2512 {
2513 int rc;
2514
2515 if (card->dev->netdev_ops)
2516 return 0;
2517
2518 if (card->info.type == QETH_CARD_TYPE_OSD ||
2519 card->info.type == QETH_CARD_TYPE_OSX) {
2520 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
2521 (card->info.link_type == QETH_LINK_TYPE_HSTR)) {
2522 pr_info("qeth_l3: ignoring TR device\n");
2523 return -ENODEV;
2524 }
2525
2526 card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
2527
2528 /* IPv6 address autoconfiguration: derive dev_id from the card's unique id */
2529 qeth_l3_get_unique_id(card);
2530 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
2531 card->dev->dev_id = card->info.unique_id & 0xffff;
2532
2533 if (!card->info.guestlan) {
2534 card->dev->features |= NETIF_F_SG;
2535 card->dev->hw_features |= NETIF_F_TSO |
2536 NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
2537 card->dev->vlan_features |= NETIF_F_TSO |
2538 NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
2539 }
2540
2541 if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) {
2542 card->dev->hw_features |= NETIF_F_IPV6_CSUM;
2543 card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
2544 }
2545 } else if (card->info.type == QETH_CARD_TYPE_IQD) {
2546 card->dev->flags |= IFF_NOARP;
2547 card->dev->netdev_ops = &qeth_l3_netdev_ops;
2548
2549 rc = qeth_l3_iqd_read_initial_mac(card);
2550 if (rc)
2551 goto out;
2552
2553 if (card->options.hsuid[0])
2554 memcpy(card->dev->perm_addr, card->options.hsuid, 9);
2555 } else
2556 return -ENODEV;
2557
2558 card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
2559 card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN;
2560 card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
2561 NETIF_F_HW_VLAN_CTAG_RX |
2562 NETIF_F_HW_VLAN_CTAG_FILTER;
2563
2564 netif_keep_dst(card->dev);
2565 if (card->dev->hw_features & NETIF_F_TSO)
2566 netif_set_gso_max_size(card->dev,
2567 PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));
2568
2569 netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
2570 rc = register_netdev(card->dev);
2571 out:
2572 if (rc)
2573 card->dev->netdev_ops = NULL;
2574 return rc;
2575 }
2576
2577 static const struct device_type qeth_l3_devtype = {
2578 .name = "qeth_layer3",
2579 .groups = qeth_l3_attr_groups,
2580 };
2581
2582 static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
2583 {
2584 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
2585 int rc;
2586
2587 if (gdev->dev.type == &qeth_generic_devtype) {
2588 rc = qeth_l3_create_device_attributes(&gdev->dev);
2589 if (rc)
2590 return rc;
2591 }
2592 hash_init(card->ip_htable);
2593 hash_init(card->ip_mc_htable);
2594 card->options.layer2 = 0;
2595 card->info.hwtrap = 0;
2596 return 0;
2597 }
2598
2599 static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
2600 {
2601 struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
2602
2603 if (cgdev->dev.type == &qeth_generic_devtype)
2604 qeth_l3_remove_device_attributes(&cgdev->dev);
2605
2606 qeth_set_allowed_threads(card, 0, 1);
2607 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
2608
2609 if (cgdev->state == CCWGROUP_ONLINE)
2610 qeth_l3_set_offline(cgdev);
2611
2612 unregister_netdev(card->dev);
2613 qeth_l3_clear_ip_htable(card, 0);
2614 qeth_l3_clear_ipato_list(card);
2615 }
2616
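/*
 * Bring the device online: hardware setup, netdev registration, arming of
 * the HW trap, adapter parameters, IP assists and routing setup (unless in
 * sniffer mode), QDIO queue initialization, and replay of the configured
 * IP addresses. When called from the recovery thread, the interface is
 * reopened without going through dev_open().
 */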
2617 static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
2618 {
2619 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
2620 int rc = 0;
2621 enum qeth_card_states recover_flag;
2622
2623 mutex_lock(&card->discipline_mutex);
2624 mutex_lock(&card->conf_mutex);
2625 QETH_DBF_TEXT(SETUP, 2, "setonlin");
2626 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
2627
2628 recover_flag = card->state;
2629 rc = qeth_core_hardsetup_card(card);
2630 if (rc) {
2631 QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
2632 rc = -ENODEV;
2633 goto out_remove;
2634 }
2635
2636 rc = qeth_l3_setup_netdev(card);
2637 if (rc)
2638 goto out_remove;
2639
2640 if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
2641 if (card->info.hwtrap &&
2642 qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
2643 card->info.hwtrap = 0;
2644 } else
2645 card->info.hwtrap = 0;
2646
2647 card->state = CARD_STATE_HARDSETUP;
2648 qeth_print_status_message(card);
2649
2650 /* softsetup */
2651 QETH_DBF_TEXT(SETUP, 2, "softsetp");
2652
2653 rc = qeth_l3_setadapter_parms(card);
2654 if (rc)
2655 QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
2656 if (!card->options.sniffer) {
2657 rc = qeth_l3_start_ipassists(card);
2658 if (rc) {
2659 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
2660 goto out_remove;
2661 }
2662 rc = qeth_l3_setrouting_v4(card);
2663 if (rc)
2664 QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc);
2665 rc = qeth_l3_setrouting_v6(card);
2666 if (rc)
2667 QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc);
2668 }
2669 netif_tx_disable(card->dev);
2670
2671 rc = qeth_init_qdio_queues(card);
2672 if (rc) {
2673 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
2674 rc = -ENODEV;
2675 goto out_remove;
2676 }
2677 card->state = CARD_STATE_SOFTSETUP;
2678
2679 qeth_set_allowed_threads(card, 0xffffffff, 0);
2680 qeth_l3_recover_ip(card);
2681 if (card->lan_online)
2682 netif_carrier_on(card->dev);
2683 else
2684 netif_carrier_off(card->dev);
2685
2686 qeth_enable_hw_features(card->dev);
2687 if (recover_flag == CARD_STATE_RECOVER) {
2688 rtnl_lock();
2689 if (recovery_mode) {
2690 __qeth_l3_open(card->dev);
2691 qeth_l3_set_rx_mode(card->dev);
2692 } else {
2693 dev_open(card->dev);
2694 }
2695 rtnl_unlock();
2696 }
2697 qeth_trace_features(card);
2698 /* let user_space know that device is online */
2699 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
2700 mutex_unlock(&card->conf_mutex);
2701 mutex_unlock(&card->discipline_mutex);
2702 return 0;
2703 out_remove:
2704 qeth_l3_stop_card(card, 0);
2705 ccw_device_set_offline(CARD_DDEV(card));
2706 ccw_device_set_offline(CARD_WDEV(card));
2707 ccw_device_set_offline(CARD_RDEV(card));
2708 qdio_free(CARD_DDEV(card));
2709 if (recover_flag == CARD_STATE_RECOVER)
2710 card->state = CARD_STATE_RECOVER;
2711 else
2712 card->state = CARD_STATE_DOWN;
2713 mutex_unlock(&card->conf_mutex);
2714 mutex_unlock(&card->discipline_mutex);
2715 return rc;
2716 }
2717
2718 static int qeth_l3_set_online(struct ccwgroup_device *gdev)
2719 {
2720 return __qeth_l3_set_online(gdev, 0);
2721 }
2722
2723 static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
2724 int recovery_mode)
2725 {
2726 struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
2727 int rc = 0, rc2 = 0, rc3 = 0;
2728 enum qeth_card_states recover_flag;
2729
2730 mutex_lock(&card->discipline_mutex);
2731 mutex_lock(&card->conf_mutex);
2732 QETH_DBF_TEXT(SETUP, 3, "setoffl");
2733 QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
2734
2735 netif_carrier_off(card->dev);
2736 recover_flag = card->state;
2737 if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
2738 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
2739 card->info.hwtrap = 1;
2740 }
2741 qeth_l3_stop_card(card, recovery_mode);
2742 if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) {
2743 rtnl_lock();
2744 call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
2745 rtnl_unlock();
2746 }
2747 rc = ccw_device_set_offline(CARD_DDEV(card));
2748 rc2 = ccw_device_set_offline(CARD_WDEV(card));
2749 rc3 = ccw_device_set_offline(CARD_RDEV(card));
2750 if (!rc)
2751 rc = (rc2) ? rc2 : rc3;
2752 if (rc)
2753 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
2754 qdio_free(CARD_DDEV(card));
2755 if (recover_flag == CARD_STATE_UP)
2756 card->state = CARD_STATE_RECOVER;
2757 /* let user_space know that device is offline */
2758 kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
2759 mutex_unlock(&card->conf_mutex);
2760 mutex_unlock(&card->discipline_mutex);
2761 return 0;
2762 }
2763
2764 static int qeth_l3_set_offline(struct ccwgroup_device *cgdev)
2765 {
2766 return __qeth_l3_set_offline(cgdev, 0);
2767 }
2768
2769 static int qeth_l3_recover(void *ptr)
2770 {
2771 struct qeth_card *card;
2772 int rc = 0;
2773
2774 card = (struct qeth_card *) ptr;
2775 QETH_CARD_TEXT(card, 2, "recover1");
2776 QETH_CARD_HEX(card, 2, &card, sizeof(void *));
2777 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
2778 return 0;
2779 QETH_CARD_TEXT(card, 2, "recover2");
2780 dev_warn(&card->gdev->dev,
2781 "A recovery process has been started for the device\n");
2782 qeth_set_recovery_task(card);
2783 __qeth_l3_set_offline(card->gdev, 1);
2784 rc = __qeth_l3_set_online(card->gdev, 1);
2785 if (!rc)
2786 dev_info(&card->gdev->dev,
2787 "Device successfully recovered!\n");
2788 else {
2789 qeth_close_dev(card);
2790 dev_warn(&card->gdev->dev, "The qeth device driver "
2791 "failed to recover an error on the device\n");
2792 }
2793 qeth_clear_recovery_task(card);
2794 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
2795 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
2796 return 0;
2797 }
2798
2799 static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
2800 {
2801 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
2802
2803 netif_device_detach(card->dev);
2804 qeth_set_allowed_threads(card, 0, 1);
2805 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
2806 if (gdev->state == CCWGROUP_OFFLINE)
2807 return 0;
2808 if (card->state == CARD_STATE_UP) {
2809 if (card->info.hwtrap)
2810 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
2811 __qeth_l3_set_offline(card->gdev, 1);
2812 } else
2813 __qeth_l3_set_offline(card->gdev, 0);
2814 return 0;
2815 }
2816
2817 static int qeth_l3_pm_resume(struct ccwgroup_device *gdev)
2818 {
2819 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
2820 int rc = 0;
2821
2822 if (gdev->state == CCWGROUP_OFFLINE)
2823 goto out;
2824
2825 if (card->state == CARD_STATE_RECOVER) {
2826 rc = __qeth_l3_set_online(card->gdev, 1);
2827 if (rc) {
2828 rtnl_lock();
2829 dev_close(card->dev);
2830 rtnl_unlock();
2831 }
2832 } else
2833 rc = __qeth_l3_set_online(card->gdev, 0);
2834 out:
2835 qeth_set_allowed_threads(card, 0xffffffff, 0);
2836 netif_device_attach(card->dev);
2837 if (rc)
2838 dev_warn(&card->gdev->dev, "The qeth device driver "
2839 "failed to recover an error on the device\n");
2840 return rc;
2841 }
2842
2843 /* Returns zero if the command is successfully "consumed" */
2844 static int qeth_l3_control_event(struct qeth_card *card,
2845 struct qeth_ipa_cmd *cmd)
2846 {
2847 return 1;
2848 }
2849
2850 struct qeth_discipline qeth_l3_discipline = {
2851 .devtype = &qeth_l3_devtype,
2852 .process_rx_buffer = qeth_l3_process_inbound_buffer,
2853 .recover = qeth_l3_recover,
2854 .setup = qeth_l3_probe_device,
2855 .remove = qeth_l3_remove_device,
2856 .set_online = qeth_l3_set_online,
2857 .set_offline = qeth_l3_set_offline,
2858 .freeze = qeth_l3_pm_suspend,
2859 .thaw = qeth_l3_pm_resume,
2860 .restore = qeth_l3_pm_resume,
2861 .do_ioctl = qeth_l3_do_ioctl,
2862 .control_event_handler = qeth_l3_control_event,
2863 };
2864 EXPORT_SYMBOL_GPL(qeth_l3_discipline);
2865
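/*
 * inetaddr/inet6addr notifier glue: the notifiers below translate kernel
 * address events on qeth L3 interfaces (including their VLAN children)
 * into qeth_l3_add_ip()/qeth_l3_delete_ip() calls under the ip_lock.
 */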
2866 static int qeth_l3_handle_ip_event(struct qeth_card *card,
2867 struct qeth_ipaddr *addr,
2868 unsigned long event)
2869 {
2870 switch (event) {
2871 case NETDEV_UP:
2872 spin_lock_bh(&card->ip_lock);
2873 qeth_l3_add_ip(card, addr);
2874 spin_unlock_bh(&card->ip_lock);
2875 return NOTIFY_OK;
2876 case NETDEV_DOWN:
2877 spin_lock_bh(&card->ip_lock);
2878 qeth_l3_delete_ip(card, addr);
2879 spin_unlock_bh(&card->ip_lock);
2880 return NOTIFY_OK;
2881 default:
2882 return NOTIFY_DONE;
2883 }
2884 }
2885
2886 static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
2887 {
2888 if (is_vlan_dev(dev))
2889 dev = vlan_dev_real_dev(dev);
2890 if (dev->netdev_ops == &qeth_l3_osa_netdev_ops ||
2891 dev->netdev_ops == &qeth_l3_netdev_ops)
2892 return (struct qeth_card *) dev->ml_priv;
2893 return NULL;
2894 }
2895
2896 static int qeth_l3_ip_event(struct notifier_block *this,
2897 unsigned long event, void *ptr)
2898 {
2899
2900 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
2901 struct net_device *dev = ifa->ifa_dev->dev;
2902 struct qeth_ipaddr addr;
2903 struct qeth_card *card;
2904
2905 if (dev_net(dev) != &init_net)
2906 return NOTIFY_DONE;
2907
2908 card = qeth_l3_get_card_from_dev(dev);
2909 if (!card)
2910 return NOTIFY_DONE;
2911 QETH_CARD_TEXT(card, 3, "ipevent");
2912
2913 qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
2914 addr.u.a4.addr = be32_to_cpu(ifa->ifa_address);
2915 addr.u.a4.mask = be32_to_cpu(ifa->ifa_mask);
2916
2917 return qeth_l3_handle_ip_event(card, &addr, event);
2918 }
2919
2920 static struct notifier_block qeth_l3_ip_notifier = {
2921 qeth_l3_ip_event,
2922 NULL,
2923 };
2924
2925 static int qeth_l3_ip6_event(struct notifier_block *this,
2926 unsigned long event, void *ptr)
2927 {
2928 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
2929 struct net_device *dev = ifa->idev->dev;
2930 struct qeth_ipaddr addr;
2931 struct qeth_card *card;
2932
2933 card = qeth_l3_get_card_from_dev(dev);
2934 if (!card)
2935 return NOTIFY_DONE;
2936 QETH_CARD_TEXT(card, 3, "ip6event");
2937 if (!qeth_is_supported(card, IPA_IPV6))
2938 return NOTIFY_DONE;
2939
2940 qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
2941 addr.u.a6.addr = ifa->addr;
2942 addr.u.a6.pfxlen = ifa->prefix_len;
2943
2944 return qeth_l3_handle_ip_event(card, &addr, event);
2945 }
2946
2947 static struct notifier_block qeth_l3_ip6_notifier = {
2948 qeth_l3_ip6_event,
2949 NULL,
2950 };
2951
2952 static int qeth_l3_register_notifiers(void)
2953 {
2954 int rc;
2955
2956 QETH_DBF_TEXT(SETUP, 5, "regnotif");
2957 rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
2958 if (rc)
2959 return rc;
2960 rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier);
2961 if (rc) {
2962 unregister_inetaddr_notifier(&qeth_l3_ip_notifier);
2963 return rc;
2964 }
2965 return 0;
2966 }
2967
2968 static void qeth_l3_unregister_notifiers(void)
2969 {
2970 QETH_DBF_TEXT(SETUP, 5, "unregnot");
2971 WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
2972 WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
2973 }
2974
2975 static int __init qeth_l3_init(void)
2976 {
2977 pr_info("register layer 3 discipline\n");
2978 return qeth_l3_register_notifiers();
2979 }
2980
2981 static void __exit qeth_l3_exit(void)
2982 {
2983 qeth_l3_unregister_notifiers();
2984 pr_info("unregister layer 3 discipline\n");
2985 }
2986
2987 module_init(qeth_l3_init);
2988 module_exit(qeth_l3_exit);
2989 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
2990 MODULE_DESCRIPTION("qeth layer 3 discipline");
2991 MODULE_LICENSE("GPL");
2992