// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic HDLC support routines for Linux
 * Frame Relay support
 *
 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
 *
 *
 * Theory of PVC state
 *
 * DCE mode:
 *
 * (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
 * 0,x -> 1,1 if "link reliable" when sending FULL STATUS
 * 1,1 -> 1,0 if received FULL STATUS ACK
 *
 * (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
 *          -> 1 when "PVC up" and (exist,new) = 1,0
 *
 * DTE mode:
 * (exist,new,active) = FULL STATUS if "link reliable"
 *                    = 0, 0, 0 if "link unreliable"
 * No LMI:
 * active = open and "link reliable"
 * exist = new = not used
 *
 * CCITT LMI: ITU-T Q.933 Annex A
 * ANSI LMI: ANSI T1.617 Annex D
 * CISCO LMI: the original, aka "Gang of Four" LMI
 */

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#undef DEBUG_PKT
#undef DEBUG_ECN
#undef DEBUG_LINK
#undef DEBUG_PROTO
#undef DEBUG_PVC

#define FR_UI			0x03
#define FR_PAD			0x00

#define NLPID_IP		0xCC
#define NLPID_IPV6		0x8E
#define NLPID_SNAP		0x80
#define NLPID_PAD		0x00
#define NLPID_CCITT_ANSI_LMI	0x08
#define NLPID_CISCO_LMI		0x09


#define LMI_CCITT_ANSI_DLCI	   0	/* LMI DLCI */
#define LMI_CISCO_DLCI		1023

#define LMI_CALLREF		0x00	/* Call Reference */
#define LMI_ANSI_LOCKSHIFT	0x95	/* ANSI locking shift */
#define LMI_ANSI_CISCO_REPTYPE	0x01	/* report type */
#define LMI_CCITT_REPTYPE	0x51
#define LMI_ANSI_CISCO_ALIVE	0x03	/* keep alive */
#define LMI_CCITT_ALIVE		0x53
#define LMI_ANSI_CISCO_PVCSTAT	0x07	/* PVC status */
#define LMI_CCITT_PVCSTAT	0x57

#define LMI_FULLREP		0x00	/* full report */
#define LMI_INTEGRITY		0x01	/* link integrity report */
#define LMI_SINGLE		0x02	/* single PVC report */

#define LMI_STATUS_ENQUIRY	0x75
#define LMI_STATUS		0x7D	/* reply */

#define LMI_REPT_LEN		1	/* report type element length */
#define LMI_INTEG_LEN		2	/* link integrity element length */

#define LMI_CCITT_CISCO_LENGTH	13	/* LMI frame lengths */
#define LMI_ANSI_LENGTH		14


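/* Two-octet Q.922 address field as seen on the wire: a 10-bit DLCI split
 * across both octets plus the C/R, FECN, BECN, DE and EA (address
 * extension) bits.
 */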
struct fr_hdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	unsigned ea1:	1;
	unsigned cr:	1;
	unsigned dlcih:	6;

	unsigned ea2:	1;
	unsigned de:	1;
	unsigned becn:	1;
	unsigned fecn:	1;
	unsigned dlcil:	4;
#else
	unsigned dlcih:	6;
	unsigned cr:	1;
	unsigned ea1:	1;

	unsigned dlcil:	4;
	unsigned fecn:	1;
	unsigned becn:	1;
	unsigned de:	1;
	unsigned ea2:	1;
#endif
} __packed;


struct pvc_device {
	struct net_device *frad;
	struct net_device *main;
	struct net_device *ether;	/* bridged Ethernet interface */
	struct pvc_device *next;	/* Sorted in ascending DLCI order */
	int dlci;
	int open_count;

	struct {
		unsigned int new: 1;
		unsigned int active: 1;
		unsigned int exist: 1;
		unsigned int deleted: 1;
		unsigned int fecn: 1;
		unsigned int becn: 1;
		unsigned int bandwidth;	/* Cisco LMI reporting only */
	} state;
};

struct frad_state {
	fr_proto settings;
	struct pvc_device *first_pvc;
	int dce_pvc_count;

	struct timer_list timer;
	struct net_device *dev;
	unsigned long last_poll;
	int reliable;
	int dce_changed;
	int request;
	int fullrep_sent;
	u32 last_errors;	/* last errors bit list */
	u8 n391cnt;
	u8 txseq;		/* TX sequence number */
	u8 rxseq;		/* RX sequence number */
};


static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);


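/* Q.922 address <-> DLCI conversion: the upper six DLCI bits sit in bits 7-2
 * of the first address octet, the lower four bits in bits 7-4 of the second;
 * dlci_to_q922() also sets the EA bit of the final octet.
 */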
static inline u16 q922_to_dlci(u8 *hdr)
{
	return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
}


static inline void dlci_to_q922(u8 *hdr, u16 dlci)
{
	hdr[0] = (dlci >> 2) & 0xFC;
	hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
}


static inline struct frad_state *state(hdlc_device *hdlc)
{
	return (struct frad_state *)(hdlc->state);
}


static inline struct pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci)
{
	struct pvc_device *pvc = state(hdlc)->first_pvc;

	while (pvc) {
		if (pvc->dlci == dlci)
			return pvc;
		if (pvc->dlci > dlci)
			return NULL;	/* the list is sorted */
		pvc = pvc->next;
	}

	return NULL;
}


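/* Find or create the PVC for a DLCI, keeping the list sorted by DLCI.
 * GFP_ATOMIC is used because this may also run from the LMI receive
 * (softirq) path, not only from the ioctl path.
 */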
static struct pvc_device *add_pvc(struct net_device *dev, u16 dlci)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc;

	while (*pvc_p) {
		if ((*pvc_p)->dlci == dlci)
			return *pvc_p;
		if ((*pvc_p)->dlci > dlci)
			break;	/* the list is sorted */
		pvc_p = &(*pvc_p)->next;
	}

	pvc = kzalloc(sizeof(*pvc), GFP_ATOMIC);
#ifdef DEBUG_PVC
	printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev);
#endif
	if (!pvc)
		return NULL;

	pvc->dlci = dlci;
	pvc->frad = dev;
	pvc->next = *pvc_p;	/* Put it in the chain */
	*pvc_p = pvc;
	return pvc;
}


static inline int pvc_is_used(struct pvc_device *pvc)
{
	return pvc->main || pvc->ether;
}


static inline void pvc_carrier(int on, struct pvc_device *pvc)
{
	if (on) {
		if (pvc->main)
			if (!netif_carrier_ok(pvc->main))
				netif_carrier_on(pvc->main);
		if (pvc->ether)
			if (!netif_carrier_ok(pvc->ether))
				netif_carrier_on(pvc->ether);
	} else {
		if (pvc->main)
			if (netif_carrier_ok(pvc->main))
				netif_carrier_off(pvc->main);
		if (pvc->ether)
			if (netif_carrier_ok(pvc->ether))
				netif_carrier_off(pvc->ether);
	}
}


static inline void delete_unused_pvcs(hdlc_device *hdlc)
{
	struct pvc_device **pvc_p = &state(hdlc)->first_pvc;

	while (*pvc_p) {
		if (!pvc_is_used(*pvc_p)) {
			struct pvc_device *pvc = *pvc_p;
#ifdef DEBUG_PVC
			printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc);
#endif
			*pvc_p = pvc->next;
			kfree(pvc);
			continue;
		}
		pvc_p = &(*pvc_p)->next;
	}
}


static inline struct net_device **get_dev_p(struct pvc_device *pvc,
					    int type)
{
	if (type == ARPHRD_ETHER)
		return &pvc->ether;
	else
		return &pvc->main;
}


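/* Prepend the Frame Relay encapsulation (RFC 2427 style): the 2-byte Q.922
 * address and the UI control byte, followed by either a one-byte NLPID
 * (IP, IPv6, LMI) or a pad + SNAP header (OUI + PID) for other protocols,
 * including bridged Ethernet (OUI 00-80-C2, PID 00-07).
 */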
static int fr_hard_header(struct sk_buff *skb, u16 dlci)
{
	if (!skb->dev) { /* Control packets */
		switch (dlci) {
		case LMI_CCITT_ANSI_DLCI:
			skb_push(skb, 4);
			skb->data[3] = NLPID_CCITT_ANSI_LMI;
			break;

		case LMI_CISCO_DLCI:
			skb_push(skb, 4);
			skb->data[3] = NLPID_CISCO_LMI;
			break;

		default:
			return -EINVAL;
		}

	} else if (skb->dev->type == ARPHRD_DLCI) {
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			skb_push(skb, 4);
			skb->data[3] = NLPID_IP;
			break;

		case htons(ETH_P_IPV6):
			skb_push(skb, 4);
			skb->data[3] = NLPID_IPV6;
			break;

		default:
			skb_push(skb, 10);
			skb->data[3] = FR_PAD;
			skb->data[4] = NLPID_SNAP;
			/* OUI 00-00-00 indicates an Ethertype follows */
			skb->data[5] = 0x00;
			skb->data[6] = 0x00;
			skb->data[7] = 0x00;
			/* This should be an Ethertype: */
			*(__be16 *)(skb->data + 8) = skb->protocol;
		}

	} else if (skb->dev->type == ARPHRD_ETHER) {
		skb_push(skb, 10);
		skb->data[3] = FR_PAD;
		skb->data[4] = NLPID_SNAP;
		/* OUI 00-80-C2 stands for the 802.1 organization */
		skb->data[5] = 0x00;
		skb->data[6] = 0x80;
		skb->data[7] = 0xC2;
		/* PID 00-07 stands for Ethernet frames without FCS */
		skb->data[8] = 0x00;
		skb->data[9] = 0x07;

	} else {
		return -EINVAL;
	}

	dlci_to_q922(skb->data, dlci);
	skb->data[2] = FR_UI;
	return 0;
}


static int pvc_open(struct net_device *dev)
{
	struct pvc_device *pvc = dev->ml_priv;

	if ((pvc->frad->flags & IFF_UP) == 0)
		return -EIO; /* Frad must be UP in order to activate PVC */

	if (pvc->open_count++ == 0) {
		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);

		if (state(hdlc)->settings.lmi == LMI_NONE)
			pvc->state.active = netif_carrier_ok(pvc->frad);

		pvc_carrier(pvc->state.active, pvc);
		state(hdlc)->dce_changed = 1;
	}
	return 0;
}


static int pvc_close(struct net_device *dev)
{
	struct pvc_device *pvc = dev->ml_priv;

	if (--pvc->open_count == 0) {
		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);

		if (state(hdlc)->settings.lmi == LMI_NONE)
			pvc->state.active = 0;

		if (state(hdlc)->settings.dce) {
			state(hdlc)->dce_changed = 1;
			pvc->state.active = 0;
		}
	}
	return 0;
}


static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct pvc_device *pvc = dev->ml_priv;
	fr_proto_pvc_info info;

	if (ifr->ifr_settings.type == IF_GET_PROTO) {
		if (dev->type == ARPHRD_ETHER)
			ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
		else
			ifr->ifr_settings.type = IF_PROTO_FR_PVC;

		if (ifr->ifr_settings.size < sizeof(info)) {
			/* data size wanted */
			ifr->ifr_settings.size = sizeof(info);
			return -ENOBUFS;
		}

		info.dlci = pvc->dlci;
		memcpy(info.master, pvc->frad->name, IFNAMSIZ);
		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
				 &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}

	return -EINVAL;
}

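/* Transmit on a PVC device: pad bridged Ethernet frames to the minimum
 * Ethernet length, prepend the Frame Relay header and hand the frame to
 * the underlying FRAD device as an ETH_P_HDLC packet.
 */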
static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pvc_device *pvc = dev->ml_priv;

	if (!pvc->state.active)
		goto drop;

	if (dev->type == ARPHRD_ETHER) {
		int pad = ETH_ZLEN - skb->len;

		if (pad > 0) { /* Pad the frame with zeros */
			if (__skb_pad(skb, pad, false))
				goto drop;
			skb_put(skb, pad);
		}
	}

	/* We already requested the header space with dev->needed_headroom.
	 * So this is just a protection in case the upper layer didn't take
	 * dev->needed_headroom into consideration.
	 */
	if (skb_headroom(skb) < 10) {
		struct sk_buff *skb2 = skb_realloc_headroom(skb, 10);

		if (!skb2)
			goto drop;
		dev_kfree_skb(skb);
		skb = skb2;
	}

	skb->dev = dev;
	if (fr_hard_header(skb, pvc->dlci))
		goto drop;

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	if (pvc->state.fecn) /* TX Congestion counter */
		dev->stats.tx_compressed++;
	skb->dev = pvc->frad;
	skb->protocol = htons(ETH_P_HDLC);
	skb_reset_network_header(skb);
	dev_queue_xmit(skb);
	return NETDEV_TX_OK;

drop:
	dev->stats.tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static inline void fr_log_dlci_active(struct pvc_device *pvc)
{
	netdev_info(pvc->frad, "DLCI %d [%s%s%s]%s %s\n",
		    pvc->dlci,
		    pvc->main ? pvc->main->name : "",
		    pvc->main && pvc->ether ? " " : "",
		    pvc->ether ? pvc->ether->name : "",
		    pvc->state.new ? " new" : "",
		    !pvc->state.exist ? "deleted" :
		    pvc->state.active ? "active" : "inactive");
}


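/* LMI send sequence numbers run 1..255 and wrap back to 1; zero is never
 * used (fr_lmi_recv() treats a reflected sequence number of 0 as an error).
 */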
static inline u8 fr_lmi_nextseq(u8 x)
{
	x++;
	return x ? x : 1;
}


static void fr_lmi_send(struct net_device *dev, int fullrep)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct sk_buff *skb;
	struct pvc_device *pvc = state(hdlc)->first_pvc;
	int lmi = state(hdlc)->settings.lmi;
	int dce = state(hdlc)->settings.dce;
	int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
	int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
	u8 *data;
	int i = 0;

	if (dce && fullrep) {
		len += state(hdlc)->dce_pvc_count * (2 + stat_len);
		if (len > HDLC_MAX_MRU) {
			netdev_warn(dev, "Too many PVCs while sending LMI full report\n");
			return;
		}
	}

	skb = dev_alloc_skb(len);
	if (!skb) {
		netdev_warn(dev, "Memory squeeze on fr_lmi_send()\n");
		return;
	}
	memset(skb->data, 0, len);
	skb_reserve(skb, 4);
	if (lmi == LMI_CISCO) {
		fr_hard_header(skb, LMI_CISCO_DLCI);
	} else {
		fr_hard_header(skb, LMI_CCITT_ANSI_DLCI);
	}
	data = skb_tail_pointer(skb);
	data[i++] = LMI_CALLREF;
	data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
	if (lmi == LMI_ANSI)
		data[i++] = LMI_ANSI_LOCKSHIFT;
	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
		LMI_ANSI_CISCO_REPTYPE;
	data[i++] = LMI_REPT_LEN;
	data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
	data[i++] = LMI_INTEG_LEN;
	data[i++] = state(hdlc)->txseq =
		fr_lmi_nextseq(state(hdlc)->txseq);
	data[i++] = state(hdlc)->rxseq;

	if (dce && fullrep) {
		while (pvc) {
			data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
				LMI_ANSI_CISCO_PVCSTAT;
			data[i++] = stat_len;

			/* LMI start/restart */
			if (state(hdlc)->reliable && !pvc->state.exist) {
				pvc->state.exist = pvc->state.new = 1;
				fr_log_dlci_active(pvc);
			}

			/* ifconfig PVC up */
			if (pvc->open_count && !pvc->state.active &&
			    pvc->state.exist && !pvc->state.new) {
				pvc_carrier(1, pvc);
				pvc->state.active = 1;
				fr_log_dlci_active(pvc);
			}

			if (lmi == LMI_CISCO) {
				data[i] = pvc->dlci >> 8;
				data[i + 1] = pvc->dlci & 0xFF;
			} else {
				data[i] = (pvc->dlci >> 4) & 0x3F;
				data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
				data[i + 2] = 0x80;
			}

			if (pvc->state.new)
				data[i + 2] |= 0x08;
			else if (pvc->state.active)
				data[i + 2] |= 0x02;

			i += stat_len;
			pvc = pvc->next;
		}
	}

	skb_put(skb, i);
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb->protocol = htons(ETH_P_HDLC);
	skb_reset_network_header(skb);

	dev_queue_xmit(skb);
}


static void fr_set_link_state(int reliable, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc = state(hdlc)->first_pvc;

	state(hdlc)->reliable = reliable;
	if (reliable) {
		netif_dormant_off(dev);
		state(hdlc)->n391cnt = 0; /* Request full status */
		state(hdlc)->dce_changed = 1;

		if (state(hdlc)->settings.lmi == LMI_NONE) {
			while (pvc) {	/* Activate all PVCs */
				pvc_carrier(1, pvc);
				pvc->state.exist = pvc->state.active = 1;
				pvc->state.new = 0;
				pvc = pvc->next;
			}
		}
	} else {
		netif_dormant_on(dev);
		while (pvc) {		/* Deactivate all PVCs */
			pvc_carrier(0, pvc);
			pvc->state.exist = pvc->state.active = 0;
			pvc->state.new = 0;
			if (!state(hdlc)->settings.dce)
				pvc->state.bandwidth = 0;
			pvc = pvc->next;
		}
	}
}


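/* Periodic LMI timer: as DCE, check that a STATUS ENQUIRY arrived within
 * T392 seconds; as DTE, count errors over the last N393 polls against the
 * N392 threshold, then send a STATUS ENQUIRY every T391 seconds (a full
 * status request every N391 polls).
 */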
static void fr_timer(struct timer_list *t)
{
	struct frad_state *st = from_timer(st, t, timer);
	struct net_device *dev = st->dev;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	int i, cnt = 0, reliable;
	u32 list;

	if (state(hdlc)->settings.dce) {
		reliable = state(hdlc)->request &&
			time_before(jiffies, state(hdlc)->last_poll +
				    state(hdlc)->settings.t392 * HZ);
		state(hdlc)->request = 0;
	} else {
		state(hdlc)->last_errors <<= 1; /* Shift the list */
		if (state(hdlc)->request) {
			if (state(hdlc)->reliable)
				netdev_info(dev, "No LMI status reply received\n");
			state(hdlc)->last_errors |= 1;
		}

		list = state(hdlc)->last_errors;
		for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1)
			cnt += (list & 1);	/* errors count */

		reliable = (cnt < state(hdlc)->settings.n392);
	}

	if (state(hdlc)->reliable != reliable) {
		netdev_info(dev, "Link %sreliable\n", reliable ? "" : "un");
		fr_set_link_state(reliable, dev);
	}

	if (state(hdlc)->settings.dce)
		state(hdlc)->timer.expires = jiffies +
			state(hdlc)->settings.t392 * HZ;
	else {
		if (state(hdlc)->n391cnt)
			state(hdlc)->n391cnt--;

		fr_lmi_send(dev, state(hdlc)->n391cnt == 0);

		state(hdlc)->last_poll = jiffies;
		state(hdlc)->request = 1;
		state(hdlc)->timer.expires = jiffies +
			state(hdlc)->settings.t391 * HZ;
	}

	add_timer(&state(hdlc)->timer);
}


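/* Parse a received LMI frame: a STATUS ENQUIRY when acting as DCE, a STATUS
 * reply when acting as DTE. Returns 0 if the frame was handled, 1 on any
 * parse error.
 */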
static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc;
	u8 rxseq, txseq;
	int lmi = state(hdlc)->settings.lmi;
	int dce = state(hdlc)->settings.dce;
	int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;

	if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
			LMI_CCITT_CISCO_LENGTH)) {
		netdev_info(dev, "Short LMI frame\n");
		return 1;
	}

	if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
			     NLPID_CCITT_ANSI_LMI)) {
		netdev_info(dev, "Received non-LMI frame with LMI DLCI\n");
		return 1;
	}

	if (skb->data[4] != LMI_CALLREF) {
		netdev_info(dev, "Invalid LMI Call reference (0x%02X)\n",
			    skb->data[4]);
		return 1;
	}

	if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
		netdev_info(dev, "Invalid LMI Message type (0x%02X)\n",
			    skb->data[5]);
		return 1;
	}

	if (lmi == LMI_ANSI) {
		if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
			netdev_info(dev, "Not ANSI locking shift in LMI message (0x%02X)\n",
				    skb->data[6]);
			return 1;
		}
		i = 7;
	} else
		i = 6;

	if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
			     LMI_ANSI_CISCO_REPTYPE)) {
		netdev_info(dev, "Not an LMI Report type IE (0x%02X)\n",
			    skb->data[i]);
		return 1;
	}

	if (skb->data[++i] != LMI_REPT_LEN) {
		netdev_info(dev, "Invalid LMI Report type IE length (%u)\n",
			    skb->data[i]);
		return 1;
	}

	reptype = skb->data[++i];
	if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
		netdev_info(dev, "Unsupported LMI Report type (0x%02X)\n",
			    reptype);
		return 1;
	}

	if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
			       LMI_ANSI_CISCO_ALIVE)) {
		netdev_info(dev, "Not an LMI Link integrity verification IE (0x%02X)\n",
			    skb->data[i]);
		return 1;
	}

	if (skb->data[++i] != LMI_INTEG_LEN) {
		netdev_info(dev, "Invalid LMI Link integrity verification IE length (%u)\n",
			    skb->data[i]);
		return 1;
	}
	i++;

	state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */
	rxseq = skb->data[i++];	/* Should confirm our sequence */

	txseq = state(hdlc)->txseq;

	if (dce)
		state(hdlc)->last_poll = jiffies;

	error = 0;
	if (!state(hdlc)->reliable)
		error = 1;

	if (rxseq == 0 || rxseq != txseq) { /* Ask for full report next time */
		state(hdlc)->n391cnt = 0;
		error = 1;
	}

	if (dce) {
		if (state(hdlc)->fullrep_sent && !error) {
			/* Stop sending full report - the last one has been confirmed by DTE */
			state(hdlc)->fullrep_sent = 0;
			pvc = state(hdlc)->first_pvc;
			while (pvc) {
				if (pvc->state.new) {
					pvc->state.new = 0;

					/* Tell DTE that new PVC is now active */
					state(hdlc)->dce_changed = 1;
				}
				pvc = pvc->next;
			}
		}

		if (state(hdlc)->dce_changed) {
			reptype = LMI_FULLREP;
			state(hdlc)->fullrep_sent = 1;
			state(hdlc)->dce_changed = 0;
		}

		state(hdlc)->request = 1; /* got request */
		fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
		return 0;
	}

	/* DTE */

	state(hdlc)->request = 0; /* got response, no request pending */

	if (error)
		return 0;

	if (reptype != LMI_FULLREP)
		return 0;

	pvc = state(hdlc)->first_pvc;

	while (pvc) {
		pvc->state.deleted = 1;
		pvc = pvc->next;
	}

	no_ram = 0;
	while (skb->len >= i + 2 + stat_len) {
		u16 dlci;
		u32 bw;
		unsigned int active, new;

		if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
				     LMI_ANSI_CISCO_PVCSTAT)) {
			netdev_info(dev, "Not an LMI PVC status IE (0x%02X)\n",
				    skb->data[i]);
			return 1;
		}

		if (skb->data[++i] != stat_len) {
			netdev_info(dev, "Invalid LMI PVC status IE length (%u)\n",
				    skb->data[i]);
			return 1;
		}
		i++;

		new = !!(skb->data[i + 2] & 0x08);
		active = !!(skb->data[i + 2] & 0x02);
		if (lmi == LMI_CISCO) {
			dlci = (skb->data[i] << 8) | skb->data[i + 1];
			bw = (skb->data[i + 3] << 16) |
				(skb->data[i + 4] << 8) |
				(skb->data[i + 5]);
		} else {
			dlci = ((skb->data[i] & 0x3F) << 4) |
				((skb->data[i + 1] & 0x78) >> 3);
			bw = 0;
		}

		pvc = add_pvc(dev, dlci);

		if (!pvc && !no_ram) {
			netdev_warn(dev, "Memory squeeze on fr_lmi_recv()\n");
			no_ram = 1;
		}

		if (pvc) {
			pvc->state.exist = 1;
			pvc->state.deleted = 0;
			if (active != pvc->state.active ||
			    new != pvc->state.new ||
			    bw != pvc->state.bandwidth ||
			    !pvc->state.exist) {
				pvc->state.new = new;
				pvc->state.active = active;
				pvc->state.bandwidth = bw;
				pvc_carrier(active, pvc);
				fr_log_dlci_active(pvc);
			}
		}

		i += stat_len;
	}

	pvc = state(hdlc)->first_pvc;

	while (pvc) {
		if (pvc->state.deleted && pvc->state.exist) {
			pvc_carrier(0, pvc);
			pvc->state.active = pvc->state.new = 0;
			pvc->state.exist = 0;
			pvc->state.bandwidth = 0;
			fr_log_dlci_active(pvc);
		}
		pvc = pvc->next;
	}

	/* Next full report after N391 polls */
	state(hdlc)->n391cnt = state(hdlc)->settings.n391;

	return 0;
}


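/* Receive path for the FRAD device: frames on the LMI DLCI are handed to
 * fr_lmi_recv(), everything else is demultiplexed by NLPID/SNAP header to
 * the routed (pvc->main) or bridged Ethernet (pvc->ether) PVC device.
 */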
static int fr_rx(struct sk_buff *skb)
{
	struct net_device *frad = skb->dev;
	hdlc_device *hdlc = dev_to_hdlc(frad);
	struct fr_hdr *fh = (struct fr_hdr *)skb->data;
	u8 *data = skb->data;
	u16 dlci;
	struct pvc_device *pvc;
	struct net_device *dev = NULL;

	if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
		goto rx_error;

	dlci = q922_to_dlci(skb->data);

	if ((dlci == LMI_CCITT_ANSI_DLCI &&
	     (state(hdlc)->settings.lmi == LMI_ANSI ||
	      state(hdlc)->settings.lmi == LMI_CCITT)) ||
	    (dlci == LMI_CISCO_DLCI &&
	     state(hdlc)->settings.lmi == LMI_CISCO)) {
		if (fr_lmi_recv(frad, skb))
			goto rx_error;
		dev_kfree_skb_any(skb);
		return NET_RX_SUCCESS;
	}

	pvc = find_pvc(hdlc, dlci);
	if (!pvc) {
#ifdef DEBUG_PKT
		netdev_info(frad, "No PVC for received frame's DLCI %d\n",
			    dlci);
#endif
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	if (pvc->state.fecn != fh->fecn) {
#ifdef DEBUG_ECN
		printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name,
		       dlci, fh->fecn ? "N" : "FF");
#endif
		pvc->state.fecn ^= 1;
	}

	if (pvc->state.becn != fh->becn) {
#ifdef DEBUG_ECN
		printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name,
		       dlci, fh->becn ? "N" : "FF");
#endif
		pvc->state.becn ^= 1;
	}


	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
		frad->stats.rx_dropped++;
		return NET_RX_DROP;
	}

	if (data[3] == NLPID_IP) {
		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
		dev = pvc->main;
		skb->protocol = htons(ETH_P_IP);

	} else if (data[3] == NLPID_IPV6) {
		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
		dev = pvc->main;
		skb->protocol = htons(ETH_P_IPV6);

	} else if (skb->len > 10 && data[3] == FR_PAD &&
		   data[4] == NLPID_SNAP && data[5] == FR_PAD) {
		u16 oui = ntohs(*(__be16 *)(data + 6));
		u16 pid = ntohs(*(__be16 *)(data + 8));
		skb_pull(skb, 10);

		switch ((((u32)oui) << 16) | pid) {
		case ETH_P_ARP: /* routed frame with SNAP */
		case ETH_P_IPX:
		case ETH_P_IP:	/* a long variant */
		case ETH_P_IPV6:
			dev = pvc->main;
			skb->protocol = htons(pid);
			break;

		case 0x80C20007: /* bridged Ethernet frame */
			if ((dev = pvc->ether) != NULL)
				skb->protocol = eth_type_trans(skb, dev);
			break;

		default:
			netdev_info(frad, "Unsupported protocol, OUI=%x PID=%x\n",
				    oui, pid);
			dev_kfree_skb_any(skb);
			return NET_RX_DROP;
		}
	} else {
		netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n",
			    data[3], skb->len);
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	if (dev) {
		dev->stats.rx_packets++; /* PVC traffic */
		dev->stats.rx_bytes += skb->len;
		if (pvc->state.becn)
			dev->stats.rx_compressed++;
		skb->dev = dev;
		netif_rx(skb);
		return NET_RX_SUCCESS;
	} else {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

rx_error:
	frad->stats.rx_errors++; /* Mark error */
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}


static void fr_start(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
	printk(KERN_DEBUG "fr_start\n");
#endif
	if (state(hdlc)->settings.lmi != LMI_NONE) {
		state(hdlc)->reliable = 0;
		state(hdlc)->dce_changed = 1;
		state(hdlc)->request = 0;
		state(hdlc)->fullrep_sent = 0;
		state(hdlc)->last_errors = 0xFFFFFFFF;
		state(hdlc)->n391cnt = 0;
		state(hdlc)->txseq = state(hdlc)->rxseq = 0;

		state(hdlc)->dev = dev;
		timer_setup(&state(hdlc)->timer, fr_timer, 0);
		/* First poll after 1 s */
		state(hdlc)->timer.expires = jiffies + HZ;
		add_timer(&state(hdlc)->timer);
	} else
		fr_set_link_state(1, dev);
}


static void fr_stop(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
	printk(KERN_DEBUG "fr_stop\n");
#endif
	if (state(hdlc)->settings.lmi != LMI_NONE)
		del_timer_sync(&state(hdlc)->timer);
	fr_set_link_state(0, dev);
}


static void fr_close(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc = state(hdlc)->first_pvc;

	while (pvc) {		/* Shutdown all PVCs for this FRAD */
		if (pvc->main)
			dev_close(pvc->main);
		if (pvc->ether)
			dev_close(pvc->ether);
		pvc = pvc->next;
	}
}


static void pvc_setup(struct net_device *dev)
{
	dev->type = ARPHRD_DLCI;
	dev->flags = IFF_POINTOPOINT;
	dev->hard_header_len = 0;
	dev->addr_len = 2;
	netif_keep_dst(dev);
}

static const struct net_device_ops pvc_ops = {
	.ndo_open	= pvc_open,
	.ndo_stop	= pvc_close,
	.ndo_start_xmit	= pvc_xmit,
	.ndo_do_ioctl	= pvc_ioctl,
};

static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
{
	hdlc_device *hdlc = dev_to_hdlc(frad);
	struct pvc_device *pvc;
	struct net_device *dev;
	int used;

	if ((pvc = add_pvc(frad, dlci)) == NULL) {
		netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");
		return -ENOBUFS;
	}

	if (*get_dev_p(pvc, type))
		return -EEXIST;

	used = pvc_is_used(pvc);

	if (type == ARPHRD_ETHER)
		dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN,
				   ether_setup);
	else
		dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup);

	if (!dev) {
		netdev_warn(frad, "Memory squeeze on fr_pvc()\n");
		delete_unused_pvcs(hdlc);
		return -ENOBUFS;
	}

	if (type == ARPHRD_ETHER) {
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		eth_hw_addr_random(dev);
	} else {
		*(__be16 *)dev->dev_addr = htons(dlci);
		dlci_to_q922(dev->broadcast, dlci);
	}
	dev->netdev_ops = &pvc_ops;
	dev->mtu = HDLC_MAX_MTU;
	dev->min_mtu = 68;
	dev->max_mtu = HDLC_MAX_MTU;
	dev->needed_headroom = 10;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->ml_priv = pvc;

	if (register_netdevice(dev) != 0) {
		free_netdev(dev);
		delete_unused_pvcs(hdlc);
		return -EIO;
	}

	dev->needs_free_netdev = true;
	*get_dev_p(pvc, type) = dev;
	if (!used) {
		state(hdlc)->dce_changed = 1;
		state(hdlc)->dce_pvc_count++;
	}
	return 0;
}


static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
{
	struct pvc_device *pvc;
	struct net_device *dev;

	if ((pvc = find_pvc(hdlc, dlci)) == NULL)
		return -ENOENT;

	if ((dev = *get_dev_p(pvc, type)) == NULL)
		return -ENOENT;

	if (dev->flags & IFF_UP)
		return -EBUSY;		/* PVC in use */

	unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
	*get_dev_p(pvc, type) = NULL;

	if (!pvc_is_used(pvc)) {
		state(hdlc)->dce_pvc_count--;
		state(hdlc)->dce_changed = 1;
	}
	delete_unused_pvcs(hdlc);
	return 0;
}


static void fr_destroy(struct net_device *frad)
{
	hdlc_device *hdlc = dev_to_hdlc(frad);
	struct pvc_device *pvc = state(hdlc)->first_pvc;

	state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */
	state(hdlc)->dce_pvc_count = 0;
	state(hdlc)->dce_changed = 1;

	while (pvc) {
		struct pvc_device *next = pvc->next;
		/* destructors will free_netdev() main and ether */
		if (pvc->main)
			unregister_netdevice(pvc->main);

		if (pvc->ether)
			unregister_netdevice(pvc->ether);

		kfree(pvc);
		pvc = next;
	}
}


static struct hdlc_proto proto = {
	.close		= fr_close,
	.start		= fr_start,
	.stop		= fr_stop,
	.detach		= fr_destroy,
	.ioctl		= fr_ioctl,
	.netif_rx	= fr_rx,
	.module		= THIS_MODULE,
};


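/* Protocol ioctl handler: IF_GET_PROTO reports the current settings,
 * IF_PROTO_FR validates and applies new settings (attaching the protocol
 * if needed), and the *_ADD_PVC / *_DEL_PVC variants create or remove
 * routed and bridged-Ethernet PVC devices.
 */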
static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
	const size_t size = sizeof(fr_proto);
	fr_proto new_settings;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	fr_proto_pvc pvc;
	int result;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
			return -EINVAL;
		ifr->ifr_settings.type = IF_PROTO_FR;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(fr_s, &state(hdlc)->settings, size))
			return -EFAULT;
		return 0;

	case IF_PROTO_FR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&new_settings, fr_s, size))
			return -EFAULT;

		if (new_settings.lmi == LMI_DEFAULT)
			new_settings.lmi = LMI_ANSI;

		if ((new_settings.lmi != LMI_NONE &&
		     new_settings.lmi != LMI_ANSI &&
		     new_settings.lmi != LMI_CCITT &&
		     new_settings.lmi != LMI_CISCO) ||
		    new_settings.t391 < 1 ||
		    new_settings.t392 < 2 ||
		    new_settings.n391 < 1 ||
		    new_settings.n392 < 1 ||
		    new_settings.n393 < new_settings.n392 ||
		    new_settings.n393 > 32 ||
		    (new_settings.dce != 0 &&
		     new_settings.dce != 1))
			return -EINVAL;

		result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
			result = attach_hdlc_protocol(dev, &proto,
						      sizeof(struct frad_state));
			if (result)
				return result;
			state(hdlc)->first_pvc = NULL;
			state(hdlc)->dce_pvc_count = 0;
		}
		memcpy(&state(hdlc)->settings, &new_settings, size);
		dev->type = ARPHRD_FRAD;
		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
		return 0;

	case IF_PROTO_FR_ADD_PVC:
	case IF_PROTO_FR_DEL_PVC:
	case IF_PROTO_FR_ADD_ETH_PVC:
	case IF_PROTO_FR_DEL_ETH_PVC:
		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
			return -EINVAL;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
				   sizeof(fr_proto_pvc)))
			return -EFAULT;

		if (pvc.dlci <= 0 || pvc.dlci >= 1024)
			return -EINVAL;	/* Only 10 bits, DLCI 0 reserved */

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
			result = ARPHRD_ETHER; /* bridged Ethernet device */
		else
			result = ARPHRD_DLCI;

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
			return fr_add_pvc(dev, pvc.dlci, result);
		else
			return fr_del_pvc(hdlc, pvc.dlci, result);
	}

	return -EINVAL;
}


static int __init mod_init(void)
{
	register_hdlc_protocol(&proto);
	return 0;
}


static void __exit mod_exit(void)
{
	unregister_hdlc_protocol(&proto);
}


module_init(mod_init);
module_exit(mod_exit);

MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");