/*
 * Generic HDLC support routines for Linux
 * Frame Relay support
 *
 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *

            Theory of PVC state

 DCE mode:

 (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
         0,x -> 1,1 if "link reliable" when sending FULL STATUS
         1,1 -> 1,0 if received FULL STATUS ACK

 (active)    -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
             -> 1 when "PVC up" and (exist,new) = 1,0

 DTE mode:
 (exist,new,active) = FULL STATUS if "link reliable"
		    = 0, 0, 0 if "link unreliable"
 No LMI:
 active = open and "link reliable"
 exist = new = not used

 CCITT LMI: ITU-T Q.933 Annex A
 ANSI LMI: ANSI T1.617 Annex D
 CISCO LMI: the original, aka "Gang of Four" LMI

*/

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#undef DEBUG_PKT
#undef DEBUG_ECN
#undef DEBUG_LINK
#undef DEBUG_PROTO
#undef DEBUG_PVC

#define FR_UI			0x03
#define FR_PAD			0x00

#define NLPID_IP		0xCC
#define NLPID_IPV6		0x8E
#define NLPID_SNAP		0x80
#define NLPID_PAD		0x00
#define NLPID_CCITT_ANSI_LMI	0x08
#define NLPID_CISCO_LMI		0x09


#define LMI_CCITT_ANSI_DLCI	   0 /* LMI DLCI */
#define LMI_CISCO_DLCI		1023

#define LMI_CALLREF		0x00 /* Call Reference */
#define LMI_ANSI_LOCKSHIFT	0x95 /* ANSI locking shift */
#define LMI_ANSI_CISCO_REPTYPE	0x01 /* report type */
#define LMI_CCITT_REPTYPE	0x51
#define LMI_ANSI_CISCO_ALIVE	0x03 /* keep alive */
#define LMI_CCITT_ALIVE		0x53
#define LMI_ANSI_CISCO_PVCSTAT	0x07 /* PVC status */
#define LMI_CCITT_PVCSTAT	0x57

#define LMI_FULLREP		0x00 /* full report  */
#define LMI_INTEGRITY		0x01 /* link integrity report */
#define LMI_SINGLE		0x02 /* single PVC report */

#define LMI_STATUS_ENQUIRY      0x75
#define LMI_STATUS              0x7D /* reply */

#define LMI_REPT_LEN               1 /* report type element length */
#define LMI_INTEG_LEN              2 /* link integrity element length */

#define LMI_CCITT_CISCO_LENGTH	  13 /* LMI frame lengths */
#define LMI_ANSI_LENGTH		  14


struct fr_hdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	unsigned ea1:	1;
	unsigned cr:	1;
	unsigned dlcih:	6;

	unsigned ea2:	1;
	unsigned de:	1;
	unsigned becn:	1;
	unsigned fecn:	1;
	unsigned dlcil:	4;
#else
	unsigned dlcih:	6;
	unsigned cr:	1;
	unsigned ea1:	1;

	unsigned dlcil:	4;
	unsigned fecn:	1;
	unsigned becn:	1;
	unsigned de:	1;
	unsigned ea2:	1;
#endif
} __packed;


struct pvc_device {
	struct net_device *frad;
	struct net_device *main;
	struct net_device *ether;	/* bridged Ethernet interface	*/
	struct pvc_device *next;	/* Sorted in ascending DLCI order */
	int dlci;
	int open_count;

	struct {
		unsigned int new: 1;
		unsigned int active: 1;
		unsigned int exist: 1;
		unsigned int deleted: 1;
		unsigned int fecn: 1;
		unsigned int becn: 1;
		unsigned int bandwidth;	/* Cisco LMI reporting only */
	}state;
};

struct frad_state {
	fr_proto settings;
	struct pvc_device *first_pvc;
	int dce_pvc_count;

	struct timer_list timer;
	struct net_device *dev;
	unsigned long last_poll;
	int reliable;
	int dce_changed;
	int request;
	int fullrep_sent;
	u32 last_errors; /* last errors bit list */
	u8 n391cnt;
	u8 txseq; /* TX sequence number */
	u8 rxseq; /* RX sequence number */
};


static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);

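/* The helpers below pack/unpack the 10-bit DLCI to/from the two-byte Q.922
 * address: the upper 6 DLCI bits occupy bits 7..2 of the first octet, the
 * lower 4 bits occupy bits 7..4 of the second octet; dlci_to_q922() also
 * sets the EA bit (0x01) that terminates the address field.
 */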
static inline u16 q922_to_dlci(u8 *hdr)
{
	return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
}


static inline void dlci_to_q922(u8 *hdr, u16 dlci)
{
	hdr[0] = (dlci >> 2) & 0xFC;
	hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
}


static inline struct frad_state* state(hdlc_device *hdlc)
{
	return(struct frad_state *)(hdlc->state);
}

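/* PVCs hang off state(hdlc)->first_pvc as a singly linked list kept in
 * ascending DLCI order, so lookups can stop early and the DCE full status
 * report is emitted in DLCI order.
 */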
static inline struct pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci)
{
	struct pvc_device *pvc = state(hdlc)->first_pvc;

	while (pvc) {
		if (pvc->dlci == dlci)
			return pvc;
		if (pvc->dlci > dlci)
			return NULL; /* the list is sorted */
		pvc = pvc->next;
	}

	return NULL;
}


static struct pvc_device *add_pvc(struct net_device *dev, u16 dlci)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc;

	while (*pvc_p) {
		if ((*pvc_p)->dlci == dlci)
			return *pvc_p;
		if ((*pvc_p)->dlci > dlci)
			break;	/* the list is sorted */
		pvc_p = &(*pvc_p)->next;
	}

	pvc = kzalloc(sizeof(*pvc), GFP_ATOMIC);
#ifdef DEBUG_PVC
	printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev);
#endif
	if (!pvc)
		return NULL;

	pvc->dlci = dlci;
	pvc->frad = dev;
	pvc->next = *pvc_p;	/* Put it in the chain */
	*pvc_p = pvc;
	return pvc;
}


static inline int pvc_is_used(struct pvc_device *pvc)
{
	return pvc->main || pvc->ether;
}


static inline void pvc_carrier(int on, struct pvc_device *pvc)
{
	if (on) {
		if (pvc->main)
			if (!netif_carrier_ok(pvc->main))
				netif_carrier_on(pvc->main);
		if (pvc->ether)
			if (!netif_carrier_ok(pvc->ether))
				netif_carrier_on(pvc->ether);
	} else {
		if (pvc->main)
			if (netif_carrier_ok(pvc->main))
				netif_carrier_off(pvc->main);
		if (pvc->ether)
			if (netif_carrier_ok(pvc->ether))
				netif_carrier_off(pvc->ether);
	}
}


static inline void delete_unused_pvcs(hdlc_device *hdlc)
{
	struct pvc_device **pvc_p = &state(hdlc)->first_pvc;

	while (*pvc_p) {
		if (!pvc_is_used(*pvc_p)) {
			struct pvc_device *pvc = *pvc_p;
#ifdef DEBUG_PVC
			printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc);
#endif
			*pvc_p = pvc->next;
			kfree(pvc);
			continue;
		}
		pvc_p = &(*pvc_p)->next;
	}
}


static inline struct net_device **get_dev_p(struct pvc_device *pvc,
					    int type)
{
	if (type == ARPHRD_ETHER)
		return &pvc->ether;
	else
		return &pvc->main;
}

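/* Prepend the Frame Relay encapsulation header (RFC 2427 style): 4 bytes
 * (Q.922 address, UI, NLPID) for IP, IPv6 and LMI frames, or 10 bytes with
 * a SNAP header for everything else - OUI 00-80-C2 / PID 0x0007 for bridged
 * Ethernet without FCS, OUI 00-00-00 with the Ethertype as PID otherwise.
 */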
static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
{
	u16 head_len;
	struct sk_buff *skb = *skb_p;

	switch (skb->protocol) {
	case cpu_to_be16(NLPID_CCITT_ANSI_LMI):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_CCITT_ANSI_LMI;
		break;

	case cpu_to_be16(NLPID_CISCO_LMI):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_CISCO_LMI;
		break;

	case cpu_to_be16(ETH_P_IP):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_IP;
		break;

	case cpu_to_be16(ETH_P_IPV6):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_IPV6;
		break;

	case cpu_to_be16(ETH_P_802_3):
		head_len = 10;
		if (skb_headroom(skb) < head_len) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb,
								    head_len);
			if (!skb2)
				return -ENOBUFS;
			dev_kfree_skb(skb);
			skb = *skb_p = skb2;
		}
		skb_push(skb, head_len);
		skb->data[3] = FR_PAD;
		skb->data[4] = NLPID_SNAP;
		skb->data[5] = FR_PAD;
		skb->data[6] = 0x80;
		skb->data[7] = 0xC2;
		skb->data[8] = 0x00;
		skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
		break;

	default:
		head_len = 10;
		skb_push(skb, head_len);
		skb->data[3] = FR_PAD;
		skb->data[4] = NLPID_SNAP;
		skb->data[5] = FR_PAD;
		skb->data[6] = FR_PAD;
		skb->data[7] = FR_PAD;
		*(__be16*)(skb->data + 8) = skb->protocol;
	}

	dlci_to_q922(skb->data, dlci);
	skb->data[2] = FR_UI;
	return 0;
}



static int pvc_open(struct net_device *dev)
{
	struct pvc_device *pvc = dev->ml_priv;

	if ((pvc->frad->flags & IFF_UP) == 0)
		return -EIO;  /* Frad must be UP in order to activate PVC */

	if (pvc->open_count++ == 0) {
		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
		if (state(hdlc)->settings.lmi == LMI_NONE)
			pvc->state.active = netif_carrier_ok(pvc->frad);

		pvc_carrier(pvc->state.active, pvc);
		state(hdlc)->dce_changed = 1;
	}
	return 0;
}



static int pvc_close(struct net_device *dev)
{
	struct pvc_device *pvc = dev->ml_priv;

	if (--pvc->open_count == 0) {
		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
		if (state(hdlc)->settings.lmi == LMI_NONE)
			pvc->state.active = 0;

		if (state(hdlc)->settings.dce) {
			state(hdlc)->dce_changed = 1;
			pvc->state.active = 0;
		}
	}
	return 0;
}



static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct pvc_device *pvc = dev->ml_priv;
	fr_proto_pvc_info info;

	if (ifr->ifr_settings.type == IF_GET_PROTO) {
		if (dev->type == ARPHRD_ETHER)
			ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
		else
			ifr->ifr_settings.type = IF_PROTO_FR_PVC;

		if (ifr->ifr_settings.size < sizeof(info)) {
			/* data size wanted */
			ifr->ifr_settings.size = sizeof(info);
			return -ENOBUFS;
		}

		info.dlci = pvc->dlci;
		memcpy(info.master, pvc->frad->name, IFNAMSIZ);
		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
				 &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}

	return -EINVAL;
}

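/* Transmit on a PVC interface: drop the frame if the PVC is not active,
 * pad bridged Ethernet frames to ETH_ZLEN, prepend the Frame Relay header
 * and hand the result to the frad device.
 */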
static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pvc_device *pvc = dev->ml_priv;

	if (pvc->state.active) {
		if (dev->type == ARPHRD_ETHER) {
			int pad = ETH_ZLEN - skb->len;
			if (pad > 0) { /* Pad the frame with zeros */
				int len = skb->len;
				if (skb_tailroom(skb) < pad)
					if (pskb_expand_head(skb, 0, pad,
							     GFP_ATOMIC)) {
						dev->stats.tx_dropped++;
						dev_kfree_skb(skb);
						return NETDEV_TX_OK;
					}
				skb_put(skb, pad);
				memset(skb->data + len, 0, pad);
			}
			skb->protocol = cpu_to_be16(ETH_P_802_3);
		}
		if (!fr_hard_header(&skb, pvc->dlci)) {
			dev->stats.tx_bytes += skb->len;
			dev->stats.tx_packets++;
			if (pvc->state.fecn) /* TX Congestion counter */
				dev->stats.tx_compressed++;
			skb->dev = pvc->frad;
			dev_queue_xmit(skb);
			return NETDEV_TX_OK;
		}
	}

	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static inline void fr_log_dlci_active(struct pvc_device *pvc)
{
	netdev_info(pvc->frad, "DLCI %d [%s%s%s]%s %s\n",
		    pvc->dlci,
		    pvc->main ? pvc->main->name : "",
		    pvc->main && pvc->ether ? " " : "",
		    pvc->ether ? pvc->ether->name : "",
		    pvc->state.new ? " new" : "",
		    !pvc->state.exist ? "deleted" :
		    pvc->state.active ? "active" : "inactive");
}



static inline u8 fr_lmi_nextseq(u8 x)
{
	x++;
	return x ? x : 1;
}

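/* Build and queue an LMI frame: STATUS ENQUIRY when acting as DTE, STATUS
 * when acting as DCE. The frame carries a report type IE (link integrity
 * only, or full status) and the TX/RX sequence numbers; a DCE full report
 * additionally appends one PVC status IE per known PVC.
 */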
static void fr_lmi_send(struct net_device *dev, int fullrep)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct sk_buff *skb;
	struct pvc_device *pvc = state(hdlc)->first_pvc;
	int lmi = state(hdlc)->settings.lmi;
	int dce = state(hdlc)->settings.dce;
	int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
	int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
	u8 *data;
	int i = 0;

	if (dce && fullrep) {
		len += state(hdlc)->dce_pvc_count * (2 + stat_len);
		if (len > HDLC_MAX_MRU) {
			netdev_warn(dev, "Too many PVCs while sending LMI full report\n");
			return;
		}
	}

	skb = dev_alloc_skb(len);
	if (!skb) {
		netdev_warn(dev, "Memory squeeze on fr_lmi_send()\n");
		return;
	}
	memset(skb->data, 0, len);
	skb_reserve(skb, 4);
	if (lmi == LMI_CISCO) {
		skb->protocol = cpu_to_be16(NLPID_CISCO_LMI);
		fr_hard_header(&skb, LMI_CISCO_DLCI);
	} else {
		skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI);
		fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
	}
	data = skb_tail_pointer(skb);
	data[i++] = LMI_CALLREF;
	data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
	if (lmi == LMI_ANSI)
		data[i++] = LMI_ANSI_LOCKSHIFT;
	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
		LMI_ANSI_CISCO_REPTYPE;
	data[i++] = LMI_REPT_LEN;
	data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
	data[i++] = LMI_INTEG_LEN;
	data[i++] = state(hdlc)->txseq =
		fr_lmi_nextseq(state(hdlc)->txseq);
	data[i++] = state(hdlc)->rxseq;

	if (dce && fullrep) {
		while (pvc) {
			data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
				LMI_ANSI_CISCO_PVCSTAT;
			data[i++] = stat_len;

			/* LMI start/restart */
			if (state(hdlc)->reliable && !pvc->state.exist) {
				pvc->state.exist = pvc->state.new = 1;
				fr_log_dlci_active(pvc);
			}

			/* ifconfig PVC up */
			if (pvc->open_count && !pvc->state.active &&
			    pvc->state.exist && !pvc->state.new) {
				pvc_carrier(1, pvc);
				pvc->state.active = 1;
				fr_log_dlci_active(pvc);
			}

			if (lmi == LMI_CISCO) {
				data[i] = pvc->dlci >> 8;
				data[i + 1] = pvc->dlci & 0xFF;
			} else {
				data[i] = (pvc->dlci >> 4) & 0x3F;
				data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
				data[i + 2] = 0x80;
			}

			if (pvc->state.new)
				data[i + 2] |= 0x08;
			else if (pvc->state.active)
				data[i + 2] |= 0x02;

			i += stat_len;
			pvc = pvc->next;
		}
	}

	skb_put(skb, i);
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb_reset_network_header(skb);

	dev_queue_xmit(skb);
}

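/* Propagate a change of link reliability to every PVC: a reliable link
 * schedules a full status request (and with no LMI simply activates all
 * PVCs), an unreliable link deactivates them and marks the FRAD dormant.
 */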
static void fr_set_link_state(int reliable, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc = state(hdlc)->first_pvc;

	state(hdlc)->reliable = reliable;
	if (reliable) {
		netif_dormant_off(dev);
		state(hdlc)->n391cnt = 0; /* Request full status */
		state(hdlc)->dce_changed = 1;

		if (state(hdlc)->settings.lmi == LMI_NONE) {
			while (pvc) {	/* Activate all PVCs */
				pvc_carrier(1, pvc);
				pvc->state.exist = pvc->state.active = 1;
				pvc->state.new = 0;
				pvc = pvc->next;
			}
		}
	} else {
		netif_dormant_on(dev);
		while (pvc) {		/* Deactivate all PVCs */
			pvc_carrier(0, pvc);
			pvc->state.exist = pvc->state.active = 0;
			pvc->state.new = 0;
			if (!state(hdlc)->settings.dce)
				pvc->state.bandwidth = 0;
			pvc = pvc->next;
		}
	}
}

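/* Periodic LMI timer. As DCE it merely checks that a STATUS ENQUIRY arrived
 * within T392 seconds; as DTE it counts errors over the last N393 polls
 * (declaring the link unreliable once N392 of them are bad), sends the next
 * STATUS ENQUIRY (a full status request every N391 polls) and rearms after
 * T391 seconds.
 */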
static void fr_timer(struct timer_list *t)
{
	struct frad_state *st = from_timer(st, t, timer);
	struct net_device *dev = st->dev;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	int i, cnt = 0, reliable;
	u32 list;

	if (state(hdlc)->settings.dce) {
		reliable = state(hdlc)->request &&
			time_before(jiffies, state(hdlc)->last_poll +
				    state(hdlc)->settings.t392 * HZ);
		state(hdlc)->request = 0;
	} else {
		state(hdlc)->last_errors <<= 1; /* Shift the list */
		if (state(hdlc)->request) {
			if (state(hdlc)->reliable)
				netdev_info(dev, "No LMI status reply received\n");
			state(hdlc)->last_errors |= 1;
		}

		list = state(hdlc)->last_errors;
		for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1)
			cnt += (list & 1);	/* errors count */

		reliable = (cnt < state(hdlc)->settings.n392);
	}

	if (state(hdlc)->reliable != reliable) {
		netdev_info(dev, "Link %sreliable\n", reliable ? "" : "un");
		fr_set_link_state(reliable, dev);
	}

	if (state(hdlc)->settings.dce)
		state(hdlc)->timer.expires = jiffies +
			state(hdlc)->settings.t392 * HZ;
	else {
		if (state(hdlc)->n391cnt)
			state(hdlc)->n391cnt--;

		fr_lmi_send(dev, state(hdlc)->n391cnt == 0);

		state(hdlc)->last_poll = jiffies;
		state(hdlc)->request = 1;
		state(hdlc)->timer.expires = jiffies +
			state(hdlc)->settings.t391 * HZ;
	}

	add_timer(&state(hdlc)->timer);
}

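/* Parse and validate a received LMI frame. Both roles check the fixed part
 * (call reference, message type, report type and link integrity IEs) and
 * record the peer's TX sequence number. A DCE answers with a STATUS reply;
 * a DTE additionally walks the PVC status IEs of a full report, creating,
 * activating or deleting local PVC state to match. Returns non-zero on a
 * malformed frame.
 */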
static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc;
	u8 rxseq, txseq;
	int lmi = state(hdlc)->settings.lmi;
	int dce = state(hdlc)->settings.dce;
	int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;

	if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
			LMI_CCITT_CISCO_LENGTH)) {
		netdev_info(dev, "Short LMI frame\n");
		return 1;
	}

	if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
			     NLPID_CCITT_ANSI_LMI)) {
		netdev_info(dev, "Received non-LMI frame with LMI DLCI\n");
		return 1;
	}

	if (skb->data[4] != LMI_CALLREF) {
		netdev_info(dev, "Invalid LMI Call reference (0x%02X)\n",
			    skb->data[4]);
		return 1;
	}

	if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
		netdev_info(dev, "Invalid LMI Message type (0x%02X)\n",
			    skb->data[5]);
		return 1;
	}

	if (lmi == LMI_ANSI) {
		if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
			netdev_info(dev, "Not ANSI locking shift in LMI message (0x%02X)\n",
				    skb->data[6]);
			return 1;
		}
		i = 7;
	} else
		i = 6;

	if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
			     LMI_ANSI_CISCO_REPTYPE)) {
		netdev_info(dev, "Not an LMI Report type IE (0x%02X)\n",
			    skb->data[i]);
		return 1;
	}

	if (skb->data[++i] != LMI_REPT_LEN) {
		netdev_info(dev, "Invalid LMI Report type IE length (%u)\n",
			    skb->data[i]);
		return 1;
	}

	reptype = skb->data[++i];
	if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
		netdev_info(dev, "Unsupported LMI Report type (0x%02X)\n",
			    reptype);
		return 1;
	}

	if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
			       LMI_ANSI_CISCO_ALIVE)) {
		netdev_info(dev, "Not an LMI Link integrity verification IE (0x%02X)\n",
			    skb->data[i]);
		return 1;
	}

	if (skb->data[++i] != LMI_INTEG_LEN) {
		netdev_info(dev, "Invalid LMI Link integrity verification IE length (%u)\n",
			    skb->data[i]);
		return 1;
	}
	i++;

	state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */
	rxseq = skb->data[i++];	/* Should confirm our sequence */

	txseq = state(hdlc)->txseq;

	if (dce)
		state(hdlc)->last_poll = jiffies;

	error = 0;
	if (!state(hdlc)->reliable)
		error = 1;

	if (rxseq == 0 || rxseq != txseq) { /* Ask for full report next time */
		state(hdlc)->n391cnt = 0;
		error = 1;
	}

	if (dce) {
		if (state(hdlc)->fullrep_sent && !error) {
/* Stop sending full report - the last one has been confirmed by DTE */
			state(hdlc)->fullrep_sent = 0;
			pvc = state(hdlc)->first_pvc;
			while (pvc) {
				if (pvc->state.new) {
					pvc->state.new = 0;

/* Tell DTE that new PVC is now active */
					state(hdlc)->dce_changed = 1;
				}
				pvc = pvc->next;
			}
		}

		if (state(hdlc)->dce_changed) {
			reptype = LMI_FULLREP;
			state(hdlc)->fullrep_sent = 1;
			state(hdlc)->dce_changed = 0;
		}

		state(hdlc)->request = 1; /* got request */
		fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
		return 0;
	}

	/* DTE */

	state(hdlc)->request = 0; /* got response, no request pending */

	if (error)
		return 0;

	if (reptype != LMI_FULLREP)
		return 0;

	pvc = state(hdlc)->first_pvc;

	while (pvc) {
		pvc->state.deleted = 1;
		pvc = pvc->next;
	}

	no_ram = 0;
	while (skb->len >= i + 2 + stat_len) {
		u16 dlci;
		u32 bw;
		unsigned int active, new;

		if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
				       LMI_ANSI_CISCO_PVCSTAT)) {
			netdev_info(dev, "Not an LMI PVC status IE (0x%02X)\n",
				    skb->data[i]);
			return 1;
		}

		if (skb->data[++i] != stat_len) {
			netdev_info(dev, "Invalid LMI PVC status IE length (%u)\n",
				    skb->data[i]);
			return 1;
		}
		i++;

		new = !! (skb->data[i + 2] & 0x08);
		active = !! (skb->data[i + 2] & 0x02);
		if (lmi == LMI_CISCO) {
			dlci = (skb->data[i] << 8) | skb->data[i + 1];
			bw = (skb->data[i + 3] << 16) |
				(skb->data[i + 4] << 8) |
				(skb->data[i + 5]);
		} else {
			dlci = ((skb->data[i] & 0x3F) << 4) |
				((skb->data[i + 1] & 0x78) >> 3);
			bw = 0;
		}

		pvc = add_pvc(dev, dlci);

		if (!pvc && !no_ram) {
			netdev_warn(dev, "Memory squeeze on fr_lmi_recv()\n");
			no_ram = 1;
		}

		if (pvc) {
			pvc->state.exist = 1;
			pvc->state.deleted = 0;
			if (active != pvc->state.active ||
			    new != pvc->state.new ||
			    bw != pvc->state.bandwidth ||
			    !pvc->state.exist) {
				pvc->state.new = new;
				pvc->state.active = active;
				pvc->state.bandwidth = bw;
				pvc_carrier(active, pvc);
				fr_log_dlci_active(pvc);
			}
		}

		i += stat_len;
	}

	pvc = state(hdlc)->first_pvc;

	while (pvc) {
		if (pvc->state.deleted && pvc->state.exist) {
			pvc_carrier(0, pvc);
			pvc->state.active = pvc->state.new = 0;
			pvc->state.exist = 0;
			pvc->state.bandwidth = 0;
			fr_log_dlci_active(pvc);
		}
		pvc = pvc->next;
	}

	/* Next full report after N391 polls */
	state(hdlc)->n391cnt = state(hdlc)->settings.n391;

	return 0;
}

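/* Receive path for the FRAD: validate the Q.922/UI header, divert LMI DLCI
 * traffic to fr_lmi_recv(), look up the PVC by DLCI, note FECN/BECN
 * transitions, then strip the encapsulation and pass the frame up on the
 * matching pvc (or pvceth) interface according to its NLPID or SNAP header.
 */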
static int fr_rx(struct sk_buff *skb)
{
	struct net_device *frad = skb->dev;
	hdlc_device *hdlc = dev_to_hdlc(frad);
	struct fr_hdr *fh = (struct fr_hdr *)skb->data;
	u8 *data = skb->data;
	u16 dlci;
	struct pvc_device *pvc;
	struct net_device *dev = NULL;

	if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
		goto rx_error;

	dlci = q922_to_dlci(skb->data);

	if ((dlci == LMI_CCITT_ANSI_DLCI &&
	     (state(hdlc)->settings.lmi == LMI_ANSI ||
	      state(hdlc)->settings.lmi == LMI_CCITT)) ||
	    (dlci == LMI_CISCO_DLCI &&
	     state(hdlc)->settings.lmi == LMI_CISCO)) {
		if (fr_lmi_recv(frad, skb))
			goto rx_error;
		dev_kfree_skb_any(skb);
		return NET_RX_SUCCESS;
	}

	pvc = find_pvc(hdlc, dlci);
	if (!pvc) {
#ifdef DEBUG_PKT
		netdev_info(frad, "No PVC for received frame's DLCI %d\n",
			    dlci);
#endif
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	if (pvc->state.fecn != fh->fecn) {
#ifdef DEBUG_ECN
		printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name,
		       dlci, fh->fecn ? "N" : "FF");
#endif
		pvc->state.fecn ^= 1;
	}

	if (pvc->state.becn != fh->becn) {
#ifdef DEBUG_ECN
		printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name,
		       dlci, fh->becn ? "N" : "FF");
#endif
		pvc->state.becn ^= 1;
	}


	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
		frad->stats.rx_dropped++;
		return NET_RX_DROP;
	}

	if (data[3] == NLPID_IP) {
		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
		dev = pvc->main;
		skb->protocol = htons(ETH_P_IP);

	} else if (data[3] == NLPID_IPV6) {
		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
		dev = pvc->main;
		skb->protocol = htons(ETH_P_IPV6);

	} else if (skb->len > 10 && data[3] == FR_PAD &&
		   data[4] == NLPID_SNAP && data[5] == FR_PAD) {
		u16 oui = ntohs(*(__be16*)(data + 6));
		u16 pid = ntohs(*(__be16*)(data + 8));
		skb_pull(skb, 10);

		switch ((((u32)oui) << 16) | pid) {
		case ETH_P_ARP: /* routed frame with SNAP */
		case ETH_P_IPX:
		case ETH_P_IP:	/* a long variant */
		case ETH_P_IPV6:
			dev = pvc->main;
			skb->protocol = htons(pid);
			break;

		case 0x80C20007: /* bridged Ethernet frame */
			if ((dev = pvc->ether) != NULL)
				skb->protocol = eth_type_trans(skb, dev);
			break;

		default:
			netdev_info(frad, "Unsupported protocol, OUI=%x PID=%x\n",
				    oui, pid);
			dev_kfree_skb_any(skb);
			return NET_RX_DROP;
		}
	} else {
		netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n",
			    data[3], skb->len);
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	if (dev) {
		dev->stats.rx_packets++; /* PVC traffic */
		dev->stats.rx_bytes += skb->len;
		if (pvc->state.becn)
			dev->stats.rx_compressed++;
		skb->dev = dev;
		netif_rx(skb);
		return NET_RX_SUCCESS;
	} else {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

 rx_error:
	frad->stats.rx_errors++; /* Mark error */
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}

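/* fr_start()/fr_stop() are the hdlc_proto start/stop hooks, invoked by the
 * generic HDLC core when the carrier comes up or goes down on an open
 * interface: they reset the LMI state and start (or delete) the polling
 * timer, or with LMI disabled just force the link state directly.
 */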
static void fr_start(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
	printk(KERN_DEBUG "fr_start\n");
#endif
	if (state(hdlc)->settings.lmi != LMI_NONE) {
		state(hdlc)->reliable = 0;
		state(hdlc)->dce_changed = 1;
		state(hdlc)->request = 0;
		state(hdlc)->fullrep_sent = 0;
		state(hdlc)->last_errors = 0xFFFFFFFF;
		state(hdlc)->n391cnt = 0;
		state(hdlc)->txseq = state(hdlc)->rxseq = 0;

		state(hdlc)->dev = dev;
		timer_setup(&state(hdlc)->timer, fr_timer, 0);
		/* First poll after 1 s */
		state(hdlc)->timer.expires = jiffies + HZ;
		add_timer(&state(hdlc)->timer);
	} else
		fr_set_link_state(1, dev);
}


static void fr_stop(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
	printk(KERN_DEBUG "fr_stop\n");
#endif
	if (state(hdlc)->settings.lmi != LMI_NONE)
		del_timer_sync(&state(hdlc)->timer);
	fr_set_link_state(0, dev);
}


static void fr_close(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc = state(hdlc)->first_pvc;

	while (pvc) {		/* Shutdown all PVCs for this FRAD */
		if (pvc->main)
			dev_close(pvc->main);
		if (pvc->ether)
			dev_close(pvc->ether);
		pvc = pvc->next;
	}
}


static void pvc_setup(struct net_device *dev)
{
	dev->type = ARPHRD_DLCI;
	dev->flags = IFF_POINTOPOINT;
	dev->hard_header_len = 10;
	dev->addr_len = 2;
	netif_keep_dst(dev);
}

static const struct net_device_ops pvc_ops = {
	.ndo_open       = pvc_open,
	.ndo_stop       = pvc_close,
	.ndo_start_xmit = pvc_xmit,
	.ndo_do_ioctl   = pvc_ioctl,
};

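/* fr_add_pvc() registers the per-PVC network interface - a plain "pvc%d"
 * DLCI device or a bridged-Ethernet "pvceth%d" device - and attaches it to
 * the pvc_device entry for the given DLCI; fr_del_pvc() below undoes this
 * and frees the entry once neither interface type remains.
 */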
static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
{
	hdlc_device *hdlc = dev_to_hdlc(frad);
	struct pvc_device *pvc;
	struct net_device *dev;
	int used;

	if ((pvc = add_pvc(frad, dlci)) == NULL) {
		netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");
		return -ENOBUFS;
	}

	if (*get_dev_p(pvc, type))
		return -EEXIST;

	used = pvc_is_used(pvc);

	if (type == ARPHRD_ETHER)
		dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN,
				   ether_setup);
	else
		dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup);

	if (!dev) {
		netdev_warn(frad, "Memory squeeze on fr_pvc()\n");
		delete_unused_pvcs(hdlc);
		return -ENOBUFS;
	}

	if (type == ARPHRD_ETHER) {
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		eth_hw_addr_random(dev);
	} else {
		*(__be16*)dev->dev_addr = htons(dlci);
		dlci_to_q922(dev->broadcast, dlci);
	}
	dev->netdev_ops = &pvc_ops;
	dev->mtu = HDLC_MAX_MTU;
	dev->min_mtu = 68;
	dev->max_mtu = HDLC_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->ml_priv = pvc;

	if (register_netdevice(dev) != 0) {
		free_netdev(dev);
		delete_unused_pvcs(hdlc);
		return -EIO;
	}

	dev->needs_free_netdev = true;
	*get_dev_p(pvc, type) = dev;
	if (!used) {
		state(hdlc)->dce_changed = 1;
		state(hdlc)->dce_pvc_count++;
	}
	return 0;
}



static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
{
	struct pvc_device *pvc;
	struct net_device *dev;

	if ((pvc = find_pvc(hdlc, dlci)) == NULL)
		return -ENOENT;

	if ((dev = *get_dev_p(pvc, type)) == NULL)
		return -ENOENT;

	if (dev->flags & IFF_UP)
		return -EBUSY;		/* PVC in use */

	unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
	*get_dev_p(pvc, type) = NULL;

	if (!pvc_is_used(pvc)) {
		state(hdlc)->dce_pvc_count--;
		state(hdlc)->dce_changed = 1;
	}
	delete_unused_pvcs(hdlc);
	return 0;
}



static void fr_destroy(struct net_device *frad)
{
	hdlc_device *hdlc = dev_to_hdlc(frad);
	struct pvc_device *pvc = state(hdlc)->first_pvc;
	state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */
	state(hdlc)->dce_pvc_count = 0;
	state(hdlc)->dce_changed = 1;

	while (pvc) {
		struct pvc_device *next = pvc->next;
		/* destructors will free_netdev() main and ether */
		if (pvc->main)
			unregister_netdevice(pvc->main);

		if (pvc->ether)
			unregister_netdevice(pvc->ether);

		kfree(pvc);
		pvc = next;
	}
}


static struct hdlc_proto proto = {
	.close		= fr_close,
	.start		= fr_start,
	.stop		= fr_stop,
	.detach		= fr_destroy,
	.ioctl		= fr_ioctl,
	.netif_rx	= fr_rx,
	.module		= THIS_MODULE,
};

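/* Protocol ioctl handler: IF_GET_PROTO returns the current fr_proto
 * settings, IF_PROTO_FR validates new settings (LMI type, T391/T392,
 * N391/N392/N393, DTE/DCE) and attaches this protocol to the device, and
 * the ADD/DEL (ETH) PVC commands create or remove PVC interfaces.
 */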
static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
	const size_t size = sizeof(fr_proto);
	fr_proto new_settings;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	fr_proto_pvc pvc;
	int result;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
			return -EINVAL;
		ifr->ifr_settings.type = IF_PROTO_FR;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(fr_s, &state(hdlc)->settings, size))
			return -EFAULT;
		return 0;

	case IF_PROTO_FR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&new_settings, fr_s, size))
			return -EFAULT;

		if (new_settings.lmi == LMI_DEFAULT)
			new_settings.lmi = LMI_ANSI;

		if ((new_settings.lmi != LMI_NONE &&
		     new_settings.lmi != LMI_ANSI &&
		     new_settings.lmi != LMI_CCITT &&
		     new_settings.lmi != LMI_CISCO) ||
		    new_settings.t391 < 1 ||
		    new_settings.t392 < 2 ||
		    new_settings.n391 < 1 ||
		    new_settings.n392 < 1 ||
		    new_settings.n393 < new_settings.n392 ||
		    new_settings.n393 > 32 ||
		    (new_settings.dce != 0 &&
		     new_settings.dce != 1))
			return -EINVAL;

		result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
			result = attach_hdlc_protocol(dev, &proto,
						      sizeof(struct frad_state));
			if (result)
				return result;
			state(hdlc)->first_pvc = NULL;
			state(hdlc)->dce_pvc_count = 0;
		}
		memcpy(&state(hdlc)->settings, &new_settings, size);
		dev->type = ARPHRD_FRAD;
		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
		return 0;

	case IF_PROTO_FR_ADD_PVC:
	case IF_PROTO_FR_DEL_PVC:
	case IF_PROTO_FR_ADD_ETH_PVC:
	case IF_PROTO_FR_DEL_ETH_PVC:
		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
			return -EINVAL;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
				   sizeof(fr_proto_pvc)))
			return -EFAULT;

		if (pvc.dlci <= 0 || pvc.dlci >= 1024)
			return -EINVAL;	/* Only 10 bits, DLCI 0 reserved */

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
			result = ARPHRD_ETHER; /* bridged Ethernet device */
		else
			result = ARPHRD_DLCI;

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
			return fr_add_pvc(dev, pvc.dlci, result);
		else
			return fr_del_pvc(hdlc, pvc.dlci, result);
	}

	return -EINVAL;
}


static int __init mod_init(void)
{
	register_hdlc_protocol(&proto);
	return 0;
}


static void __exit mod_exit(void)
{
	unregister_hdlc_protocol(&proto);
}


module_init(mod_init);
module_exit(mod_exit);

MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");