// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine, randomly
 *	fail to work with new releases, misbehave and/or generally screw up. It
 *	might even work.
 *
 *	This code REQUIRES 2.1.15 or higher
 *
 *	History
 *	X.25 001	Jonathan Naylor	Started coding.
 *	2000-09-04	Henner Eisen	Prevent freeing a dangling skb.
 */

#define pr_fmt(fmt) "X25: " fmt

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/if_arp.h>
#include <net/x25.h>
#include <net/x25device.h>

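/*
 *	Deliver one X.25 packet that arrived on a neighbour link.  LCI 0 is
 *	link control, a known LCI is handed to its socket (directly or via
 *	the socket backlog), a Call Request may set up a new session and
 *	anything else is offered to the forwarding code.  Returns non-zero
 *	when the skb has been consumed; otherwise the caller frees it.
 */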
static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
{
	struct sock *sk;
	unsigned short frametype;
	unsigned int lci;

	if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
		return 0;

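	/*
	 *	The first two octets carry the GFI and the 12-bit logical
	 *	channel identifier, the third octet the packet type.
	 */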
	frametype = skb->data[2];
	lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);

	/*
	 *	An LCI of zero is always for us, and it's always a link
	 *	control frame.
	 */
	if (lci == 0) {
		x25_link_control(skb, nb, frametype);
		return 0;
	}

	/*
	 *	Find an existing socket.
	 */
	if ((sk = x25_find_socket(lci, nb)) != NULL) {
		int queued = 1;

		skb_reset_transport_header(skb);
		bh_lock_sock(sk);
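		/*
		 *	Process the frame right away unless the socket is
		 *	owned by a process context; in that case queue it on
		 *	the socket backlog instead.
		 */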
		if (!sock_owned_by_user(sk)) {
			queued = x25_process_rx_frame(sk, skb);
		} else {
			queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
		}
		bh_unlock_sock(sk);
		sock_put(sk);
		return queued;
	}

	/*
	 *	Is it a Call Request? If so, process it.
	 */
	if (frametype == X25_CALL_REQUEST)
		return x25_rx_call_request(skb, nb, lci);

	/*
	 *	It's not a Call Request, nor is it a control frame.
	 *	Can we forward it?
	 */

	if (x25_forward_data(lci, nb, skb)) {
		if (frametype == X25_CLEAR_CONFIRMATION) {
			x25_clear_forward_by_lci(lci);
		}
		kfree_skb(skb);
		return 1;
	}

/*
	x25_transmit_clear_request(nb, lci, 0x0D);
*/

	if (frametype != X25_CLEAR_CONFIRMATION)
		pr_debug("x25_receive_data(): unknown frame type %2x\n", frametype);

	return 0;
}

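/*
 *	packet_type receive handler for ETH_P_X25 frames.  The first octet
 *	is a pseudo-header from the driver/LAPB layer: X25_IFACE_DATA,
 *	X25_IFACE_CONNECT or X25_IFACE_DISCONNECT.
 */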
int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype, struct net_device *orig_dev)
{
	struct sk_buff *nskb;
	struct x25_neigh *nb;

	if (!net_eq(dev_net(dev), &init_net))
		goto drop;

	nskb = skb_copy(skb, GFP_ATOMIC);
	if (!nskb)
		goto drop;
	kfree_skb(skb);
	skb = nskb;

	/*
	 *	Packet received from unrecognised device, throw it away.
	 */
	nb = x25_get_neigh(dev);
	if (!nb) {
		pr_debug("unknown neighbour - %s\n", dev->name);
		goto drop;
	}

	if (!pskb_may_pull(skb, 1)) {
		/* Too short to carry the interface byte; drop our copy. */
		x25_neigh_put(nb);
		goto drop;
	}

	switch (skb->data[0]) {

	case X25_IFACE_DATA:
		skb_pull(skb, 1);
		if (x25_receive_data(skb, nb)) {
			x25_neigh_put(nb);
			goto out;
		}
		break;

	case X25_IFACE_CONNECT:
		x25_link_established(nb);
		break;

	case X25_IFACE_DISCONNECT:
		x25_link_terminated(nb);
		break;
	}
	x25_neigh_put(nb);
drop:
	kfree_skb(skb);
out:
	return 0;
}

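/*
 *	Ask the device to bring the layer 2 link up.  For ARPHRD_X25 devices
 *	this means sending a one-byte X25_IFACE_CONNECT pseudo-header frame
 *	to the driver; nothing is sent for LLC-attached Ethernet or other
 *	device types.
 */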
void x25_establish_link(struct x25_neigh *nb)
{
	struct sk_buff *skb;
	unsigned char *ptr;

	switch (nb->dev->type) {
	case ARPHRD_X25:
		if ((skb = alloc_skb(1, GFP_ATOMIC)) == NULL) {
			pr_err("x25_dev: out of memory\n");
			return;
		}
		ptr = skb_put(skb, 1);
		*ptr = X25_IFACE_CONNECT;
		break;

#if IS_ENABLED(CONFIG_LLC)
	case ARPHRD_ETHER:
		return;
#endif
	default:
		return;
	}

	skb->protocol = htons(ETH_P_X25);
	skb->dev = nb->dev;

	dev_queue_xmit(skb);
}

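/*
 *	Ask the device to take the layer 2 link down: an X25_IFACE_DISCONNECT
 *	pseudo-header frame is sent to ARPHRD_X25 devices, nothing for
 *	LLC-attached Ethernet or other device types.
 */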
void x25_terminate_link(struct x25_neigh *nb)
{
	struct sk_buff *skb;
	unsigned char *ptr;

#if IS_ENABLED(CONFIG_LLC)
	if (nb->dev->type == ARPHRD_ETHER)
		return;
#endif
	if (nb->dev->type != ARPHRD_X25)
		return;

	skb = alloc_skb(1, GFP_ATOMIC);
	if (!skb) {
		pr_err("x25_dev: out of memory\n");
		return;
	}

	ptr = skb_put(skb, 1);
	*ptr = X25_IFACE_DISCONNECT;

	skb->protocol = htons(ETH_P_X25);
	skb->dev = nb->dev;
	dev_queue_xmit(skb);
}

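/*
 *	Hand an outgoing X.25 packet to the device: prepend the one-byte
 *	X25_IFACE_DATA pseudo-header for ARPHRD_X25 devices and queue the
 *	frame for transmission.  Frames for LLC-attached Ethernet or unknown
 *	device types are simply freed here.
 */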
void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb)
{
	unsigned char *dptr;

	skb_reset_network_header(skb);

	switch (nb->dev->type) {
	case ARPHRD_X25:
		dptr = skb_push(skb, 1);
		*dptr = X25_IFACE_DATA;
		break;

#if IS_ENABLED(CONFIG_LLC)
	case ARPHRD_ETHER:
		kfree_skb(skb);
		return;
#endif
	default:
		kfree_skb(skb);
		return;
	}

	skb->protocol = htons(ETH_P_X25);
	skb->dev = nb->dev;

	dev_queue_xmit(skb);
}