// SPDX-License-Identifier: GPL-2.0-only
/*
 * vxcan.c - Virtual CAN Tunnel for cross namespace communication
 *
 * This code is derived from drivers/net/can/vcan.c for the virtual CAN
 * specific parts and from drivers/net/veth.c to implement the netlink API
 * for network interface pairs in a common and established way.
 *
 * Copyright (c) 2017 Oliver Hartkopp <socketcan@hartkopp.net>
 */
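
/*
 * Example usage from user space (a sketch only; the interface and
 * namespace names are illustrative and assume an iproute2 build with
 * vxcan support):
 *
 *   ip netns add peerns
 *   ip link add vxcan0 type vxcan peer name vxcan1 netns peerns
 *   ip link set vxcan0 up
 *   ip netns exec peerns ip link set vxcan1 up
 *
 * CAN frames sent on vxcan0 then appear on vxcan1 inside "peerns" and
 * vice versa.
 */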

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
#include <linux/can/vxcan.h>
#include <linux/can/can-ml.h>
#include <linux/slab.h>
#include <net/rtnetlink.h>

#define DRV_NAME "vxcan"

MODULE_DESCRIPTION("Virtual CAN Tunnel");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);

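/* Per-device private data.
 *
 * The rtnl_link_ops below reserve room for a struct can_ml_priv behind
 * the NETDEV_ALIGN-aligned struct vxcan_priv; vxcan_setup() points
 * dev->ml_priv at that area for the CAN midlayer.
 */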
struct vxcan_priv {
	struct net_device __rcu	*peer;
};

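/* Transmit: hand the CAN frame over to the peer device's receive path
 * and account tx statistics on this device and rx statistics on the peer.
 */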
static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct net_device_stats *peerstats, *srcstats = &dev->stats;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer)) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		goto out_unlock;
	}

	skb = can_create_echo_skb(skb);
	if (!skb)
		goto out_unlock;

	/* reset CAN GW hop counter */
	skb->csum_start = 0;
	skb->pkt_type   = PACKET_BROADCAST;
	skb->dev        = peer;
	skb->ip_summed  = CHECKSUM_UNNECESSARY;

	if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
		srcstats->tx_packets++;
		srcstats->tx_bytes += cfd->len;
		peerstats = &peer->stats;
		peerstats->rx_packets++;
		peerstats->rx_bytes += cfd->len;
	}

out_unlock:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}


static int vxcan_open(struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	if (!peer)
		return -ENOTCONN;

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}
	return 0;
}

static int vxcan_close(struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	return 0;
}

static int vxcan_get_iflink(const struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? peer->ifindex : 0;
	rcu_read_unlock();

	return iflink;
}

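/* The MTU selects between Classical CAN (CAN_MTU) and CAN FD (CANFD_MTU)
 * frame support; it may only be changed while the interface is down.
 */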
static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
{
	/* Do not allow changing the MTU while running */
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops vxcan_netdev_ops = {
	.ndo_open	= vxcan_open,
	.ndo_stop	= vxcan_close,
	.ndo_start_xmit	= vxcan_xmit,
	.ndo_get_iflink	= vxcan_get_iflink,
	.ndo_change_mtu = vxcan_change_mtu,
};

static void vxcan_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_CAN;
	dev->mtu		= CANFD_MTU;
	dev->hard_header_len	= 0;
	dev->addr_len		= 0;
	dev->tx_queue_len	= 0;
	dev->flags		= (IFF_NOARP|IFF_ECHO);
	dev->netdev_ops		= &vxcan_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->ml_priv		= netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
}

/* forward declaration for rtnl_create_link() */
static struct rtnl_link_ops vxcan_link_ops;

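/* Create both ends of the tunnel: the peer device is set up first
 * (optionally in another network namespace and with attributes taken
 * from the VXCAN_INFO_PEER netlink attribute), then the local device is
 * registered, and finally the two devices are cross linked via their
 * RCU protected peer pointers.
 */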
static int vxcan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct vxcan_priv *priv;
	struct net_device *peer;
	struct net *peer_net;

	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb;
	char ifname[IFNAMSIZ];
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp = NULL;
	int err;

	/* register peer device */
	if (data && data[VXCAN_INFO_PEER]) {
		struct nlattr *nla_peer;

		nla_peer = data[VXCAN_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifla(peer_tb,
					  nla_data(nla_peer) +
					  sizeof(struct ifinfomsg),
					  nla_len(nla_peer) -
					  sizeof(struct ifinfomsg),
					  NULL);
		if (err < 0)
			return err;

		tbp = peer_tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	peer_net = rtnl_link_get_net(net, tbp);
	if (IS_ERR(peer_net))
		return PTR_ERR(peer_net);

	peer = rtnl_create_link(peer_net, ifname, name_assign_type,
				&vxcan_link_ops, tbp, extack);
	if (IS_ERR(peer)) {
		put_net(peer_net);
		return PTR_ERR(peer);
	}

	if (ifmp && dev->ifindex)
		peer->ifindex = ifmp->ifi_index;

	err = register_netdevice(peer);
	put_net(peer_net);
	peer_net = NULL;
	if (err < 0) {
		free_netdev(peer);
		return err;
	}

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto unregister_network_device;

	/* register first device */
	if (tb[IFLA_IFNAME])
		nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto unregister_network_device;

	netif_carrier_off(dev);

	/* cross link the device pair */
	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);

	return 0;

unregister_network_device:
	unregister_netdevice(peer);
	return err;
}

static void vxcan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxcan_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note: dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not to be freed before one RCU grace period has elapsed.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

static const struct nla_policy vxcan_policy[VXCAN_INFO_MAX + 1] = {
	[VXCAN_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
};

static struct net *vxcan_get_link_net(const struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

static struct rtnl_link_ops vxcan_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN) + sizeof(struct can_ml_priv),
	.setup		= vxcan_setup,
	.newlink	= vxcan_newlink,
	.dellink	= vxcan_dellink,
	.policy		= vxcan_policy,
	.maxtype	= VXCAN_INFO_MAX,
	.get_link_net	= vxcan_get_link_net,
};

static __init int vxcan_init(void)
{
	pr_info("vxcan: Virtual CAN Tunnel driver\n");

	return rtnl_link_register(&vxcan_link_ops);
}

static __exit void vxcan_exit(void)
{
	rtnl_link_unregister(&vxcan_link_ops);
}

module_init(vxcan_init);
module_exit(vxcan_exit);