// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * RMNET configuration engine
 */

#include <net/sock.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netdevice.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_vnd.h"
#include "rmnet_private.h"

/* Locking scheme -
 * The shared resource which needs to be protected is real_dev->rx_handler_data.
 * For the writer path, this is protected by rtnl_lock(). The writer paths are
 * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These
 * paths are already called with rtnl_lock() acquired, and there is also an
 * ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For
 * dereference here, we will need to use rtnl_dereference(). Dev list writing
 * needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link().
 * For the reader path, real_dev->rx_handler_data is dereferenced in the TX / RX
 * path. We only need rcu_read_lock() for these scenarios. In these cases,
 * the rcu_read_lock() is held in __dev_queue_xmit() and
 * netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl()
 * to get the relevant information. For dev list reading, we again acquire
 * rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu().
 * We also use unregister_netdevice_many() to free all rmnet devices in
 * rmnet_force_unassociate_device() so we don't lose the rtnl_lock() and can
 * free them in the same context.
 */

/* Local Definitions and Declarations */

static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = {
	[IFLA_RMNET_MUX_ID]	= { .type = NLA_U16 },
	[IFLA_RMNET_FLAGS]	= { .len = sizeof(struct ifla_rmnet_flags) },
};

static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
{
	return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
}

/* Needs rtnl lock */
static struct rmnet_port*
rmnet_get_port_rtnl(const struct net_device *real_dev)
{
	return rtnl_dereference(real_dev->rx_handler_data);
}

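/* Detaches a real device from rmnet: removes the rx_handler, frees the
 * port and drops the reference taken at registration time. Only valid
 * once no rmnet devices remain attached. Needs rtnl lock.
 */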
static int rmnet_unregister_real_device(struct net_device *real_dev,
					struct rmnet_port *port)
{
	if (port->nr_rmnet_devs)
		return -EINVAL;

	netdev_rx_handler_unregister(real_dev);

	kfree(port);

	/* release reference on real_dev */
	dev_put(real_dev);

	netdev_dbg(real_dev, "Removed from rmnet\n");
	return 0;
}

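/* Attaches a real device to rmnet: allocates the per-device port, installs
 * rmnet_rx_handler as its rx_handler and takes a reference on the device.
 * Calling it again for an already registered device is a no-op.
 */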
static int rmnet_register_real_device(struct net_device *real_dev)
{
	struct rmnet_port *port;
	int rc, entry;

	ASSERT_RTNL();

	if (rmnet_is_real_dev_registered(real_dev))
		return 0;

	port = kzalloc(sizeof(*port), GFP_ATOMIC);
	if (!port)
		return -ENOMEM;

	port->dev = real_dev;
	rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
	if (rc) {
		kfree(port);
		return -EBUSY;
	}

	/* hold on to real dev for MAP data */
	dev_hold(real_dev);

	for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
		INIT_HLIST_HEAD(&port->muxed_ep[entry]);

	netdev_dbg(real_dev, "registered with rmnet\n");
	return 0;
}

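/* Clears the bridge state for a port. When called for the bridge slave
 * (a port with no rmnet devices), only the master's bridge settings are
 * reset; otherwise the slave real device is unregistered from rmnet.
 */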
static void rmnet_unregister_bridge(struct net_device *dev,
				    struct rmnet_port *port)
{
	struct rmnet_port *bridge_port;
	struct net_device *bridge_dev;

	if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
		return;

	/* bridge slave handling */
	if (!port->nr_rmnet_devs) {
		bridge_dev = port->bridge_ep;

		bridge_port = rmnet_get_port_rtnl(bridge_dev);
		bridge_port->bridge_ep = NULL;
		bridge_port->rmnet_mode = RMNET_EPMODE_VND;
	} else {
		bridge_dev = port->bridge_ep;

		bridge_port = rmnet_get_port_rtnl(bridge_dev);
		rmnet_unregister_real_device(bridge_dev, bridge_port);
	}
}

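/* rtnl newlink handler: creates an rmnet device on top of the real device
 * given by IFLA_LINK, keyed by the mandatory IFLA_RMNET_MUX_ID attribute.
 */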
static int rmnet_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION;
	struct net_device *real_dev;
	int mode = RMNET_EPMODE_VND;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	int err = 0;
	u16 mux_id;

	if (!tb[IFLA_LINK]) {
		NL_SET_ERR_MSG_MOD(extack, "link not specified");
		return -EINVAL;
	}

	real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev || !dev)
		return -ENODEV;

	if (!data[IFLA_RMNET_MUX_ID])
		return -EINVAL;

	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
	if (!ep)
		return -ENOMEM;

	mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);

	err = rmnet_register_real_device(real_dev);
	if (err)
		goto err0;

	port = rmnet_get_port_rtnl(real_dev);
	err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep);
	if (err)
		goto err1;

	port->rmnet_mode = mode;

	hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);

	if (data[IFLA_RMNET_FLAGS]) {
		struct ifla_rmnet_flags *flags;

		flags = nla_data(data[IFLA_RMNET_FLAGS]);
		data_format = flags->flags & flags->mask;
	}

	netdev_dbg(dev, "data format [0x%08X]\n", data_format);
	port->data_format = data_format;

	return 0;

err1:
	rmnet_unregister_real_device(real_dev, port);
err0:
	kfree(ep);
	return err;
}

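/* rtnl dellink handler: removes the rmnet device's endpoint and releases
 * the real device when the last rmnet device attached to it goes away.
 */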
static void rmnet_dellink(struct net_device *dev, struct list_head *head)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct net_device *real_dev;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	u8 mux_id;

	real_dev = priv->real_dev;

	if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
		return;

	port = rmnet_get_port_rtnl(real_dev);

	mux_id = rmnet_vnd_get_mux(dev);

	ep = rmnet_get_endpoint(port, mux_id);
	if (ep) {
		hlist_del_init_rcu(&ep->hlnode);
		rmnet_unregister_bridge(dev, port);
		rmnet_vnd_dellink(mux_id, port, ep);
		kfree(ep);
	}
	rmnet_unregister_real_device(real_dev, port);

	unregister_netdevice_queue(dev, head);
}

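/* Invoked when the real device itself is unregistering: tears down every
 * rmnet device and endpoint attached to it in a single batch.
 */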
static void rmnet_force_unassociate_device(struct net_device *dev)
{
	struct net_device *real_dev = dev;
	struct hlist_node *tmp_ep;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	unsigned long bkt_ep;
	LIST_HEAD(list);

	if (!rmnet_is_real_dev_registered(real_dev))
		return;

	ASSERT_RTNL();

	port = rmnet_get_port_rtnl(dev);

	rcu_read_lock();
	rmnet_unregister_bridge(dev, port);

	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
		unregister_netdevice_queue(ep->egress_dev, &list);
		rmnet_vnd_dellink(ep->mux_id, port, ep);

		hlist_del_init_rcu(&ep->hlnode);
		kfree(ep);
	}

	rcu_read_unlock();
	unregister_netdevice_many(&list);

	rmnet_unregister_real_device(real_dev, port);
}

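/* netdevice notifier callback: cleans up rmnet state when a backing real
 * device is unregistered from the kernel.
 */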
static int rmnet_config_notify_cb(struct notifier_block *nb,
				  unsigned long event, void *data)
{
	struct net_device *dev = netdev_notifier_info_to_dev(data);

	if (!dev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		netdev_dbg(dev, "Kernel unregister\n");
		rmnet_force_unassociate_device(dev);
		break;

	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block rmnet_dev_notifier __read_mostly = {
	.notifier_call = rmnet_config_notify_cb,
};

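/* Validates that a mux ID attribute is present and within the supported
 * range before a link is created.
 */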
static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
			       struct netlink_ext_ack *extack)
{
	u16 mux_id;

	if (!data || !data[IFLA_RMNET_MUX_ID])
		return -EINVAL;

	mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
	if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
		return -ERANGE;

	return 0;
}

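/* rtnl changelink handler: allows the mux ID and the data format flags of
 * an existing rmnet device to be updated.
 */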
static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct net_device *real_dev;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	u16 mux_id;

	if (!dev)
		return -ENODEV;

	if (!tb[IFLA_LINK])
		return -EINVAL;

	real_dev = __dev_get_by_index(dev_net(dev),
				      nla_get_u32(tb[IFLA_LINK]));

	if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
		return -ENODEV;

	port = rmnet_get_port_rtnl(real_dev);

	if (data[IFLA_RMNET_MUX_ID]) {
		mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
		ep = rmnet_get_endpoint(port, priv->mux_id);
		if (!ep)
			return -ENODEV;

		hlist_del_init_rcu(&ep->hlnode);
		hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);

		ep->mux_id = mux_id;
		priv->mux_id = mux_id;
	}

	if (data[IFLA_RMNET_FLAGS]) {
		struct ifla_rmnet_flags *flags;

		flags = nla_data(data[IFLA_RMNET_FLAGS]);
		port->data_format = flags->flags & flags->mask;
	}

	return 0;
}

static size_t rmnet_get_size(const struct net_device *dev)
{
	return
		/* IFLA_RMNET_MUX_ID */
		nla_total_size(2) +
		/* IFLA_RMNET_FLAGS */
		nla_total_size(sizeof(struct ifla_rmnet_flags));
}

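/* Reports the mux ID and the currently active data format flags */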
static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct net_device *real_dev;
	struct ifla_rmnet_flags f;
	struct rmnet_port *port;

	real_dev = priv->real_dev;

	if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id))
		goto nla_put_failure;

	if (rmnet_is_real_dev_registered(real_dev)) {
		port = rmnet_get_port_rtnl(real_dev);
		f.flags = port->data_format;
	} else {
		f.flags = 0;
	}

	f.mask  = ~0;

	if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

struct rtnl_link_ops rmnet_link_ops __read_mostly = {
	.kind		= "rmnet",
	.maxtype	= IFLA_RMNET_MAX,
	.priv_size	= sizeof(struct rmnet_priv),
	.setup		= rmnet_vnd_setup,
	.validate	= rmnet_rtnl_validate,
	.newlink	= rmnet_newlink,
	.dellink	= rmnet_dellink,
	.get_size	= rmnet_get_size,
	.changelink	= rmnet_changelink,
	.policy		= rmnet_policy,
	.fill_info	= rmnet_fill_info,
};

/* Needs either rcu_read_lock() or rtnl lock */
struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
{
	if (rmnet_is_real_dev_registered(real_dev))
		return rcu_dereference_rtnl(real_dev->rx_handler_data);
	else
		return NULL;
}

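/* Looks up the endpoint with the given mux ID on a port. Needs either
 * rcu_read_lock() or rtnl lock, matching rmnet_get_port().
 */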
struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
{
	struct rmnet_endpoint *ep;

	hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) {
		if (ep->mux_id == mux_id)
			return ep;
	}

	return NULL;
}

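/* Attaches a second real device as a bridge slave to the rmnet device's
 * real device, putting both ports into bridge mode so traffic is forwarded
 * between the two real devices.
 */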
int rmnet_add_bridge(struct net_device *rmnet_dev,
		     struct net_device *slave_dev,
		     struct netlink_ext_ack *extack)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	struct net_device *real_dev = priv->real_dev;
	struct rmnet_port *port, *slave_port;
	int err;

	port = rmnet_get_port(real_dev);

	/* If there is more than one rmnet dev attached, it's probably being
	 * used for muxing. Skip the bridging in that case.
	 */
	if (port->nr_rmnet_devs > 1)
		return -EINVAL;

	if (rmnet_is_real_dev_registered(slave_dev))
		return -EBUSY;

	err = rmnet_register_real_device(slave_dev);
	if (err)
		return -EBUSY;

	slave_port = rmnet_get_port(slave_dev);
	slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
	slave_port->bridge_ep = real_dev;

	port->rmnet_mode = RMNET_EPMODE_BRIDGE;
	port->bridge_ep = slave_dev;

	netdev_dbg(slave_dev, "registered with rmnet as slave\n");
	return 0;
}

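/* Reverts the bridge setup: restores the master port to VND mode and
 * unregisters the slave real device from rmnet.
 */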
int rmnet_del_bridge(struct net_device *rmnet_dev,
		     struct net_device *slave_dev)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	struct net_device *real_dev = priv->real_dev;
	struct rmnet_port *port, *slave_port;

	port = rmnet_get_port(real_dev);
	port->rmnet_mode = RMNET_EPMODE_VND;
	port->bridge_ep = NULL;

	slave_port = rmnet_get_port(slave_dev);
	rmnet_unregister_real_device(slave_dev, slave_port);

	netdev_dbg(slave_dev, "removed from rmnet as slave\n");
	return 0;
}

/* Startup/Shutdown */

static int __init rmnet_init(void)
{
	int rc;

	rc = register_netdevice_notifier(&rmnet_dev_notifier);
	if (rc != 0)
		return rc;

	rc = rtnl_link_register(&rmnet_link_ops);
	if (rc != 0) {
		unregister_netdevice_notifier(&rmnet_dev_notifier);
		return rc;
	}
	return rc;
}

static void __exit rmnet_exit(void)
{
	unregister_netdevice_notifier(&rmnet_dev_notifier);
	rtnl_link_unregister(&rmnet_link_ops);
}

module_init(rmnet_init)
module_exit(rmnet_exit)
MODULE_LICENSE("GPL v2");