// SPDX-License-Identifier: GPL-2.0-only
/*
 * umh - the kernel usermode helper
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/binfmts.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/ptrace.h>
#include <linux/async.h>
#include <linux/uaccess.h>

#include <trace/events/module.h>

#define CAP_BSET	(void *)1
#define CAP_PI		(void *)2

static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);

static void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
	if (info->cleanup)
		(*info->cleanup)(info);
	kfree(info);
}

static void umh_complete(struct subprocess_info *sub_info)
{
	struct completion *comp = xchg(&sub_info->complete, NULL);
	/*
	 * See call_usermodehelper_exec(). If xchg() returns NULL
	 * we own sub_info, the UMH_KILLABLE caller has gone away
	 * or the caller used UMH_NO_WAIT.
	 */
	if (comp)
		complete(comp);
	else
		call_usermodehelper_freeinfo(sub_info);
}

/*
 * This is the task which runs the usermode application
 */
static int call_usermodehelper_exec_async(void *data)
{
	struct subprocess_info *sub_info = data;
	struct cred *new;
	int retval;

	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	spin_unlock_irq(&current->sighand->siglock);

	/*
	 * Initial kernel threads share their FS with init, in order to
	 * get the init root directory. But we've now created a new
	 * thread that is going to execve a user process and has its own
	 * 'struct fs_struct'. Reset umask to the default.
	 */
	current->fs->umask = 0022;

	/*
	 * Our parent (unbound workqueue) runs with elevated scheduling
	 * priority. Avoid propagating that into the userspace child.
	 */
	set_user_nice(current, 0);

	retval = -ENOMEM;
	new = prepare_kernel_cred(current);
	if (!new)
		goto out;

	spin_lock(&umh_sysctl_lock);
	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
	new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
					     new->cap_inheritable);
	spin_unlock(&umh_sysctl_lock);

	if (sub_info->init) {
		retval = sub_info->init(sub_info, new);
		if (retval) {
			abort_creds(new);
			goto out;
		}
	}

	commit_creds(new);

	retval = kernel_execve(sub_info->path,
			       (const char *const *)sub_info->argv,
			       (const char *const *)sub_info->envp);
out:
	sub_info->retval = retval;
	/*
	 * call_usermodehelper_exec_sync() will call umh_complete
	 * if UMH_WAIT_PROC.
	 */
	if (!(sub_info->wait & UMH_WAIT_PROC))
		umh_complete(sub_info);
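
	/*
	 * If execve() succeeded, returning 0 lets this kernel thread
	 * complete its transition into the new userspace process. On
	 * failure there is no userspace to return to, so exit instead.
	 */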
	if (!retval)
		return 0;
	do_exit(0);
}

/* Handles UMH_WAIT_PROC. */
static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
{
	pid_t pid;

	/* If SIGCHLD is ignored, do_wait() won't populate the status. */
	kernel_sigaction(SIGCHLD, SIG_DFL);
	pid = kernel_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD);
	if (pid < 0)
		sub_info->retval = pid;
	else
		kernel_wait(pid, &sub_info->retval);

	/* Restore default kernel sig handler */
	kernel_sigaction(SIGCHLD, SIG_IGN);
	umh_complete(sub_info);
}

/*
 * We need to create the usermodehelper kernel thread from a task that is
 * affine to an optimized set of CPUs (or the nohz housekeeping ones), so
 * that helpers inherit the widest possible affinity irrespective of
 * call_usermodehelper() callers with possibly reduced affinity (eg:
 * per-cpu workqueues). We don't want usermodehelper targets to contend
 * for a busy CPU.
 *
 * Unbound workqueues provide such wide affinity and allow blocking on
 * UMH_WAIT_PROC requests without blocking pending requests (up to some
 * limit).
 *
 * Besides, workqueues provide the privilege level that the caller might
 * not have to perform the usermodehelper request.
 */
static void call_usermodehelper_exec_work(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);

	if (sub_info->wait & UMH_WAIT_PROC) {
		call_usermodehelper_exec_sync(sub_info);
	} else {
		pid_t pid;

		/*
		 * Use CLONE_PARENT to reparent it to kthreadd; we do not
		 * want to pollute current->children, and we need a parent
		 * that always ignores SIGCHLD to ensure auto-reaping.
		 */
		pid = kernel_thread(call_usermodehelper_exec_async, sub_info,
				    CLONE_PARENT | SIGCHLD);
		if (pid < 0) {
			sub_info->retval = pid;
			umh_complete(sub_info);
		}
	}
}

/*
 * If set, call_usermodehelper_exec() will exit immediately, returning -EBUSY
 * (used to prevent userland processes from being created after userland
 * has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_disable() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
 * to become 'false'.
 */
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in __usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)

int usermodehelper_read_trylock(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_INTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		if (usermodehelper_disabled == UMH_DISABLED)
			ret = -EAGAIN;

		up_read(&umhelper_sem);

		if (ret)
			break;

		schedule();
		try_to_freeze();

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return ret;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);

long usermodehelper_read_lock_wait(long timeout)
{
	DEFINE_WAIT(wait);

	if (timeout < 0)
		return -EINVAL;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		up_read(&umhelper_sem);

		timeout = schedule_timeout(timeout);
		if (!timeout)
			break;

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return timeout;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);

void usermodehelper_read_unlock(void)
{
	up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
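
/*
 * Illustrative sketch of the read-lock pattern (the helper invocation below
 * is hypothetical); holding the read lock keeps helpers from being disabled
 * while the request is in flight:
 *
 *	int ret = usermodehelper_read_trylock();
 *	if (ret)
 *		return ret;	// helpers disabled, e.g. during suspend
 *	ret = call_usermodehelper(path, argv, envp, UMH_WAIT_EXEC);
 *	usermodehelper_read_unlock();
 */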

/**
 * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Change the value of usermodehelper_disabled (under umhelper_sem locked for
 * writing) and wake up tasks waiting for it to change.
 */
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	wake_up(&usermodehelper_disabled_waitq);
	up_write(&umhelper_sem);
}

/**
 * __usermodehelper_disable - Prevent new helpers from being started.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
 */
int __usermodehelper_disable(enum umh_disable_depth depth)
{
	long retval;

	if (!depth)
		return -EINVAL;

	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	up_write(&umhelper_sem);

	/*
	 * From now on call_usermodehelper_exec() won't start any new
	 * helpers, so it is sufficient if running_helpers turns out to
	 * be zero at one point (it may be increased later, but that
	 * doesn't matter).
	 */
	retval = wait_event_timeout(running_helpers_waitq,
				    atomic_read(&running_helpers) == 0,
				    RUNNING_HELPERS_TIMEOUT);
	if (retval)
		return 0;

	__usermodehelper_set_disable_depth(UMH_ENABLED);
	return -EAGAIN;
}
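
/*
 * Sketch of how the disable/enable pair is meant to be used (illustrative
 * only; it mirrors the usermodehelper_disable()/usermodehelper_enable()
 * wrappers, assumed to live in the umh header):
 *
 *	if (__usermodehelper_disable(UMH_DISABLED))
 *		return -EAGAIN;	// helpers still running after the timeout
 *	...work that must not race with usermode helpers...
 *	__usermodehelper_set_disable_depth(UMH_ENABLED);
 */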

static void helper_lock(void)
{
	atomic_inc(&running_helpers);
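	/*
	 * Order the increment before the subsequent usermodehelper_disabled
	 * check in call_usermodehelper_exec(), so that a concurrent
	 * __usermodehelper_disable() either observes this helper as running
	 * or the helper observes the disabled state and bails out.
	 */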
	smp_mb__after_atomic();
}

static void helper_unlock(void)
{
	if (atomic_dec_and_test(&running_helpers))
		wake_up(&running_helpers_waitq);
}

/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 * @cleanup: a cleanup function
 * @init: an init function
 * @data: arbitrary context sensitive data
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure. This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 *
 * The init function is used to customize the helper process prior to
 * exec. A non-zero return code causes the process to error out, exit,
 * and return the failure to the calling process.
 *
 * The cleanup function is called just before the subprocess_info is
 * freed. This can be used for freeing the argv and envp. The function
 * must be runnable in either a process context or the context in which
 * call_usermodehelper_exec is called.
 */
struct subprocess_info *call_usermodehelper_setup(const char *path, char **argv,
		char **envp, gfp_t gfp_mask,
		int (*init)(struct subprocess_info *info, struct cred *new),
		void (*cleanup)(struct subprocess_info *info),
		void *data)
{
	struct subprocess_info *sub_info;

	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
	if (!sub_info)
		goto out;

	INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);

#ifdef CONFIG_STATIC_USERMODEHELPER
	sub_info->path = CONFIG_STATIC_USERMODEHELPER_PATH;
#else
	sub_info->path = path;
#endif
	sub_info->argv = argv;
	sub_info->envp = envp;

	sub_info->cleanup = cleanup;
	sub_info->init = init;
	sub_info->data = data;
out:
	return sub_info;
}
EXPORT_SYMBOL(call_usermodehelper_setup);
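
/*
 * Illustrative sketch of the setup/exec pattern with an init callback
 * (all names below - umh_setup_fds(), helper_path, helper_argv/envp -
 * are hypothetical):
 *
 *	static int umh_setup_fds(struct subprocess_info *info, struct cred *new)
 *	{
 *		// runs in the helper task just before exec; a non-zero
 *		// return aborts the helper
 *		return 0;
 *	}
 *
 *	info = call_usermodehelper_setup(helper_path, helper_argv, helper_envp,
 *					 GFP_KERNEL, umh_setup_fds, NULL, NULL);
 *	if (!info)
 *		return -ENOMEM;
 *	err = call_usermodehelper_exec(info, UMH_WAIT_EXEC);
 */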

/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application. The application is started asynchronously
 * if wait is not set, and runs as a child of system workqueues (i.e. it runs
 * with full root capabilities and optimized affinity).
 *
 * Note: successful return value does not guarantee the helper was called at
 * all. You can't rely on sub_info->{init,cleanup} being called even for
 * UMH_WAIT_* wait modes as STATIC_USERMODEHELPER_PATH="" turns all helpers
 * into a successful no-op.
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int retval = 0;

	if (!sub_info->path) {
		call_usermodehelper_freeinfo(sub_info);
		return -EINVAL;
	}
	helper_lock();
	if (usermodehelper_disabled) {
		retval = -EBUSY;
		goto out;
	}

	/*
	 * If there is no binary for us to call, then just return and get out of
	 * here. This allows us to set STATIC_USERMODEHELPER_PATH to "" and
	 * disable all call_usermodehelper() calls.
	 */
	if (strlen(sub_info->path) == 0)
		goto out;

	/*
	 * Set the completion pointer only if there is a waiter.
	 * This makes it possible to use umh_complete to free
	 * the data structure in case of UMH_NO_WAIT.
	 */
	sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
	sub_info->wait = wait;

	queue_work(system_unbound_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
		goto unlock;

	if (wait & UMH_KILLABLE) {
		retval = wait_for_completion_killable(&done);
		if (!retval)
			goto wait_done;

		/* umh_complete() will see NULL and free sub_info */
		if (xchg(&sub_info->complete, NULL))
			goto unlock;
		/* fallthrough, umh_complete() was already called */
	}

	wait_for_completion(&done);
wait_done:
	retval = sub_info->retval;
out:
	call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}
EXPORT_SYMBOL(call_usermodehelper_exec);
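
/*
 * Fire-and-forget sketch for contexts that cannot sleep on the result
 * (illustrative; path/argv/envp are placeholders). With UMH_NO_WAIT the
 * queued work owns and frees the subprocess_info, and no exec error is
 * reported back:
 *
 *	info = call_usermodehelper_setup(path, argv, envp, GFP_ATOMIC,
 *					 NULL, NULL, NULL);
 *	if (info)
 *		call_usermodehelper_exec(info, UMH_NO_WAIT);
 */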

/**
 * call_usermodehelper() - prepare and start a usermode application
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @wait: wait for the application to finish and return status.
 *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * This function is equivalent to calling call_usermodehelper_setup() and then
 * call_usermodehelper_exec().
 */
int call_usermodehelper(const char *path, char **argv, char **envp, int wait)
{
	struct subprocess_info *info;
	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;

	info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
					 NULL, NULL, NULL);
	if (info == NULL)
		return -ENOMEM;

	return call_usermodehelper_exec(info, wait);
}
EXPORT_SYMBOL(call_usermodehelper);
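
/*
 * Minimal usage sketch for the one-shot wrapper (illustrative only; the
 * helper binary and environment below are placeholders):
 *
 *	char *argv[] = { "/sbin/some_helper", NULL };
 *	char *envp[] = { "HOME=/",
 *			 "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };
 *
 *	int ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
 */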

static int proc_cap_handler(struct ctl_table *table, int write,
			    void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
	kernel_cap_t new_cap;
	int err, i;

	if (write && (!capable(CAP_SETPCAP) ||
		      !capable(CAP_SYS_MODULE)))
		return -EPERM;

	/*
	 * convert from the global kernel_cap_t to the ulong array to print to
	 * userspace if this is a read.
	 */
	spin_lock(&umh_sysctl_lock);
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++) {
		if (table->data == CAP_BSET)
			cap_array[i] = usermodehelper_bset.cap[i];
		else if (table->data == CAP_PI)
			cap_array[i] = usermodehelper_inheritable.cap[i];
		else
			BUG();
	}
	spin_unlock(&umh_sysctl_lock);

	t = *table;
	t.data = &cap_array;

	/*
	 * actually read or write an array of ulongs from userspace. Remember
	 * these are least significant 32 bits first
	 */
	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;

	/*
	 * convert from the sysctl array of ulongs to the kernel_cap_t
	 * internal representation
	 */
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
		new_cap.cap[i] = cap_array[i];

	/*
	 * Drop everything not in the new_cap (but don't add things)
	 */
	if (write) {
		spin_lock(&umh_sysctl_lock);
		if (table->data == CAP_BSET)
			usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
		if (table->data == CAP_PI)
			usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
		spin_unlock(&umh_sysctl_lock);
	}

	return 0;
}

struct ctl_table usermodehelper_table[] = {
	{
		.procname	= "bset",
		.data		= CAP_BSET,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{
		.procname	= "inheritable",
		.data		= CAP_PI,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{ }
};
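
/*
 * Note: this table is typically registered so that the knobs above appear as
 * /proc/sys/kernel/usermodehelper/{bset,inheritable}; the registration site
 * lives outside this file (an assumption about the surrounding kernel).
 */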