1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License as
4 * published by the Free Software Foundation, version 2 of the
5 * License.
6 */
7
8 #include <linux/export.h>
9 #include <linux/nsproxy.h>
10 #include <linux/slab.h>
11 #include <linux/sched/signal.h>
12 #include <linux/user_namespace.h>
13 #include <linux/proc_ns.h>
14 #include <linux/highuid.h>
15 #include <linux/cred.h>
16 #include <linux/securebits.h>
17 #include <linux/keyctl.h>
18 #include <linux/key-type.h>
19 #include <keys/user-type.h>
20 #include <linux/seq_file.h>
21 #include <linux/fs.h>
22 #include <linux/uaccess.h>
23 #include <linux/ctype.h>
24 #include <linux/projid.h>
25 #include <linux/fs_struct.h>
26 #include <linux/bsearch.h>
27 #include <linux/sort.h>
28
29 static struct kmem_cache *user_ns_cachep __read_mostly;
30 static DEFINE_MUTEX(userns_state_mutex);
31
32 static bool new_idmap_permitted(const struct file *file,
33 struct user_namespace *ns, int cap_setid,
34 struct uid_gid_map *map);
35 static void free_user_ns(struct work_struct *work);
36
37 static struct ucounts *inc_user_namespaces(struct user_namespace *ns, kuid_t uid)
38 {
39 return inc_ucount(ns, uid, UCOUNT_USER_NAMESPACES);
40 }
41
42 static void dec_user_namespaces(struct ucounts *ucounts)
43 {
44 return dec_ucount(ucounts, UCOUNT_USER_NAMESPACES);
45 }
46
47 static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
48 {
49 /* Start with the same capabilities as init, but they are useless for
50 * doing anything as the capabilities are bound to the new user namespace.
51 */
52 cred->securebits = SECUREBITS_DEFAULT;
53 cred->cap_inheritable = CAP_EMPTY_SET;
54 cred->cap_permitted = CAP_FULL_SET;
55 cred->cap_effective = CAP_FULL_SET;
56 cred->cap_ambient = CAP_EMPTY_SET;
57 cred->cap_bset = CAP_FULL_SET;
58 #ifdef CONFIG_KEYS
59 key_put(cred->request_key_auth);
60 cred->request_key_auth = NULL;
61 #endif
62 /* tgcred will be cleared in our caller because CLONE_THREAD won't be set */
63 cred->user_ns = user_ns;
64 }
65
66 /*
67 * Create a new user namespace, deriving the creator from the user in the
68 * passed credentials, and replacing that user with the new root user for the
69 * new namespace.
70 *
71 * This is called by copy_creds(), which will finish setting the target task's
72 * credentials.
73 */
74 int create_user_ns(struct cred *new)
75 {
76 struct user_namespace *ns, *parent_ns = new->user_ns;
77 kuid_t owner = new->euid;
78 kgid_t group = new->egid;
79 struct ucounts *ucounts;
80 int ret, i;
81
82 ret = -ENOSPC;
83 if (parent_ns->level > 32)
84 goto fail;
85
86 ucounts = inc_user_namespaces(parent_ns, owner);
87 if (!ucounts)
88 goto fail;
89
90 /*
91 * Verify that we can not violate the policy of which files
92 * may be accessed that is specified by the root directory,
93 * by verifying that the root directory is at the root of the
94 * mount namespace which allows all files to be accessed.
95 */
96 ret = -EPERM;
97 if (current_chrooted())
98 goto fail_dec;
99
100 /* The creator needs a mapping in the parent user namespace
101 * or else we won't be able to reasonably tell userspace who
102 * created a user_namespace.
103 */
104 ret = -EPERM;
105 if (!kuid_has_mapping(parent_ns, owner) ||
106 !kgid_has_mapping(parent_ns, group))
107 goto fail_dec;
108
109 ret = -ENOMEM;
110 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
111 if (!ns)
112 goto fail_dec;
113
114 ret = ns_alloc_inum(&ns->ns);
115 if (ret)
116 goto fail_free;
117 ns->ns.ops = &userns_operations;
118
119 atomic_set(&ns->count, 1);
120 /* Leave the new->user_ns reference with the new user namespace. */
121 ns->parent = parent_ns;
122 ns->level = parent_ns->level + 1;
123 ns->owner = owner;
124 ns->group = group;
125 INIT_WORK(&ns->work, free_user_ns);
126 for (i = 0; i < UCOUNT_COUNTS; i++) {
127 ns->ucount_max[i] = INT_MAX;
128 }
129 ns->ucounts = ucounts;
130
131 /* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
132 mutex_lock(&userns_state_mutex);
133 ns->flags = parent_ns->flags;
134 mutex_unlock(&userns_state_mutex);
135
136 #ifdef CONFIG_PERSISTENT_KEYRINGS
137 init_rwsem(&ns->persistent_keyring_register_sem);
138 #endif
139 ret = -ENOMEM;
140 if (!setup_userns_sysctls(ns))
141 goto fail_keyring;
142
143 set_cred_user_ns(new, ns);
144 return 0;
145 fail_keyring:
146 #ifdef CONFIG_PERSISTENT_KEYRINGS
147 key_put(ns->persistent_keyring_register);
148 #endif
149 ns_free_inum(&ns->ns);
150 fail_free:
151 kmem_cache_free(user_ns_cachep, ns);
152 fail_dec:
153 dec_user_namespaces(ucounts);
154 fail:
155 return ret;
156 }
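
/*
 * Illustrative only: from userspace a new user namespace is normally
 * created with unshare(CLONE_NEWUSER) or clone(..., CLONE_NEWUSER),
 * both of which reach create_user_ns() via copy_creds() or
 * unshare_userns() below.
 */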
157
158 int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
159 {
160 struct cred *cred;
161 int err = -ENOMEM;
162
163 if (!(unshare_flags & CLONE_NEWUSER))
164 return 0;
165
166 cred = prepare_creds();
167 if (cred) {
168 err = create_user_ns(cred);
169 if (err)
170 put_cred(cred);
171 else
172 *new_cred = cred;
173 }
174
175 return err;
176 }
177
178 static void free_user_ns(struct work_struct *work)
179 {
180 struct user_namespace *parent, *ns =
181 container_of(work, struct user_namespace, work);
182
183 do {
184 struct ucounts *ucounts = ns->ucounts;
185 parent = ns->parent;
186 if (ns->gid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
187 kfree(ns->gid_map.forward);
188 kfree(ns->gid_map.reverse);
189 }
190 if (ns->uid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
191 kfree(ns->uid_map.forward);
192 kfree(ns->uid_map.reverse);
193 }
194 if (ns->projid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
195 kfree(ns->projid_map.forward);
196 kfree(ns->projid_map.reverse);
197 }
198 retire_userns_sysctls(ns);
199 #ifdef CONFIG_PERSISTENT_KEYRINGS
200 key_put(ns->persistent_keyring_register);
201 #endif
202 ns_free_inum(&ns->ns);
203 kmem_cache_free(user_ns_cachep, ns);
204 dec_user_namespaces(ucounts);
205 ns = parent;
206 } while (atomic_dec_and_test(&parent->count));
207 }
208
209 void __put_user_ns(struct user_namespace *ns)
210 {
211 schedule_work(&ns->work);
212 }
213 EXPORT_SYMBOL(__put_user_ns);
214
215 /**
216 * idmap_key struct holds the information necessary to find an idmapping in a
217 * sorted idmap array. It is passed to cmp_map_id() as first argument.
218 */
219 struct idmap_key {
220 bool map_up; /* true -> id from kid; false -> kid from id */
221 u32 id; /* id to find */
222 u32 count; /* == 0 unless used with map_id_range_down() */
223 };
224
225 /**
226 * cmp_map_id - Function to be passed to bsearch() to find the requested
227 * idmapping. Expects struct idmap_key to be passed via @k.
228 */
229 static int cmp_map_id(const void *k, const void *e)
230 {
231 u32 first, last, id2;
232 const struct idmap_key *key = k;
233 const struct uid_gid_extent *el = e;
234
235 id2 = key->id + key->count - 1;
236
237 /* handle map_id_{down,up}() */
238 if (key->map_up)
239 first = el->lower_first;
240 else
241 first = el->first;
242
243 last = first + el->count - 1;
244
245 if (key->id >= first && key->id <= last &&
246 (id2 >= first && id2 <= last))
247 return 0;
248
249 if (key->id < first || id2 < first)
250 return -1;
251
252 return 1;
253 }
254
255 /**
256 * map_id_range_down_max - Find idmap via binary search in ordered idmap array.
257 * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
258 */
259 static struct uid_gid_extent *
260 map_id_range_down_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
261 {
262 struct idmap_key key;
263
264 key.map_up = false;
265 key.count = count;
266 key.id = id;
267
268 return bsearch(&key, map->forward, extents,
269 sizeof(struct uid_gid_extent), cmp_map_id);
270 }
271
272 /**
273 * map_id_range_down_base - Find idmap via linear search in the static extent array.
274 * Can only be called if the number of mappings is less than or equal to
275 * UID_GID_MAP_MAX_BASE_EXTENTS.
276 */
277 static struct uid_gid_extent *
278 map_id_range_down_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
279 {
280 unsigned idx;
281 u32 first, last, id2;
282
283 id2 = id + count - 1;
284
285 /* Find the matching extent */
286 for (idx = 0; idx < extents; idx++) {
287 first = map->extent[idx].first;
288 last = first + map->extent[idx].count - 1;
289 if (id >= first && id <= last &&
290 (id2 >= first && id2 <= last))
291 return &map->extent[idx];
292 }
293 return NULL;
294 }
295
296 static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
297 {
298 struct uid_gid_extent *extent;
299 unsigned extents = map->nr_extents;
300 smp_rmb();
301
302 if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
303 extent = map_id_range_down_base(extents, map, id, count);
304 else
305 extent = map_id_range_down_max(extents, map, id, count);
306
307 /* Map the id or note failure */
308 if (extent)
309 id = (id - extent->first) + extent->lower_first;
310 else
311 id = (u32) -1;
312
313 return id;
314 }
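
/*
 * Worked example with a hypothetical extent { first = 0, lower_first = 1000,
 * count = 5 }: map_id_range_down() maps id 3 to (3 - 0) + 1000 = 1003, while
 * id 7 lies outside the extent and maps to (u32) -1.
 */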
315
316 static u32 map_id_down(struct uid_gid_map *map, u32 id)
317 {
318 return map_id_range_down(map, id, 1);
319 }
320
321 /**
322 * map_id_up_base - Find idmap via linear search in the static extent array.
323 * Can only be called if the number of mappings is less than or equal to
324 * UID_GID_MAP_MAX_BASE_EXTENTS.
325 */
326 static struct uid_gid_extent *
327 map_id_up_base(unsigned extents, struct uid_gid_map *map, u32 id)
328 {
329 unsigned idx;
330 u32 first, last;
331
332 /* Find the matching extent */
333 for (idx = 0; idx < extents; idx++) {
334 first = map->extent[idx].lower_first;
335 last = first + map->extent[idx].count - 1;
336 if (id >= first && id <= last)
337 return &map->extent[idx];
338 }
339 return NULL;
340 }
341
342 /**
343 * map_id_up_max - Find idmap via binary search in ordered idmap array.
344 * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
345 */
346 static struct uid_gid_extent *
347 map_id_up_max(unsigned extents, struct uid_gid_map *map, u32 id)
348 {
349 struct idmap_key key;
350
351 key.map_up = true;
352 key.count = 1;
353 key.id = id;
354
355 return bsearch(&key, map->reverse, extents,
356 sizeof(struct uid_gid_extent), cmp_map_id);
357 }
358
359 static u32 map_id_up(struct uid_gid_map *map, u32 id)
360 {
361 struct uid_gid_extent *extent;
362 unsigned extents = map->nr_extents;
363 smp_rmb();
364
365 if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
366 extent = map_id_up_base(extents, map, id);
367 else
368 extent = map_id_up_max(extents, map, id);
369
370 /* Map the id or note failure */
371 if (extent)
372 id = (id - extent->lower_first) + extent->first;
373 else
374 id = (u32) -1;
375
376 return id;
377 }
378
379 /**
380 * make_kuid - Map a user-namespace uid pair into a kuid.
381 * @ns: User namespace that the uid is in
382 * @uid: User identifier
383 *
384 * Maps a user-namespace uid pair into a kernel internal kuid,
385 * and returns that kuid.
386 *
387 * When there is no mapping defined for the user-namespace uid
388 * pair INVALID_UID is returned. Callers are expected to test
389 * for and handle INVALID_UID being returned. INVALID_UID
390 * may be tested for using uid_valid().
391 */
392 kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
393 {
394 /* Map the uid to a global kernel uid */
395 return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
396 }
397 EXPORT_SYMBOL(make_kuid);
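
/*
 * Typical caller pattern (illustrative, not taken from this file): convert a
 * userspace-supplied uid and reject it if no mapping exists, e.g.
 *
 *	kuid_t kuid = make_kuid(current_user_ns(), uid);
 *	if (!uid_valid(kuid))
 *		return -EINVAL;
 */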
398
399 /**
400 * from_kuid - Create a uid from a kuid user-namespace pair.
401 * @targ: The user namespace we want a uid in.
402 * @kuid: The kernel internal uid to start with.
403 *
404 * Map @kuid into the user-namespace specified by @targ and
405 * return the resulting uid.
406 *
407 * There is always a mapping into the initial user_namespace.
408 *
409 * If @kuid has no mapping in @targ (uid_t)-1 is returned.
410 */
411 uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
412 {
413 /* Map the uid from a global kernel uid */
414 return map_id_up(&targ->uid_map, __kuid_val(kuid));
415 }
416 EXPORT_SYMBOL(from_kuid);
417
418 /**
419 * from_kuid_munged - Create a uid from a kuid user-namespace pair.
420 * @targ: The user namespace we want a uid in.
421 * @kuid: The kernel internal uid to start with.
422 *
423 * Map @kuid into the user-namespace specified by @targ and
424 * return the resulting uid.
425 *
426 * There is always a mapping into the initial user_namespace.
427 *
428 * Unlike from_kuid, from_kuid_munged never fails and always
429 * returns a valid uid. This makes from_kuid_munged appropriate
430 * for use in syscalls like stat and getuid where failing the
431 * system call and failing to provide a valid uid are not
432 * options.
433 *
434 * If @kuid has no mapping in @targ overflowuid is returned.
435 */
436 uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
437 {
438 uid_t uid;
439 uid = from_kuid(targ, kuid);
440
441 if (uid == (uid_t) -1)
442 uid = overflowuid;
443 return uid;
444 }
445 EXPORT_SYMBOL(from_kuid_munged);
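
/*
 * Illustrative use (not taken from this file): id-reporting paths such as
 * stat() can do
 *
 *	uid_t uid = from_kuid_munged(current_user_ns(), inode->i_uid);
 *
 * so that an unmapped kuid degrades to overflowuid instead of failing the
 * system call.
 */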
446
447 /**
448 * make_kgid - Map a user-namespace gid pair into a kgid.
449 * @ns: User namespace that the gid is in
450 * @gid: group identifier
451 *
452 * Maps a user-namespace gid pair into a kernel internal kgid,
453 * and returns that kgid.
454 *
455 * When there is no mapping defined for the user-namespace gid
456 * pair INVALID_GID is returned. Callers are expected to test
457 * for and handle INVALID_GID being returned. INVALID_GID may be
458 * tested for using gid_valid().
459 */
460 kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
461 {
462 /* Map the gid to a global kernel gid */
463 return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
464 }
465 EXPORT_SYMBOL(make_kgid);
466
467 /**
468 * from_kgid - Create a gid from a kgid user-namespace pair.
469 * @targ: The user namespace we want a gid in.
470 * @kgid: The kernel internal gid to start with.
471 *
472 * Map @kgid into the user-namespace specified by @targ and
473 * return the resulting gid.
474 *
475 * There is always a mapping into the initial user_namespace.
476 *
477 * If @kgid has no mapping in @targ (gid_t)-1 is returned.
478 */
479 gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
480 {
481 /* Map the gid from a global kernel gid */
482 return map_id_up(&targ->gid_map, __kgid_val(kgid));
483 }
484 EXPORT_SYMBOL(from_kgid);
485
486 /**
487 * from_kgid_munged - Create a gid from a kgid user-namespace pair.
488 * @targ: The user namespace we want a gid in.
489 * @kgid: The kernel internal gid to start with.
490 *
491 * Map @kgid into the user-namespace specified by @targ and
492 * return the resulting gid.
493 *
494 * There is always a mapping into the initial user_namespace.
495 *
496 * Unlike from_kgid, from_kgid_munged never fails and always
497 * returns a valid gid. This makes from_kgid_munged appropriate
498 * for use in syscalls like stat and getgid where failing the
499 * system call and failing to provide a valid gid are not options.
500 *
501 * If @kgid has no mapping in @targ overflowgid is returned.
502 */
503 gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
504 {
505 gid_t gid;
506 gid = from_kgid(targ, kgid);
507
508 if (gid == (gid_t) -1)
509 gid = overflowgid;
510 return gid;
511 }
512 EXPORT_SYMBOL(from_kgid_munged);
513
514 /**
515 * make_kprojid - Map a user-namespace projid pair into a kprojid.
516 * @ns: User namespace that the projid is in
517 * @projid: Project identifier
518 *
519 * Maps a user-namespace projid pair into a kernel internal kprojid,
520 * and returns that kprojid.
521 *
522 * When there is no mapping defined for the user-namespace projid
523 * pair INVALID_PROJID is returned. Callers are expected to test
524 * for and handle INVALID_PROJID being returned. INVALID_PROJID
525 * may be tested for using projid_valid().
526 */
527 kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
528 {
529 /* Map the projid to a global kernel projid */
530 return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
531 }
532 EXPORT_SYMBOL(make_kprojid);
533
534 /**
535 * from_kprojid - Create a projid from a kprojid user-namespace pair.
536 * @targ: The user namespace we want a projid in.
537 * @kprojid: The kernel internal project identifier to start with.
538 *
539 * Map @kprojid into the user-namespace specified by @targ and
540 * return the resulting projid.
541 *
542 * There is always a mapping into the initial user_namespace.
543 *
544 * If @kprojid has no mapping in @targ (projid_t)-1 is returned.
545 */
546 projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
547 {
548 /* Map the projid from a global kernel projid */
549 return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
550 }
551 EXPORT_SYMBOL(from_kprojid);
552
553 /**
554 * from_kprojid_munged - Create a projid from a kprojid user-namespace pair.
555 * @targ: The user namespace we want a projid in.
556 * @kprojid: The kernel internal projid to start with.
557 *
558 * Map @kprojid into the user-namespace specified by @targ and
559 * return the resulting projid.
560 *
561 * There is always a mapping into the initial user_namespace.
562 *
563 * Unlike from_kprojid, from_kprojid_munged never fails and always
564 * returns a valid projid. This makes from_kprojid_munged
565 * appropriate for use in syscalls like stat, where
566 * failing the system call and failing to provide a valid projid are
567 * not options.
568 *
569 * If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
570 */
571 projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
572 {
573 projid_t projid;
574 projid = from_kprojid(targ, kprojid);
575
576 if (projid == (projid_t) -1)
577 projid = OVERFLOW_PROJID;
578 return projid;
579 }
580 EXPORT_SYMBOL(from_kprojid_munged);
581
582
583 static int uid_m_show(struct seq_file *seq, void *v)
584 {
585 struct user_namespace *ns = seq->private;
586 struct uid_gid_extent *extent = v;
587 struct user_namespace *lower_ns;
588 uid_t lower;
589
590 lower_ns = seq_user_ns(seq);
591 if ((lower_ns == ns) && lower_ns->parent)
592 lower_ns = lower_ns->parent;
593
594 lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));
595
596 seq_printf(seq, "%10u %10u %10u\n",
597 extent->first,
598 lower,
599 extent->count);
600
601 return 0;
602 }
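
/*
 * Example of the resulting /proc/<pid>/uid_map output (illustrative values),
 * printed as "first lower count" in right-aligned %10u fields:
 *
 *	         0       1000          1
 */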
603
604 static int gid_m_show(struct seq_file *seq, void *v)
605 {
606 struct user_namespace *ns = seq->private;
607 struct uid_gid_extent *extent = v;
608 struct user_namespace *lower_ns;
609 gid_t lower;
610
611 lower_ns = seq_user_ns(seq);
612 if ((lower_ns == ns) && lower_ns->parent)
613 lower_ns = lower_ns->parent;
614
615 lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));
616
617 seq_printf(seq, "%10u %10u %10u\n",
618 extent->first,
619 lower,
620 extent->count);
621
622 return 0;
623 }
624
625 static int projid_m_show(struct seq_file *seq, void *v)
626 {
627 struct user_namespace *ns = seq->private;
628 struct uid_gid_extent *extent = v;
629 struct user_namespace *lower_ns;
630 projid_t lower;
631
632 lower_ns = seq_user_ns(seq);
633 if ((lower_ns == ns) && lower_ns->parent)
634 lower_ns = lower_ns->parent;
635
636 lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));
637
638 seq_printf(seq, "%10u %10u %10u\n",
639 extent->first,
640 lower,
641 extent->count);
642
643 return 0;
644 }
645
646 static void *m_start(struct seq_file *seq, loff_t *ppos,
647 struct uid_gid_map *map)
648 {
649 loff_t pos = *ppos;
650 unsigned extents = map->nr_extents;
651 smp_rmb();
652
653 if (pos >= extents)
654 return NULL;
655
656 if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
657 return &map->extent[pos];
658
659 return &map->forward[pos];
660 }
661
662 static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
663 {
664 struct user_namespace *ns = seq->private;
665
666 return m_start(seq, ppos, &ns->uid_map);
667 }
668
669 static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
670 {
671 struct user_namespace *ns = seq->private;
672
673 return m_start(seq, ppos, &ns->gid_map);
674 }
675
676 static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
677 {
678 struct user_namespace *ns = seq->private;
679
680 return m_start(seq, ppos, &ns->projid_map);
681 }
682
683 static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
684 {
685 (*pos)++;
686 return seq->op->start(seq, pos);
687 }
688
689 static void m_stop(struct seq_file *seq, void *v)
690 {
691 return;
692 }
693
694 const struct seq_operations proc_uid_seq_operations = {
695 .start = uid_m_start,
696 .stop = m_stop,
697 .next = m_next,
698 .show = uid_m_show,
699 };
700
701 const struct seq_operations proc_gid_seq_operations = {
702 .start = gid_m_start,
703 .stop = m_stop,
704 .next = m_next,
705 .show = gid_m_show,
706 };
707
708 const struct seq_operations proc_projid_seq_operations = {
709 .start = projid_m_start,
710 .stop = m_stop,
711 .next = m_next,
712 .show = projid_m_show,
713 };
714
715 static bool mappings_overlap(struct uid_gid_map *new_map,
716 struct uid_gid_extent *extent)
717 {
718 u32 upper_first, lower_first, upper_last, lower_last;
719 unsigned idx;
720
721 upper_first = extent->first;
722 lower_first = extent->lower_first;
723 upper_last = upper_first + extent->count - 1;
724 lower_last = lower_first + extent->count - 1;
725
726 for (idx = 0; idx < new_map->nr_extents; idx++) {
727 u32 prev_upper_first, prev_lower_first;
728 u32 prev_upper_last, prev_lower_last;
729 struct uid_gid_extent *prev;
730
731 if (new_map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
732 prev = &new_map->extent[idx];
733 else
734 prev = &new_map->forward[idx];
735
736 prev_upper_first = prev->first;
737 prev_lower_first = prev->lower_first;
738 prev_upper_last = prev_upper_first + prev->count - 1;
739 prev_lower_last = prev_lower_first + prev->count - 1;
740
741 /* Does the upper range intersect a previous extent? */
742 if ((prev_upper_first <= upper_last) &&
743 (prev_upper_last >= upper_first))
744 return true;
745
746 /* Does the lower range intersect a previous extent? */
747 if ((prev_lower_first <= lower_last) &&
748 (prev_lower_last >= lower_first))
749 return true;
750 }
751 return false;
752 }
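
/*
 * Example (hypothetical ids): with an existing extent mapping ids 0-9 to
 * 1000-1009, a new extent "5 2000 1" intersects the upper range (5 falls
 * inside 0-9), so mappings_overlap() returns true and the write is rejected.
 */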
753
754 /**
755 * insert_extent - Safely insert a new idmap extent into struct uid_gid_map.
756 * Takes care to allocate a 4K block of memory if the number of mappings exceeds
757 * UID_GID_MAP_MAX_BASE_EXTENTS.
758 */
759 static int insert_extent(struct uid_gid_map *map, struct uid_gid_extent *extent)
760 {
761 struct uid_gid_extent *dest;
762
763 if (map->nr_extents == UID_GID_MAP_MAX_BASE_EXTENTS) {
764 struct uid_gid_extent *forward;
765
766 /* Allocate memory for 340 mappings. */
767 forward = kmalloc_array(UID_GID_MAP_MAX_EXTENTS,
768 sizeof(struct uid_gid_extent),
769 GFP_KERNEL);
770 if (!forward)
771 return -ENOMEM;
772
773 /* Copy over memory. Only set up memory for the forward pointer.
774 * Defer the memory setup for the reverse pointer.
775 */
776 memcpy(forward, map->extent,
777 map->nr_extents * sizeof(map->extent[0]));
778
779 map->forward = forward;
780 map->reverse = NULL;
781 }
782
783 if (map->nr_extents < UID_GID_MAP_MAX_BASE_EXTENTS)
784 dest = &map->extent[map->nr_extents];
785 else
786 dest = &map->forward[map->nr_extents];
787
788 *dest = *extent;
789 map->nr_extents++;
790 return 0;
791 }
792
793 /* cmp function to sort() forward mappings */
794 static int cmp_extents_forward(const void *a, const void *b)
795 {
796 const struct uid_gid_extent *e1 = a;
797 const struct uid_gid_extent *e2 = b;
798
799 if (e1->first < e2->first)
800 return -1;
801
802 if (e1->first > e2->first)
803 return 1;
804
805 return 0;
806 }
807
808 /* cmp function to sort() reverse mappings */
809 static int cmp_extents_reverse(const void *a, const void *b)
810 {
811 const struct uid_gid_extent *e1 = a;
812 const struct uid_gid_extent *e2 = b;
813
814 if (e1->lower_first < e2->lower_first)
815 return -1;
816
817 if (e1->lower_first > e2->lower_first)
818 return 1;
819
820 return 0;
821 }
822
823 /**
824 * sort_idmaps - Sorts an array of idmap entries.
825 * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
826 */
827 static int sort_idmaps(struct uid_gid_map *map)
828 {
829 if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
830 return 0;
831
832 /* Sort forward array. */
833 sort(map->forward, map->nr_extents, sizeof(struct uid_gid_extent),
834 cmp_extents_forward, NULL);
835
836 /* Only copy the memory from forward we actually need. */
837 map->reverse = kmemdup(map->forward,
838 map->nr_extents * sizeof(struct uid_gid_extent),
839 GFP_KERNEL);
840 if (!map->reverse)
841 return -ENOMEM;
842
843 /* Sort reverse array. */
844 sort(map->reverse, map->nr_extents, sizeof(struct uid_gid_extent),
845 cmp_extents_reverse, NULL);
846
847 return 0;
848 }
849
850 static ssize_t map_write(struct file *file, const char __user *buf,
851 size_t count, loff_t *ppos,
852 int cap_setid,
853 struct uid_gid_map *map,
854 struct uid_gid_map *parent_map)
855 {
856 struct seq_file *seq = file->private_data;
857 struct user_namespace *ns = seq->private;
858 struct uid_gid_map new_map;
859 unsigned idx;
860 struct uid_gid_extent extent;
861 char *kbuf = NULL, *pos, *next_line;
862 ssize_t ret;
863
864 /* Only allow < page size writes at the beginning of the file */
865 if ((*ppos != 0) || (count >= PAGE_SIZE))
866 return -EINVAL;
867
868 /* Slurp in the user data */
869 kbuf = memdup_user_nul(buf, count);
870 if (IS_ERR(kbuf))
871 return PTR_ERR(kbuf);
872
873 /*
874 * The userns_state_mutex serializes all writes to any given map.
875 *
876 * Any map is only ever written once.
877 *
878 * An id map fits within 1 cache line on most architectures.
879 *
880 * On read nothing needs to be done unless you are on an
881 * architecture with a crazy cache coherency model like alpha.
882 *
883 * There is a one time data dependency between reading the
884 * count of the extents and the values of the extents. The
885 * desired behavior is to see the values of the extents that
886 * were written before the count of the extents.
887 *
888 * To achieve this smp_wmb() is used to guarantee the write
889 * order and smp_rmb() guarantees that we don't have crazy
890 * architectures returning stale data.
891 */
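/*
 * Sketch of that pairing (the reader side is map_id_range_down()/map_id_up()
 * and m_start() above):
 *
 *	writer (this function)          reader
 *	write extents                   extents = map->nr_extents;
 *	smp_wmb();                      smp_rmb();
 *	map->nr_extents = n;            read extents[0..extents-1]
 */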
892 mutex_lock(&userns_state_mutex);
893
894 memset(&new_map, 0, sizeof(struct uid_gid_map));
895
896 ret = -EPERM;
897 /* Only allow one successful write to the map */
898 if (map->nr_extents != 0)
899 goto out;
900
901 /*
902 * Adjusting namespace settings requires capabilities on the target.
903 */
904 if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
905 goto out;
906
907 /* Parse the user data */
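/* Each line must be "<first> <lower_first> <count>"; for example, a typical
 * single-extent write from userspace is "0 1000 1", mapping id 0 inside the
 * namespace to id 1000 in the parent namespace.
 */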
908 ret = -EINVAL;
909 pos = kbuf;
910 for (; pos; pos = next_line) {
911
912 /* Find the end of line and ensure I don't look past it */
913 next_line = strchr(pos, '\n');
914 if (next_line) {
915 *next_line = '\0';
916 next_line++;
917 if (*next_line == '\0')
918 next_line = NULL;
919 }
920
921 pos = skip_spaces(pos);
922 extent.first = simple_strtoul(pos, &pos, 10);
923 if (!isspace(*pos))
924 goto out;
925
926 pos = skip_spaces(pos);
927 extent.lower_first = simple_strtoul(pos, &pos, 10);
928 if (!isspace(*pos))
929 goto out;
930
931 pos = skip_spaces(pos);
932 extent.count = simple_strtoul(pos, &pos, 10);
933 if (*pos && !isspace(*pos))
934 goto out;
935
936 /* Verify there is no trailing junk on the line */
937 pos = skip_spaces(pos);
938 if (*pos != '\0')
939 goto out;
940
941 /* Verify we have been given valid starting values */
942 if ((extent.first == (u32) -1) ||
943 (extent.lower_first == (u32) -1))
944 goto out;
945
946 /* Verify count is not zero and does not cause the
947 * extent to wrap
948 */
949 if ((extent.first + extent.count) <= extent.first)
950 goto out;
951 if ((extent.lower_first + extent.count) <=
952 extent.lower_first)
953 goto out;
954
955 /* Do the ranges in extent overlap any previous extents? */
956 if (mappings_overlap(&new_map, &extent))
957 goto out;
958
959 if ((new_map.nr_extents + 1) == UID_GID_MAP_MAX_EXTENTS &&
960 (next_line != NULL))
961 goto out;
962
963 ret = insert_extent(&new_map, &extent);
964 if (ret < 0)
965 goto out;
966 ret = -EINVAL;
967 }
968 /* Be very certain the new map actually exists */
969 if (new_map.nr_extents == 0)
970 goto out;
971
972 ret = -EPERM;
973 /* Validate the user is allowed to use the user ids being mapped to. */
974 if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
975 goto out;
976
977 ret = sort_idmaps(&new_map);
978 if (ret < 0)
979 goto out;
980
981 ret = -EPERM;
982 /* Map the lower ids from the parent user namespace to the
983 * kernel global id space.
984 */
985 for (idx = 0; idx < new_map.nr_extents; idx++) {
986 struct uid_gid_extent *e;
987 u32 lower_first;
988
989 if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
990 e = &new_map.extent[idx];
991 else
992 e = &new_map.forward[idx];
993
994 lower_first = map_id_range_down(parent_map,
995 e->lower_first,
996 e->count);
997
998 /* Fail if we can not map the specified extent to
999 * the kernel global id space.
1000 */
1001 if (lower_first == (u32) -1)
1002 goto out;
1003
1004 e->lower_first = lower_first;
1005 }
1006
1007 /* Install the map */
1008 if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
1009 memcpy(map->extent, new_map.extent,
1010 new_map.nr_extents * sizeof(new_map.extent[0]));
1011 } else {
1012 map->forward = new_map.forward;
1013 map->reverse = new_map.reverse;
1014 }
1015 smp_wmb();
1016 map->nr_extents = new_map.nr_extents;
1017
1018 *ppos = count;
1019 ret = count;
1020 out:
1021 if (ret < 0 && new_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
1022 kfree(new_map.forward);
1023 kfree(new_map.reverse);
1024 map->forward = NULL;
1025 map->reverse = NULL;
1026 map->nr_extents = 0;
1027 }
1028
1029 mutex_unlock(&userns_state_mutex);
1030 kfree(kbuf);
1031 return ret;
1032 }
1033
1034 ssize_t proc_uid_map_write(struct file *file, const char __user *buf,
1035 size_t size, loff_t *ppos)
1036 {
1037 struct seq_file *seq = file->private_data;
1038 struct user_namespace *ns = seq->private;
1039 struct user_namespace *seq_ns = seq_user_ns(seq);
1040
1041 if (!ns->parent)
1042 return -EPERM;
1043
1044 if ((seq_ns != ns) && (seq_ns != ns->parent))
1045 return -EPERM;
1046
1047 return map_write(file, buf, size, ppos, CAP_SETUID,
1048 &ns->uid_map, &ns->parent->uid_map);
1049 }
1050
1051 ssize_t proc_gid_map_write(struct file *file, const char __user *buf,
1052 size_t size, loff_t *ppos)
1053 {
1054 struct seq_file *seq = file->private_data;
1055 struct user_namespace *ns = seq->private;
1056 struct user_namespace *seq_ns = seq_user_ns(seq);
1057
1058 if (!ns->parent)
1059 return -EPERM;
1060
1061 if ((seq_ns != ns) && (seq_ns != ns->parent))
1062 return -EPERM;
1063
1064 return map_write(file, buf, size, ppos, CAP_SETGID,
1065 &ns->gid_map, &ns->parent->gid_map);
1066 }
1067
1068 ssize_t proc_projid_map_write(struct file *file, const char __user *buf,
1069 size_t size, loff_t *ppos)
1070 {
1071 struct seq_file *seq = file->private_data;
1072 struct user_namespace *ns = seq->private;
1073 struct user_namespace *seq_ns = seq_user_ns(seq);
1074
1075 if (!ns->parent)
1076 return -EPERM;
1077
1078 if ((seq_ns != ns) && (seq_ns != ns->parent))
1079 return -EPERM;
1080
1081 /* Anyone can set any valid project id; no capability needed */
1082 return map_write(file, buf, size, ppos, -1,
1083 &ns->projid_map, &ns->parent->projid_map);
1084 }
1085
1086 static bool new_idmap_permitted(const struct file *file,
1087 struct user_namespace *ns, int cap_setid,
1088 struct uid_gid_map *new_map)
1089 {
1090 const struct cred *cred = file->f_cred;
1091 /* Don't allow mappings that would allow anything that wouldn't
1092 * be allowed without the establishment of unprivileged mappings.
1093 */
1094 if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
1095 uid_eq(ns->owner, cred->euid)) {
1096 u32 id = new_map->extent[0].lower_first;
1097 if (cap_setid == CAP_SETUID) {
1098 kuid_t uid = make_kuid(ns->parent, id);
1099 if (uid_eq(uid, cred->euid))
1100 return true;
1101 } else if (cap_setid == CAP_SETGID) {
1102 kgid_t gid = make_kgid(ns->parent, id);
1103 if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
1104 gid_eq(gid, cred->egid))
1105 return true;
1106 }
1107 }
1108
1109 /* Allow anyone to set a mapping that doesn't require privilege */
1110 if (!cap_valid(cap_setid))
1111 return true;
1112
1113 /* Allow the specified ids if we have the appropriate capability
1114 * (CAP_SETUID or CAP_SETGID) over the parent user namespace,
1115 * and the opener of the id file also had the appropriate capability.
1116 */
1117 if (ns_capable(ns->parent, cap_setid) &&
1118 file_ns_capable(file, ns->parent, cap_setid))
1119 return true;
1120
1121 return false;
1122 }
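
/*
 * Example of the unprivileged case above (hypothetical ids): a process whose
 * euid owns the namespace may write the single line "0 1000 1" to uid_map
 * without CAP_SETUID, provided id 1000 in the parent maps to its own euid.
 */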
1123
1124 int proc_setgroups_show(struct seq_file *seq, void *v)
1125 {
1126 struct user_namespace *ns = seq->private;
1127 unsigned long userns_flags = READ_ONCE(ns->flags);
1128
1129 seq_printf(seq, "%s\n",
1130 (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
1131 "allow" : "deny");
1132 return 0;
1133 }
1134
1135 ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
1136 size_t count, loff_t *ppos)
1137 {
1138 struct seq_file *seq = file->private_data;
1139 struct user_namespace *ns = seq->private;
1140 char kbuf[8], *pos;
1141 bool setgroups_allowed;
1142 ssize_t ret;
1143
1144 /* Only allow a very narrow range of strings to be written */
1145 ret = -EINVAL;
1146 if ((*ppos != 0) || (count >= sizeof(kbuf)))
1147 goto out;
1148
1149 /* What was written? */
1150 ret = -EFAULT;
1151 if (copy_from_user(kbuf, buf, count))
1152 goto out;
1153 kbuf[count] = '\0';
1154 pos = kbuf;
1155
1156 /* What is being requested? */
1157 ret = -EINVAL;
1158 if (strncmp(pos, "allow", 5) == 0) {
1159 pos += 5;
1160 setgroups_allowed = true;
1161 }
1162 else if (strncmp(pos, "deny", 4) == 0) {
1163 pos += 4;
1164 setgroups_allowed = false;
1165 }
1166 else
1167 goto out;
1168
1169 /* Verify there is no trailing junk on the line */
1170 pos = skip_spaces(pos);
1171 if (*pos != '\0')
1172 goto out;
1173
1174 ret = -EPERM;
1175 mutex_lock(&userns_state_mutex);
1176 if (setgroups_allowed) {
1177 /* Enabling setgroups after setgroups has been disabled
1178 * is not allowed.
1179 */
1180 if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
1181 goto out_unlock;
1182 } else {
1183 /* Permanently disabling setgroups after setgroups has
1184 * been enabled by writing the gid_map is not allowed.
1185 */
1186 if (ns->gid_map.nr_extents != 0)
1187 goto out_unlock;
1188 ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
1189 }
1190 mutex_unlock(&userns_state_mutex);
1191
1192 /* Report a successful write */
1193 *ppos = count;
1194 ret = count;
1195 out:
1196 return ret;
1197 out_unlock:
1198 mutex_unlock(&userns_state_mutex);
1199 goto out;
1200 }
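
/*
 * Illustrative sequence for an unprivileged namespace setup: write "deny" to
 * /proc/<pid>/setgroups before writing gid_map, since once gid_map has been
 * written setgroups can no longer be disabled (see the checks above).
 */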
1201
1202 bool userns_may_setgroups(const struct user_namespace *ns)
1203 {
1204 bool allowed;
1205
1206 mutex_lock(&userns_state_mutex);
1207 /* It is not safe to use setgroups until a gid mapping in
1208 * the user namespace has been established.
1209 */
1210 allowed = ns->gid_map.nr_extents != 0;
1211 /* Is setgroups allowed? */
1212 allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
1213 mutex_unlock(&userns_state_mutex);
1214
1215 return allowed;
1216 }
1217
1218 /*
1219 * Returns true if @child is the same namespace or a descendant of
1220 * @ancestor.
1221 */
1222 bool in_userns(const struct user_namespace *ancestor,
1223 const struct user_namespace *child)
1224 {
1225 const struct user_namespace *ns;
1226 for (ns = child; ns->level > ancestor->level; ns = ns->parent)
1227 ;
1228 return (ns == ancestor);
1229 }
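
/*
 * Example: for a child namespace at level 3 and an ancestor at level 1, the
 * loop above walks up two parents and then compares the two level-1
 * namespaces for equality.
 */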
1230
1231 bool current_in_userns(const struct user_namespace *target_ns)
1232 {
1233 return in_userns(target_ns, current_user_ns());
1234 }
1235 EXPORT_SYMBOL(current_in_userns);
1236
1237 static inline struct user_namespace *to_user_ns(struct ns_common *ns)
1238 {
1239 return container_of(ns, struct user_namespace, ns);
1240 }
1241
1242 static struct ns_common *userns_get(struct task_struct *task)
1243 {
1244 struct user_namespace *user_ns;
1245
1246 rcu_read_lock();
1247 user_ns = get_user_ns(__task_cred(task)->user_ns);
1248 rcu_read_unlock();
1249
1250 return user_ns ? &user_ns->ns : NULL;
1251 }
1252
1253 static void userns_put(struct ns_common *ns)
1254 {
1255 put_user_ns(to_user_ns(ns));
1256 }
1257
1258 static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
1259 {
1260 struct user_namespace *user_ns = to_user_ns(ns);
1261 struct cred *cred;
1262
1263 /* Don't allow gaining capabilities by reentering
1264 * the same user namespace.
1265 */
1266 if (user_ns == current_user_ns())
1267 return -EINVAL;
1268
1269 /* Tasks that share a thread group must share a user namespace */
1270 if (!thread_group_empty(current))
1271 return -EINVAL;
1272
1273 if (current->fs->users != 1)
1274 return -EINVAL;
1275
1276 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
1277 return -EPERM;
1278
1279 cred = prepare_creds();
1280 if (!cred)
1281 return -ENOMEM;
1282
1283 put_user_ns(cred->user_ns);
1284 set_cred_user_ns(cred, get_user_ns(user_ns));
1285
1286 return commit_creds(cred);
1287 }
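
/*
 * Illustrative only: this is reached via setns(fd, CLONE_NEWUSER); the checks
 * above require a single-threaded caller with an unshared fs_struct and
 * CAP_SYS_ADMIN in the target user namespace.
 */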
1288
1289 struct ns_common *ns_get_owner(struct ns_common *ns)
1290 {
1291 struct user_namespace *my_user_ns = current_user_ns();
1292 struct user_namespace *owner, *p;
1293
1294 /* See if the owner is in the current user namespace */
1295 owner = p = ns->ops->owner(ns);
1296 for (;;) {
1297 if (!p)
1298 return ERR_PTR(-EPERM);
1299 if (p == my_user_ns)
1300 break;
1301 p = p->parent;
1302 }
1303
1304 return &get_user_ns(owner)->ns;
1305 }
1306
1307 static struct user_namespace *userns_owner(struct ns_common *ns)
1308 {
1309 return to_user_ns(ns)->parent;
1310 }
1311
1312 const struct proc_ns_operations userns_operations = {
1313 .name = "user",
1314 .type = CLONE_NEWUSER,
1315 .get = userns_get,
1316 .put = userns_put,
1317 .install = userns_install,
1318 .owner = userns_owner,
1319 .get_parent = ns_get_owner,
1320 };
1321
1322 static __init int user_namespaces_init(void)
1323 {
1324 user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC);
1325 return 0;
1326 }
1327 subsys_initcall(user_namespaces_init);
1328