1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Neil Brown <neilb@cse.unsw.edu.au>
4  * J. Bruce Fields <bfields@umich.edu>
5  * Andy Adamson <andros@umich.edu>
6  * Dug Song <dugsong@monkey.org>
7  *
8  * RPCSEC_GSS server authentication.
9  * This implements RPCSEC_GSS as defined in rfc2203 (rpcsec_gss) and rfc2078
10  * (gssapi)
11  *
12  * RPCSEC_GSS involves three stages:
13  *  1/ context creation
14  *  2/ data exchange
15  *  3/ context destruction
16  *
17  * Context creation is handled largely by upcalls to user-space.
18  *  In particular, GSS_Accept_sec_context is handled by an upcall
19  * Data exchange is handled entirely within the kernel
20  *  In particular, GSS_GetMIC, GSS_VerifyMIC, GSS_Seal, GSS_Unseal are in-kernel.
21  * Context destruction is handled in-kernel
22  *  GSS_Delete_sec_context is in-kernel
23  *
24  * Context creation is initiated by an RPCSEC_GSS_INIT request arriving.
25  * The context handle and gss_token are used as a key into the rpcsec_init cache.
26  * The content of this cache includes some of the outputs of GSS_Accept_sec_context,
27  * being major_status, minor_status, context_handle, reply_token.
28  * These are sent back to the client.
29  * Sequence window management is handled by the kernel.  The window size is currently
30  * a compile-time constant.
31  *
32  * When user-space is happy that a context is established, it places an entry
33  * in the rpcsec_context cache. The key for this cache is the context_handle.
34  * The content includes:
35  *   uid/gidlist - for determining access rights
36  *   mechanism type
37  *   mechanism specific information, such as a key
38  *
39  */
40 
41 #include <linux/slab.h>
42 #include <linux/types.h>
43 #include <linux/module.h>
44 #include <linux/pagemap.h>
45 #include <linux/user_namespace.h>
46 
47 #include <linux/sunrpc/auth_gss.h>
48 #include <linux/sunrpc/gss_err.h>
49 #include <linux/sunrpc/svcauth.h>
50 #include <linux/sunrpc/svcauth_gss.h>
51 #include <linux/sunrpc/cache.h>
52 #include <linux/sunrpc/gss_krb5.h>
53 
54 #include <trace/events/rpcgss.h>
55 
56 #include "gss_rpc_upcall.h"
57 
58 /*
59  * Unfortunately there isn't a maximum checksum size exported via the
60  * GSS API. Manufacture one based on GSS mechanisms supported by this
61  * implementation.
62  */
63 #define GSS_MAX_CKSUMSIZE (GSS_KRB5_TOK_HDR_LEN + GSS_KRB5_MAX_CKSUM_LEN)
64 
65 /*
66  * This value may be increased in the future to accommodate other
67  * usage of the scratch buffer.
68  */
69 #define GSS_SCRATCH_SIZE GSS_MAX_CKSUMSIZE
70 
71 struct gss_svc_data {
72 	/* decoded gss client cred: */
73 	struct rpc_gss_wire_cred	clcred;
74 	u32				gsd_databody_offset;
75 	struct rsc			*rsci;
76 
77 	/* for temporary results */
78 	__be32				gsd_seq_num;
79 	u8				gsd_scratch[GSS_SCRATCH_SIZE];
80 };
81 
82 /* The rpcsec_init cache is used for mapping RPCSEC_GSS_{,CONT_}INIT requests
83  * into replies.
84  *
85  * Key is context handle (\x if empty) and gss_token.
86  * Content is major_status, minor_status (integers), context_handle, reply_token.
87  *
88  */
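/* An illustrative sketch of the cache-channel traffic (field values are
 * placeholders, not real data).  rsi_request() below emits an upcall
 * request line of hex-quoted fields:
 *
 *	<in_handle-hex> <in_token-hex>\n
 *
 * and user-space answers with a downcall line that rsi_parse() consumes:
 *
 *	<in_handle-hex> <in_token-hex> <expiry> <major> <minor> <out_handle-hex> <out_token-hex>\n
 */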
89 
90 static int netobj_equal(struct xdr_netobj *a, struct xdr_netobj *b)
91 {
92 	return a->len == b->len && 0 == memcmp(a->data, b->data, a->len);
93 }
94 
95 #define	RSI_HASHBITS	6
96 #define	RSI_HASHMAX	(1<<RSI_HASHBITS)
97 
98 struct rsi {
99 	struct cache_head	h;
100 	struct xdr_netobj	in_handle, in_token;
101 	struct xdr_netobj	out_handle, out_token;
102 	int			major_status, minor_status;
103 	struct rcu_head		rcu_head;
104 };
105 
106 static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct rsi *old);
107 static struct rsi *rsi_lookup(struct cache_detail *cd, struct rsi *item);
108 
109 static void rsi_free(struct rsi *rsii)
110 {
111 	kfree(rsii->in_handle.data);
112 	kfree(rsii->in_token.data);
113 	kfree(rsii->out_handle.data);
114 	kfree(rsii->out_token.data);
115 }
116 
117 static void rsi_free_rcu(struct rcu_head *head)
118 {
119 	struct rsi *rsii = container_of(head, struct rsi, rcu_head);
120 
121 	rsi_free(rsii);
122 	kfree(rsii);
123 }
124 
125 static void rsi_put(struct kref *ref)
126 {
127 	struct rsi *rsii = container_of(ref, struct rsi, h.ref);
128 
129 	call_rcu(&rsii->rcu_head, rsi_free_rcu);
130 }
131 
132 static inline int rsi_hash(struct rsi *item)
133 {
134 	return hash_mem(item->in_handle.data, item->in_handle.len, RSI_HASHBITS)
135 	     ^ hash_mem(item->in_token.data, item->in_token.len, RSI_HASHBITS);
136 }
137 
138 static int rsi_match(struct cache_head *a, struct cache_head *b)
139 {
140 	struct rsi *item = container_of(a, struct rsi, h);
141 	struct rsi *tmp = container_of(b, struct rsi, h);
142 	return netobj_equal(&item->in_handle, &tmp->in_handle) &&
143 	       netobj_equal(&item->in_token, &tmp->in_token);
144 }
145 
146 static int dup_to_netobj(struct xdr_netobj *dst, char *src, int len)
147 {
148 	dst->len = len;
149 	dst->data = (len ? kmemdup(src, len, GFP_KERNEL) : NULL);
150 	if (len && !dst->data)
151 		return -ENOMEM;
152 	return 0;
153 }
154 
155 static inline int dup_netobj(struct xdr_netobj *dst, struct xdr_netobj *src)
156 {
157 	return dup_to_netobj(dst, src->data, src->len);
158 }
159 
160 static void rsi_init(struct cache_head *cnew, struct cache_head *citem)
161 {
162 	struct rsi *new = container_of(cnew, struct rsi, h);
163 	struct rsi *item = container_of(citem, struct rsi, h);
164 
165 	new->out_handle.data = NULL;
166 	new->out_handle.len = 0;
167 	new->out_token.data = NULL;
168 	new->out_token.len = 0;
169 	new->in_handle.len = item->in_handle.len;
170 	item->in_handle.len = 0;
171 	new->in_token.len = item->in_token.len;
172 	item->in_token.len = 0;
173 	new->in_handle.data = item->in_handle.data;
174 	item->in_handle.data = NULL;
175 	new->in_token.data = item->in_token.data;
176 	item->in_token.data = NULL;
177 }
178 
179 static void update_rsi(struct cache_head *cnew, struct cache_head *citem)
180 {
181 	struct rsi *new = container_of(cnew, struct rsi, h);
182 	struct rsi *item = container_of(citem, struct rsi, h);
183 
184 	BUG_ON(new->out_handle.data || new->out_token.data);
185 	new->out_handle.len = item->out_handle.len;
186 	item->out_handle.len = 0;
187 	new->out_token.len = item->out_token.len;
188 	item->out_token.len = 0;
189 	new->out_handle.data = item->out_handle.data;
190 	item->out_handle.data = NULL;
191 	new->out_token.data = item->out_token.data;
192 	item->out_token.data = NULL;
193 
194 	new->major_status = item->major_status;
195 	new->minor_status = item->minor_status;
196 }
197 
198 static struct cache_head *rsi_alloc(void)
199 {
200 	struct rsi *rsii = kmalloc(sizeof(*rsii), GFP_KERNEL);
201 	if (rsii)
202 		return &rsii->h;
203 	else
204 		return NULL;
205 }
206 
207 static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
208 {
209 	return sunrpc_cache_pipe_upcall_timeout(cd, h);
210 }
211 
212 static void rsi_request(struct cache_detail *cd,
213 		       struct cache_head *h,
214 		       char **bpp, int *blen)
215 {
216 	struct rsi *rsii = container_of(h, struct rsi, h);
217 
218 	qword_addhex(bpp, blen, rsii->in_handle.data, rsii->in_handle.len);
219 	qword_addhex(bpp, blen, rsii->in_token.data, rsii->in_token.len);
220 	(*bpp)[-1] = '\n';
221 	WARN_ONCE(*blen < 0,
222 		  "RPCSEC/GSS credential too large - please use gssproxy\n");
223 }
224 
225 static int rsi_parse(struct cache_detail *cd,
226 		    char *mesg, int mlen)
227 {
228 	/* context token expiry major minor context token */
229 	char *buf = mesg;
230 	char *ep;
231 	int len;
232 	struct rsi rsii, *rsip = NULL;
233 	time64_t expiry;
234 	int status = -EINVAL;
235 
236 	memset(&rsii, 0, sizeof(rsii));
237 	/* handle */
238 	len = qword_get(&mesg, buf, mlen);
239 	if (len < 0)
240 		goto out;
241 	status = -ENOMEM;
242 	if (dup_to_netobj(&rsii.in_handle, buf, len))
243 		goto out;
244 
245 	/* token */
246 	len = qword_get(&mesg, buf, mlen);
247 	status = -EINVAL;
248 	if (len < 0)
249 		goto out;
250 	status = -ENOMEM;
251 	if (dup_to_netobj(&rsii.in_token, buf, len))
252 		goto out;
253 
254 	rsip = rsi_lookup(cd, &rsii);
255 	if (!rsip)
256 		goto out;
257 
258 	rsii.h.flags = 0;
259 	/* expiry */
260 	status = get_expiry(&mesg, &expiry);
261 	if (status)
262 		goto out;
263 
264 	status = -EINVAL;
265 	/* major/minor */
266 	len = qword_get(&mesg, buf, mlen);
267 	if (len <= 0)
268 		goto out;
269 	rsii.major_status = simple_strtoul(buf, &ep, 10);
270 	if (*ep)
271 		goto out;
272 	len = qword_get(&mesg, buf, mlen);
273 	if (len <= 0)
274 		goto out;
275 	rsii.minor_status = simple_strtoul(buf, &ep, 10);
276 	if (*ep)
277 		goto out;
278 
279 	/* out_handle */
280 	len = qword_get(&mesg, buf, mlen);
281 	if (len < 0)
282 		goto out;
283 	status = -ENOMEM;
284 	if (dup_to_netobj(&rsii.out_handle, buf, len))
285 		goto out;
286 
287 	/* out_token */
288 	len = qword_get(&mesg, buf, mlen);
289 	status = -EINVAL;
290 	if (len < 0)
291 		goto out;
292 	status = -ENOMEM;
293 	if (dup_to_netobj(&rsii.out_token, buf, len))
294 		goto out;
295 	rsii.h.expiry_time = expiry;
296 	rsip = rsi_update(cd, &rsii, rsip);
297 	status = 0;
298 out:
299 	rsi_free(&rsii);
300 	if (rsip)
301 		cache_put(&rsip->h, cd);
302 	else
303 		status = -ENOMEM;
304 	return status;
305 }
306 
307 static const struct cache_detail rsi_cache_template = {
308 	.owner		= THIS_MODULE,
309 	.hash_size	= RSI_HASHMAX,
310 	.name           = "auth.rpcsec.init",
311 	.cache_put      = rsi_put,
312 	.cache_upcall	= rsi_upcall,
313 	.cache_request  = rsi_request,
314 	.cache_parse    = rsi_parse,
315 	.match		= rsi_match,
316 	.init		= rsi_init,
317 	.update		= update_rsi,
318 	.alloc		= rsi_alloc,
319 };
320 
321 static struct rsi *rsi_lookup(struct cache_detail *cd, struct rsi *item)
322 {
323 	struct cache_head *ch;
324 	int hash = rsi_hash(item);
325 
326 	ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);
327 	if (ch)
328 		return container_of(ch, struct rsi, h);
329 	else
330 		return NULL;
331 }
332 
333 static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct rsi *old)
334 {
335 	struct cache_head *ch;
336 	int hash = rsi_hash(new);
337 
338 	ch = sunrpc_cache_update(cd, &new->h,
339 				 &old->h, hash);
340 	if (ch)
341 		return container_of(ch, struct rsi, h);
342 	else
343 		return NULL;
344 }
345 
346 
347 /*
348  * The rpcsec_context cache is used to store a context that is
349  * used in data exchange.
350  * The key is a context handle. The content is:
351  *  uid, gidlist, mechanism, service-set, mech-specific-data
352  */
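/* Illustrative downcall line for this cache (fields are placeholders only),
 * as consumed by rsc_parse() below:
 *
 *	<ctx_handle-hex> <expiry> <uid> <gid> <N> <gid1>..<gidN> <mech> <mech-data-hex> [<client-principal>]\n
 */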
353 
354 #define	RSC_HASHBITS	10
355 #define	RSC_HASHMAX	(1<<RSC_HASHBITS)
356 
357 #define GSS_SEQ_WIN	128
358 
359 struct gss_svc_seq_data {
360 	/* highest seq number seen so far: */
361 	u32			sd_max;
362 	/* for i such that sd_max-GSS_SEQ_WIN < i <= sd_max, the i-th bit of
363 	 * sd_win is nonzero iff sequence number i has been seen already: */
364 	unsigned long		sd_win[GSS_SEQ_WIN/BITS_PER_LONG];
365 	spinlock_t		sd_lock;
366 };
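/* Worked example (illustrative): with GSS_SEQ_WIN == 128 and sd_max == 900,
 * sequence numbers 773..900 are inside the window and are checked against
 * sd_win for replays, numbers above 900 slide the window forward, and
 * numbers at or below 772 are dropped as too old; see gss_check_seq_num()
 * below.
 */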
367 
368 struct rsc {
369 	struct cache_head	h;
370 	struct xdr_netobj	handle;
371 	struct svc_cred		cred;
372 	struct gss_svc_seq_data	seqdata;
373 	struct gss_ctx		*mechctx;
374 	struct rcu_head		rcu_head;
375 };
376 
377 static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old);
378 static struct rsc *rsc_lookup(struct cache_detail *cd, struct rsc *item);
379 
380 static void rsc_free(struct rsc *rsci)
381 {
382 	kfree(rsci->handle.data);
383 	if (rsci->mechctx)
384 		gss_delete_sec_context(&rsci->mechctx);
385 	free_svc_cred(&rsci->cred);
386 }
387 
388 static void rsc_free_rcu(struct rcu_head *head)
389 {
390 	struct rsc *rsci = container_of(head, struct rsc, rcu_head);
391 
392 	kfree(rsci->handle.data);
393 	kfree(rsci);
394 }
395 
396 static void rsc_put(struct kref *ref)
397 {
398 	struct rsc *rsci = container_of(ref, struct rsc, h.ref);
399 
400 	if (rsci->mechctx)
401 		gss_delete_sec_context(&rsci->mechctx);
402 	free_svc_cred(&rsci->cred);
403 	call_rcu(&rsci->rcu_head, rsc_free_rcu);
404 }
405 
406 static inline int
407 rsc_hash(struct rsc *rsci)
408 {
409 	return hash_mem(rsci->handle.data, rsci->handle.len, RSC_HASHBITS);
410 }
411 
412 static int
413 rsc_match(struct cache_head *a, struct cache_head *b)
414 {
415 	struct rsc *new = container_of(a, struct rsc, h);
416 	struct rsc *tmp = container_of(b, struct rsc, h);
417 
418 	return netobj_equal(&new->handle, &tmp->handle);
419 }
420 
421 static void
422 rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
423 {
424 	struct rsc *new = container_of(cnew, struct rsc, h);
425 	struct rsc *tmp = container_of(ctmp, struct rsc, h);
426 
427 	new->handle.len = tmp->handle.len;
428 	tmp->handle.len = 0;
429 	new->handle.data = tmp->handle.data;
430 	tmp->handle.data = NULL;
431 	new->mechctx = NULL;
432 	init_svc_cred(&new->cred);
433 }
434 
435 static void
436 update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
437 {
438 	struct rsc *new = container_of(cnew, struct rsc, h);
439 	struct rsc *tmp = container_of(ctmp, struct rsc, h);
440 
441 	new->mechctx = tmp->mechctx;
442 	tmp->mechctx = NULL;
443 	memset(&new->seqdata, 0, sizeof(new->seqdata));
444 	spin_lock_init(&new->seqdata.sd_lock);
445 	new->cred = tmp->cred;
446 	init_svc_cred(&tmp->cred);
447 }
448 
449 static struct cache_head *
450 rsc_alloc(void)
451 {
452 	struct rsc *rsci = kmalloc(sizeof(*rsci), GFP_KERNEL);
453 	if (rsci)
454 		return &rsci->h;
455 	else
456 		return NULL;
457 }
458 
459 static int rsc_upcall(struct cache_detail *cd, struct cache_head *h)
460 {
461 	return -EINVAL;
462 }
463 
464 static int rsc_parse(struct cache_detail *cd,
465 		     char *mesg, int mlen)
466 {
467 	/* contexthandle expiry [ uid gid N <n gids> mechname ...mechdata... ] */
468 	char *buf = mesg;
469 	int id;
470 	int len, rv;
471 	struct rsc rsci, *rscp = NULL;
472 	time64_t expiry;
473 	int status = -EINVAL;
474 	struct gss_api_mech *gm = NULL;
475 
476 	memset(&rsci, 0, sizeof(rsci));
477 	/* context handle */
478 	len = qword_get(&mesg, buf, mlen);
479 	if (len < 0) goto out;
480 	status = -ENOMEM;
481 	if (dup_to_netobj(&rsci.handle, buf, len))
482 		goto out;
483 
484 	rsci.h.flags = 0;
485 	/* expiry */
486 	status = get_expiry(&mesg, &expiry);
487 	if (status)
488 		goto out;
489 
490 	status = -EINVAL;
491 	rscp = rsc_lookup(cd, &rsci);
492 	if (!rscp)
493 		goto out;
494 
495 	/* uid, or NEGATIVE */
496 	rv = get_int(&mesg, &id);
497 	if (rv == -EINVAL)
498 		goto out;
499 	if (rv == -ENOENT)
500 		set_bit(CACHE_NEGATIVE, &rsci.h.flags);
501 	else {
502 		int N, i;
503 
504 		/*
505 		 * NOTE: we skip uid_valid()/gid_valid() checks here:
506 		 * instead, -1 id's are later mapped to the
507 		 * (export-specific) anonymous id by nfsd_setuser.
508 		 *
509 		 * (But supplementary gid's get no such special
510 		 * treatment so are checked for validity here.)
511 		 */
512 		/* uid */
513 		rsci.cred.cr_uid = make_kuid(current_user_ns(), id);
514 
515 		/* gid */
516 		if (get_int(&mesg, &id))
517 			goto out;
518 		rsci.cred.cr_gid = make_kgid(current_user_ns(), id);
519 
520 		/* number of additional gid's */
521 		if (get_int(&mesg, &N))
522 			goto out;
523 		if (N < 0 || N > NGROUPS_MAX)
524 			goto out;
525 		status = -ENOMEM;
526 		rsci.cred.cr_group_info = groups_alloc(N);
527 		if (rsci.cred.cr_group_info == NULL)
528 			goto out;
529 
530 		/* gid's */
531 		status = -EINVAL;
532 		for (i=0; i<N; i++) {
533 			kgid_t kgid;
534 			if (get_int(&mesg, &id))
535 				goto out;
536 			kgid = make_kgid(current_user_ns(), id);
537 			if (!gid_valid(kgid))
538 				goto out;
539 			rsci.cred.cr_group_info->gid[i] = kgid;
540 		}
541 		groups_sort(rsci.cred.cr_group_info);
542 
543 		/* mech name */
544 		len = qword_get(&mesg, buf, mlen);
545 		if (len < 0)
546 			goto out;
547 		gm = rsci.cred.cr_gss_mech = gss_mech_get_by_name(buf);
548 		status = -EOPNOTSUPP;
549 		if (!gm)
550 			goto out;
551 
552 		status = -EINVAL;
553 		/* mech-specific data: */
554 		len = qword_get(&mesg, buf, mlen);
555 		if (len < 0)
556 			goto out;
557 		status = gss_import_sec_context(buf, len, gm, &rsci.mechctx,
558 						NULL, GFP_KERNEL);
559 		if (status)
560 			goto out;
561 
562 		/* get client name */
563 		len = qword_get(&mesg, buf, mlen);
564 		if (len > 0) {
565 			rsci.cred.cr_principal = kstrdup(buf, GFP_KERNEL);
566 			if (!rsci.cred.cr_principal) {
567 				status = -ENOMEM;
568 				goto out;
569 			}
570 		}
571 
572 	}
573 	rsci.h.expiry_time = expiry;
574 	rscp = rsc_update(cd, &rsci, rscp);
575 	status = 0;
576 out:
577 	rsc_free(&rsci);
578 	if (rscp)
579 		cache_put(&rscp->h, cd);
580 	else
581 		status = -ENOMEM;
582 	return status;
583 }
584 
585 static const struct cache_detail rsc_cache_template = {
586 	.owner		= THIS_MODULE,
587 	.hash_size	= RSC_HASHMAX,
588 	.name		= "auth.rpcsec.context",
589 	.cache_put	= rsc_put,
590 	.cache_upcall	= rsc_upcall,
591 	.cache_parse	= rsc_parse,
592 	.match		= rsc_match,
593 	.init		= rsc_init,
594 	.update		= update_rsc,
595 	.alloc		= rsc_alloc,
596 };
597 
598 static struct rsc *rsc_lookup(struct cache_detail *cd, struct rsc *item)
599 {
600 	struct cache_head *ch;
601 	int hash = rsc_hash(item);
602 
603 	ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);
604 	if (ch)
605 		return container_of(ch, struct rsc, h);
606 	else
607 		return NULL;
608 }
609 
610 static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old)
611 {
612 	struct cache_head *ch;
613 	int hash = rsc_hash(new);
614 
615 	ch = sunrpc_cache_update(cd, &new->h,
616 				 &old->h, hash);
617 	if (ch)
618 		return container_of(ch, struct rsc, h);
619 	else
620 		return NULL;
621 }
622 
623 
624 static struct rsc *
625 gss_svc_searchbyctx(struct cache_detail *cd, struct xdr_netobj *handle)
626 {
627 	struct rsc rsci;
628 	struct rsc *found;
629 
630 	memset(&rsci, 0, sizeof(rsci));
631 	if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
632 		return NULL;
633 	found = rsc_lookup(cd, &rsci);
634 	rsc_free(&rsci);
635 	if (!found)
636 		return NULL;
637 	if (cache_check(cd, &found->h, NULL))
638 		return NULL;
639 	return found;
640 }
641 
642 /**
643  * gss_check_seq_num - GSS sequence number window check
644  * @rqstp: RPC Call to use when reporting errors
645  * @rsci: cached GSS context state (updated on return)
646  * @seq_num: sequence number to check
647  *
648  * Implements sequence number algorithm as specified in
649  * RFC 2203, Section 5.3.3.1. "Context Management".
650  *
651  * Return values:
652  *   %true: @rqstp's GSS sequence number is inside the window
653  *   %false: @rqstp's GSS sequence number is outside the window
654  */
655 static bool gss_check_seq_num(const struct svc_rqst *rqstp, struct rsc *rsci,
656 			      u32 seq_num)
657 {
658 	struct gss_svc_seq_data *sd = &rsci->seqdata;
659 	bool result = false;
660 
661 	spin_lock(&sd->sd_lock);
662 	if (seq_num > sd->sd_max) {
663 		if (seq_num >= sd->sd_max + GSS_SEQ_WIN) {
664 			memset(sd->sd_win, 0, sizeof(sd->sd_win));
665 			sd->sd_max = seq_num;
666 		} else while (sd->sd_max < seq_num) {
667 			sd->sd_max++;
668 			__clear_bit(sd->sd_max % GSS_SEQ_WIN, sd->sd_win);
669 		}
670 		__set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win);
671 		goto ok;
672 	} else if (seq_num + GSS_SEQ_WIN <= sd->sd_max) {
673 		goto toolow;
674 	}
675 	if (__test_and_set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win))
676 		goto alreadyseen;
677 
678 ok:
679 	result = true;
680 out:
681 	spin_unlock(&sd->sd_lock);
682 	return result;
683 
684 toolow:
685 	trace_rpcgss_svc_seqno_low(rqstp, seq_num,
686 				   sd->sd_max - GSS_SEQ_WIN,
687 				   sd->sd_max);
688 	goto out;
689 alreadyseen:
690 	trace_rpcgss_svc_seqno_seen(rqstp, seq_num);
691 	goto out;
692 }
693 
694 /*
695  * Decode and verify a Call's verifier field. For RPC_AUTH_GSS Calls,
696  * the body of this field contains a variable length checksum.
697  *
698  * GSS-specific auth_stat values are mandated by RFC 2203 Section
699  * 5.3.3.3.
700  */
701 static int
702 svcauth_gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
703 			  __be32 *rpcstart, struct rpc_gss_wire_cred *gc)
704 {
705 	struct xdr_stream	*xdr = &rqstp->rq_arg_stream;
706 	struct gss_ctx		*ctx_id = rsci->mechctx;
707 	u32			flavor, maj_stat;
708 	struct xdr_buf		rpchdr;
709 	struct xdr_netobj	checksum;
710 	struct kvec		iov;
711 
712 	/*
713 	 * Compute the checksum of the incoming Call from the
714 	 * XID field to credential field:
715 	 */
716 	iov.iov_base = rpcstart;
717 	iov.iov_len = (u8 *)xdr->p - (u8 *)rpcstart;
718 	xdr_buf_from_iov(&iov, &rpchdr);
719 
720 	/* Call's verf field: */
721 	if (xdr_stream_decode_opaque_auth(xdr, &flavor,
722 					  (void **)&checksum.data,
723 					  &checksum.len) < 0) {
724 		rqstp->rq_auth_stat = rpc_autherr_badverf;
725 		return SVC_DENIED;
726 	}
727 	if (flavor != RPC_AUTH_GSS) {
728 		rqstp->rq_auth_stat = rpc_autherr_badverf;
729 		return SVC_DENIED;
730 	}
731 
732 	if (rqstp->rq_deferred)
733 		return SVC_OK;
734 	maj_stat = gss_verify_mic(ctx_id, &rpchdr, &checksum);
735 	if (maj_stat != GSS_S_COMPLETE) {
736 		trace_rpcgss_svc_mic(rqstp, maj_stat);
737 		rqstp->rq_auth_stat = rpcsec_gsserr_credproblem;
738 		return SVC_DENIED;
739 	}
740 
741 	if (gc->gc_seq > MAXSEQ) {
742 		trace_rpcgss_svc_seqno_large(rqstp, gc->gc_seq);
743 		rqstp->rq_auth_stat = rpcsec_gsserr_ctxproblem;
744 		return SVC_DENIED;
745 	}
746 	if (!gss_check_seq_num(rqstp, rsci, gc->gc_seq))
747 		return SVC_DROP;
748 	return SVC_OK;
749 }
750 
751 /*
752  * Construct and encode a Reply's verifier field. The verifier's body
753  * field contains a variable-length checksum of the GSS sequence
754  * number.
755  */
756 static bool
757 svcauth_gss_encode_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
758 {
759 	struct gss_svc_data	*gsd = rqstp->rq_auth_data;
760 	u32			maj_stat;
761 	struct xdr_buf		verf_data;
762 	struct xdr_netobj	checksum;
763 	struct kvec		iov;
764 
765 	gsd->gsd_seq_num = cpu_to_be32(seq);
766 	iov.iov_base = &gsd->gsd_seq_num;
767 	iov.iov_len = XDR_UNIT;
768 	xdr_buf_from_iov(&iov, &verf_data);
769 
770 	checksum.data = gsd->gsd_scratch;
771 	maj_stat = gss_get_mic(ctx_id, &verf_data, &checksum);
772 	if (maj_stat != GSS_S_COMPLETE)
773 		goto bad_mic;
774 
775 	return xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream, RPC_AUTH_GSS,
776 					     checksum.data, checksum.len) > 0;
777 
778 bad_mic:
779 	trace_rpcgss_svc_get_mic(rqstp, maj_stat);
780 	return false;
781 }
782 
783 struct gss_domain {
784 	struct auth_domain	h;
785 	u32			pseudoflavor;
786 };
787 
788 static struct auth_domain *
789 find_gss_auth_domain(struct gss_ctx *ctx, u32 svc)
790 {
791 	char *name;
792 
793 	name = gss_service_to_auth_domain_name(ctx->mech_type, svc);
794 	if (!name)
795 		return NULL;
796 	return auth_domain_find(name);
797 }
798 
799 static struct auth_ops svcauthops_gss;
800 
801 u32 svcauth_gss_flavor(struct auth_domain *dom)
802 {
803 	struct gss_domain *gd = container_of(dom, struct gss_domain, h);
804 
805 	return gd->pseudoflavor;
806 }
807 
808 EXPORT_SYMBOL_GPL(svcauth_gss_flavor);
809 
810 struct auth_domain *
811 svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
812 {
813 	struct gss_domain	*new;
814 	struct auth_domain	*test;
815 	int			stat = -ENOMEM;
816 
817 	new = kmalloc(sizeof(*new), GFP_KERNEL);
818 	if (!new)
819 		goto out;
820 	kref_init(&new->h.ref);
821 	new->h.name = kstrdup(name, GFP_KERNEL);
822 	if (!new->h.name)
823 		goto out_free_dom;
824 	new->h.flavour = &svcauthops_gss;
825 	new->pseudoflavor = pseudoflavor;
826 
827 	test = auth_domain_lookup(name, &new->h);
828 	if (test != &new->h) {
829 		pr_warn("svc: duplicate registration of gss pseudo flavour %s.\n",
830 			name);
831 		stat = -EADDRINUSE;
832 		auth_domain_put(test);
833 		goto out_free_name;
834 	}
835 	return test;
836 
837 out_free_name:
838 	kfree(new->h.name);
839 out_free_dom:
840 	kfree(new);
841 out:
842 	return ERR_PTR(stat);
843 }
844 EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor);
845 
846 /*
847  * RFC 2203, Section 5.3.2.2
848  *
849  *	struct rpc_gss_integ_data {
850  *		opaque databody_integ<>;
851  *		opaque checksum<>;
852  *	};
853  *
854  *	struct rpc_gss_data_t {
855  *		unsigned int seq_num;
856  *		proc_req_arg_t arg;
857  *	};
858  */
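/* Roughly, the databody decoded below is laid out on the wire as
 *
 *	<length> <seq_num> <arg...> <checksum length> <checksum>
 *
 * where databody_integ<> is the XDR-encoded rpc_gss_data_t (seq_num plus
 * the clear-text arguments) and checksum<> is the MIC computed over it.
 */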
859 static noinline_for_stack int
860 svcauth_gss_unwrap_integ(struct svc_rqst *rqstp, u32 seq, struct gss_ctx *ctx)
861 {
862 	struct gss_svc_data *gsd = rqstp->rq_auth_data;
863 	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
864 	u32 len, offset, seq_num, maj_stat;
865 	struct xdr_buf *buf = xdr->buf;
866 	struct xdr_buf databody_integ;
867 	struct xdr_netobj checksum;
868 
869 	/* NFS READ normally uses splice to send data in-place. However
870 	 * the data in cache can change after the reply's MIC is computed
871 	 * but before the RPC reply is sent. To prevent the client from
872 	 * rejecting the server-computed MIC in this somewhat rare case,
873 	 * do not use splice with the GSS integrity service.
874 	 */
875 	clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
876 
877 	/* Did we already verify the signature on the original pass through? */
878 	if (rqstp->rq_deferred)
879 		return 0;
880 
881 	if (xdr_stream_decode_u32(xdr, &len) < 0)
882 		goto unwrap_failed;
883 	if (len & 3)
884 		goto unwrap_failed;
885 	offset = xdr_stream_pos(xdr);
886 	if (xdr_buf_subsegment(buf, &databody_integ, offset, len))
887 		goto unwrap_failed;
888 
889 	/*
890 	 * The xdr_stream now points to the @seq_num field. The next
891 	 * XDR data item is the @arg field, which contains the clear
892 	 * text RPC program payload. The checksum, which follows the
893 	 * @arg field, is located and decoded without updating the
894 	 * xdr_stream.
895 	 */
896 
897 	offset += len;
898 	if (xdr_decode_word(buf, offset, &checksum.len))
899 		goto unwrap_failed;
900 	if (checksum.len > sizeof(gsd->gsd_scratch))
901 		goto unwrap_failed;
902 	checksum.data = gsd->gsd_scratch;
903 	if (read_bytes_from_xdr_buf(buf, offset + XDR_UNIT, checksum.data,
904 				    checksum.len))
905 		goto unwrap_failed;
906 
907 	maj_stat = gss_verify_mic(ctx, &databody_integ, &checksum);
908 	if (maj_stat != GSS_S_COMPLETE)
909 		goto bad_mic;
910 
911 	/* The received seqno is protected by the checksum. */
912 	if (xdr_stream_decode_u32(xdr, &seq_num) < 0)
913 		goto unwrap_failed;
914 	if (seq_num != seq)
915 		goto bad_seqno;
916 
917 	xdr_truncate_decode(xdr, XDR_UNIT + checksum.len);
918 	return 0;
919 
920 unwrap_failed:
921 	trace_rpcgss_svc_unwrap_failed(rqstp);
922 	return -EINVAL;
923 bad_seqno:
924 	trace_rpcgss_svc_seqno_bad(rqstp, seq, seq_num);
925 	return -EINVAL;
926 bad_mic:
927 	trace_rpcgss_svc_mic(rqstp, maj_stat);
928 	return -EINVAL;
929 }
930 
931 /*
932  * RFC 2203, Section 5.3.2.3
933  *
934  *	struct rpc_gss_priv_data {
935  *		opaque databody_priv<>
936  *	};
937  *
938  *	struct rpc_gss_data_t {
939  *		unsigned int seq_num;
940  *		proc_req_arg_t arg;
941  *	};
942  */
943 static noinline_for_stack int
944 svcauth_gss_unwrap_priv(struct svc_rqst *rqstp, u32 seq, struct gss_ctx *ctx)
945 {
946 	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
947 	u32 len, maj_stat, seq_num, offset;
948 	struct xdr_buf *buf = xdr->buf;
949 	unsigned int saved_len;
950 
951 	clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
952 
953 	if (xdr_stream_decode_u32(xdr, &len) < 0)
954 		goto unwrap_failed;
955 	if (rqstp->rq_deferred) {
956 		/* Already decrypted last time through! The sequence number
957 		 * check at out_seq is unnecessary but harmless: */
958 		goto out_seq;
959 	}
960 	if (len > xdr_stream_remaining(xdr))
961 		goto unwrap_failed;
962 	offset = xdr_stream_pos(xdr);
963 
964 	saved_len = buf->len;
965 	maj_stat = gss_unwrap(ctx, offset, offset + len, buf);
966 	if (maj_stat != GSS_S_COMPLETE)
967 		goto bad_unwrap;
968 	xdr->nwords -= XDR_QUADLEN(saved_len - buf->len);
969 
970 out_seq:
971 	/* gss_unwrap() decrypted the sequence number. */
972 	if (xdr_stream_decode_u32(xdr, &seq_num) < 0)
973 		goto unwrap_failed;
974 	if (seq_num != seq)
975 		goto bad_seqno;
976 	return 0;
977 
978 unwrap_failed:
979 	trace_rpcgss_svc_unwrap_failed(rqstp);
980 	return -EINVAL;
981 bad_seqno:
982 	trace_rpcgss_svc_seqno_bad(rqstp, seq, seq_num);
983 	return -EINVAL;
984 bad_unwrap:
985 	trace_rpcgss_svc_unwrap(rqstp, maj_stat);
986 	return -EINVAL;
987 }
988 
989 static enum svc_auth_status
990 svcauth_gss_set_client(struct svc_rqst *rqstp)
991 {
992 	struct gss_svc_data *svcdata = rqstp->rq_auth_data;
993 	struct rsc *rsci = svcdata->rsci;
994 	struct rpc_gss_wire_cred *gc = &svcdata->clcred;
995 	int stat;
996 
997 	rqstp->rq_auth_stat = rpc_autherr_badcred;
998 
999 	/*
1000 	 * A gss export can be specified either by:
1001 	 * 	export	*(sec=krb5,rw)
1002 	 * or by
1003 	 * 	export gss/krb5(rw)
1004 	 * The latter is deprecated; but for backwards compatibility reasons
1005 	 * the nfsd code will still fall back on trying it if the former
1006 	 * doesn't work; so we try to make both available to nfsd, below.
1007 	 */
1008 	rqstp->rq_gssclient = find_gss_auth_domain(rsci->mechctx, gc->gc_svc);
1009 	if (rqstp->rq_gssclient == NULL)
1010 		return SVC_DENIED;
1011 	stat = svcauth_unix_set_client(rqstp);
1012 	if (stat == SVC_DROP || stat == SVC_CLOSE)
1013 		return stat;
1014 
1015 	rqstp->rq_auth_stat = rpc_auth_ok;
1016 	return SVC_OK;
1017 }
1018 
1019 static bool
1020 svcauth_gss_proc_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp,
1021 			   struct xdr_netobj *out_handle, int *major_status,
1022 			   u32 seq_num)
1023 {
1024 	struct xdr_stream *xdr = &rqstp->rq_res_stream;
1025 	struct rsc *rsci;
1026 	bool rc;
1027 
1028 	if (*major_status != GSS_S_COMPLETE)
1029 		goto null_verifier;
1030 	rsci = gss_svc_searchbyctx(cd, out_handle);
1031 	if (rsci == NULL) {
1032 		*major_status = GSS_S_NO_CONTEXT;
1033 		goto null_verifier;
1034 	}
1035 
1036 	rc = svcauth_gss_encode_verf(rqstp, rsci->mechctx, seq_num);
1037 	cache_put(&rsci->h, cd);
1038 	return rc;
1039 
1040 null_verifier:
1041 	return xdr_stream_encode_opaque_auth(xdr, RPC_AUTH_NULL, NULL, 0) > 0;
1042 }
1043 
1044 static void gss_free_in_token_pages(struct gssp_in_token *in_token)
1045 {
1046 	u32 inlen;
1047 	int i;
1048 
1049 	i = 0;
1050 	inlen = in_token->page_len;
1051 	while (inlen) {
1052 		if (in_token->pages[i])
1053 			put_page(in_token->pages[i]);
1054 		inlen -= inlen > PAGE_SIZE ? PAGE_SIZE : inlen;
1055 	}
1056 
1057 	kfree(in_token->pages);
1058 	in_token->pages = NULL;
1059 }
1060 
1061 static int gss_read_proxy_verf(struct svc_rqst *rqstp,
1062 			       struct rpc_gss_wire_cred *gc,
1063 			       struct xdr_netobj *in_handle,
1064 			       struct gssp_in_token *in_token)
1065 {
1066 	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
1067 	unsigned int length, pgto_offs, pgfrom_offs;
1068 	int pages, i, pgto, pgfrom;
1069 	size_t to_offs, from_offs;
1070 	u32 inlen;
1071 
1072 	if (dup_netobj(in_handle, &gc->gc_ctx))
1073 		return SVC_CLOSE;
1074 
1075 	/*
1076 	 *  RFC 2203 Section 5.2.2
1077 	 *
1078 	 *	struct rpc_gss_init_arg {
1079 	 *		opaque gss_token<>;
1080 	 *	};
1081 	 */
1082 	if (xdr_stream_decode_u32(xdr, &inlen) < 0)
1083 		goto out_denied_free;
1084 	if (inlen > xdr_stream_remaining(xdr))
1085 		goto out_denied_free;
1086 
1087 	pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
1088 	in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
1089 	if (!in_token->pages)
1090 		goto out_denied_free;
1091 	in_token->page_base = 0;
1092 	in_token->page_len = inlen;
1093 	for (i = 0; i < pages; i++) {
1094 		in_token->pages[i] = alloc_page(GFP_KERNEL);
1095 		if (!in_token->pages[i]) {
1096 			gss_free_in_token_pages(in_token);
1097 			goto out_denied_free;
1098 		}
1099 	}
1100 
1101 	length = min_t(unsigned int, inlen, (char *)xdr->end - (char *)xdr->p);
1102 	memcpy(page_address(in_token->pages[0]), xdr->p, length);
1103 	inlen -= length;
1104 
1105 	to_offs = length;
1106 	from_offs = rqstp->rq_arg.page_base;
1107 	while (inlen) {
1108 		pgto = to_offs >> PAGE_SHIFT;
1109 		pgfrom = from_offs >> PAGE_SHIFT;
1110 		pgto_offs = to_offs & ~PAGE_MASK;
1111 		pgfrom_offs = from_offs & ~PAGE_MASK;
1112 
1113 		length = min_t(unsigned int, inlen,
1114 			 min_t(unsigned int, PAGE_SIZE - pgto_offs,
1115 			       PAGE_SIZE - pgfrom_offs));
1116 		memcpy(page_address(in_token->pages[pgto]) + pgto_offs,
1117 		       page_address(rqstp->rq_arg.pages[pgfrom]) + pgfrom_offs,
1118 		       length);
1119 
1120 		to_offs += length;
1121 		from_offs += length;
1122 		inlen -= length;
1123 	}
1124 	return 0;
1125 
1126 out_denied_free:
1127 	kfree(in_handle->data);
1128 	return SVC_DENIED;
1129 }
1130 
1131 /*
1132  * RFC 2203, Section 5.2.3.1.
1133  *
1134  *	struct rpc_gss_init_res {
1135  *		opaque handle<>;
1136  *		unsigned int gss_major;
1137  *		unsigned int gss_minor;
1138  *		unsigned int seq_window;
1139  *		opaque gss_token<>;
1140  *	};
1141  */
1142 static bool
1143 svcxdr_encode_gss_init_res(struct xdr_stream *xdr,
1144 			   struct xdr_netobj *handle,
1145 			   struct xdr_netobj *gss_token,
1146 			   unsigned int major_status,
1147 			   unsigned int minor_status, u32 seq_num)
1148 {
1149 	if (xdr_stream_encode_opaque(xdr, handle->data, handle->len) < 0)
1150 		return false;
1151 	if (xdr_stream_encode_u32(xdr, major_status) < 0)
1152 		return false;
1153 	if (xdr_stream_encode_u32(xdr, minor_status) < 0)
1154 		return false;
1155 	if (xdr_stream_encode_u32(xdr, seq_num) < 0)
1156 		return false;
1157 	if (xdr_stream_encode_opaque(xdr, gss_token->data, gss_token->len) < 0)
1158 		return false;
1159 	return true;
1160 }
1161 
1162 /*
1163  * Having read the cred already and found we're in the context
1164  * initiation case, read the verifier and initiate (or check the results
1165  * of) upcalls to userspace for help with context initiation.  If
1166  * the upcall results are available, write the verifier and result.
1167  * Otherwise, drop the request pending an answer to the upcall.
1168  */
1169 static int
1170 svcauth_gss_legacy_init(struct svc_rqst *rqstp,
1171 			struct rpc_gss_wire_cred *gc)
1172 {
1173 	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
1174 	struct rsi *rsip, rsikey;
1175 	__be32 *p;
1176 	u32 len;
1177 	int ret;
1178 	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
1179 
1180 	memset(&rsikey, 0, sizeof(rsikey));
1181 	if (dup_netobj(&rsikey.in_handle, &gc->gc_ctx))
1182 		return SVC_CLOSE;
1183 
1184 	/*
1185 	 *  RFC 2203 Section 5.2.2
1186 	 *
1187 	 *	struct rpc_gss_init_arg {
1188 	 *		opaque gss_token<>;
1189 	 *	};
1190 	 */
1191 	if (xdr_stream_decode_u32(xdr, &len) < 0) {
1192 		kfree(rsikey.in_handle.data);
1193 		return SVC_DENIED;
1194 	}
1195 	p = xdr_inline_decode(xdr, len);
1196 	if (!p) {
1197 		kfree(rsikey.in_handle.data);
1198 		return SVC_DENIED;
1199 	}
1200 	rsikey.in_token.data = kmalloc(len, GFP_KERNEL);
1201 	if (ZERO_OR_NULL_PTR(rsikey.in_token.data)) {
1202 		kfree(rsikey.in_handle.data);
1203 		return SVC_CLOSE;
1204 	}
1205 	memcpy(rsikey.in_token.data, p, len);
1206 	rsikey.in_token.len = len;
1207 
1208 	/* Perform upcall, or find upcall result: */
1209 	rsip = rsi_lookup(sn->rsi_cache, &rsikey);
1210 	rsi_free(&rsikey);
1211 	if (!rsip)
1212 		return SVC_CLOSE;
1213 	if (cache_check(sn->rsi_cache, &rsip->h, &rqstp->rq_chandle) < 0)
1214 		/* No upcall result: */
1215 		return SVC_CLOSE;
1216 
1217 	ret = SVC_CLOSE;
1218 	if (!svcauth_gss_proc_init_verf(sn->rsc_cache, rqstp, &rsip->out_handle,
1219 					&rsip->major_status, GSS_SEQ_WIN))
1220 		goto out;
1221 	if (!svcxdr_set_accept_stat(rqstp))
1222 		goto out;
1223 	if (!svcxdr_encode_gss_init_res(&rqstp->rq_res_stream, &rsip->out_handle,
1224 					&rsip->out_token, rsip->major_status,
1225 					rsip->minor_status, GSS_SEQ_WIN))
1226 		goto out;
1227 
1228 	ret = SVC_COMPLETE;
1229 out:
1230 	cache_put(&rsip->h, sn->rsi_cache);
1231 	return ret;
1232 }
1233 
1234 static int gss_proxy_save_rsc(struct cache_detail *cd,
1235 				struct gssp_upcall_data *ud,
1236 				uint64_t *handle)
1237 {
1238 	struct rsc rsci, *rscp = NULL;
1239 	static atomic64_t ctxhctr;
1240 	long long ctxh;
1241 	struct gss_api_mech *gm = NULL;
1242 	time64_t expiry;
1243 	int status;
1244 
1245 	memset(&rsci, 0, sizeof(rsci));
1246 	/* context handle */
1247 	status = -ENOMEM;
1248 	/* the handle needs to be just a unique id,
1249 	 * use a static counter */
1250 	ctxh = atomic64_inc_return(&ctxhctr);
1251 
1252 	/* make a copy for the caller */
1253 	*handle = ctxh;
1254 
1255 	/* make a copy for the rsc cache */
1256 	if (dup_to_netobj(&rsci.handle, (char *)handle, sizeof(uint64_t)))
1257 		goto out;
1258 	rscp = rsc_lookup(cd, &rsci);
1259 	if (!rscp)
1260 		goto out;
1261 
1262 	/* creds */
1263 	if (!ud->found_creds) {
1264 		/* userspace seems buggy, we should always get at least a
1265 		 * mapping to nobody */
1266 		goto out;
1267 	} else {
1268 		struct timespec64 boot;
1269 
1270 		/* steal creds */
1271 		rsci.cred = ud->creds;
1272 		memset(&ud->creds, 0, sizeof(struct svc_cred));
1273 
1274 		status = -EOPNOTSUPP;
1275 		/* get mech handle from OID */
1276 		gm = gss_mech_get_by_OID(&ud->mech_oid);
1277 		if (!gm)
1278 			goto out;
1279 		rsci.cred.cr_gss_mech = gm;
1280 
1281 		status = -EINVAL;
1282 		/* mech-specific data: */
1283 		status = gss_import_sec_context(ud->out_handle.data,
1284 						ud->out_handle.len,
1285 						gm, &rsci.mechctx,
1286 						&expiry, GFP_KERNEL);
1287 		if (status)
1288 			goto out;
1289 
1290 		getboottime64(&boot);
1291 		expiry -= boot.tv_sec;
1292 	}
1293 
1294 	rsci.h.expiry_time = expiry;
1295 	rscp = rsc_update(cd, &rsci, rscp);
1296 	status = 0;
1297 out:
1298 	rsc_free(&rsci);
1299 	if (rscp)
1300 		cache_put(&rscp->h, cd);
1301 	else
1302 		status = -ENOMEM;
1303 	return status;
1304 }
1305 
1306 static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
1307 				  struct rpc_gss_wire_cred *gc)
1308 {
1309 	struct xdr_netobj cli_handle;
1310 	struct gssp_upcall_data ud;
1311 	uint64_t handle;
1312 	int status;
1313 	int ret;
1314 	struct net *net = SVC_NET(rqstp);
1315 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1316 
1317 	memset(&ud, 0, sizeof(ud));
1318 	ret = gss_read_proxy_verf(rqstp, gc, &ud.in_handle, &ud.in_token);
1319 	if (ret)
1320 		return ret;
1321 
1322 	ret = SVC_CLOSE;
1323 
1324 	/* Perform synchronous upcall to gss-proxy */
1325 	status = gssp_accept_sec_context_upcall(net, &ud);
1326 	if (status)
1327 		goto out;
1328 
1329 	trace_rpcgss_svc_accept_upcall(rqstp, ud.major_status, ud.minor_status);
1330 
1331 	switch (ud.major_status) {
1332 	case GSS_S_CONTINUE_NEEDED:
1333 		cli_handle = ud.out_handle;
1334 		break;
1335 	case GSS_S_COMPLETE:
1336 		status = gss_proxy_save_rsc(sn->rsc_cache, &ud, &handle);
1337 		if (status)
1338 			goto out;
1339 		cli_handle.data = (u8 *)&handle;
1340 		cli_handle.len = sizeof(handle);
1341 		break;
1342 	default:
1343 		goto out;
1344 	}
1345 
1346 	if (!svcauth_gss_proc_init_verf(sn->rsc_cache, rqstp, &cli_handle,
1347 					&ud.major_status, GSS_SEQ_WIN))
1348 		goto out;
1349 	if (!svcxdr_set_accept_stat(rqstp))
1350 		goto out;
1351 	if (!svcxdr_encode_gss_init_res(&rqstp->rq_res_stream, &cli_handle,
1352 					&ud.out_token, ud.major_status,
1353 					ud.minor_status, GSS_SEQ_WIN))
1354 		goto out;
1355 
1356 	ret = SVC_COMPLETE;
1357 out:
1358 	gss_free_in_token_pages(&ud.in_token);
1359 	gssp_free_upcall_data(&ud);
1360 	return ret;
1361 }
1362 
1363 /*
1364  * Try to set the sn->use_gss_proxy variable to a new value. We only allow
1365  * it to be changed if it's currently undefined (-1). If it's any other value
1366  * then return -EBUSY unless the type wouldn't have changed anyway.
1367  */
1368 static int set_gss_proxy(struct net *net, int type)
1369 {
1370 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1371 	int ret;
1372 
1373 	WARN_ON_ONCE(type != 0 && type != 1);
1374 	ret = cmpxchg(&sn->use_gss_proxy, -1, type);
1375 	if (ret != -1 && ret != type)
1376 		return -EBUSY;
1377 	return 0;
1378 }
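/* For example (user-space side, not kernel code): gss-proxy opts itself in
 * by writing "1" to the per-net proc file created below, typically
 *
 *	echo 1 > /proc/net/rpc/use-gss-proxy
 *
 * after which context initiation goes through svcauth_gss_proxy_init()
 * rather than the legacy rpc.svcgssd upcall.
 */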
1379 
1380 static bool use_gss_proxy(struct net *net)
1381 {
1382 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1383 
1384 	/* If use_gss_proxy is still undefined, then try to disable it */
1385 	if (sn->use_gss_proxy == -1)
1386 		set_gss_proxy(net, 0);
1387 	return sn->use_gss_proxy;
1388 }
1389 
1390 static noinline_for_stack int
1391 svcauth_gss_proc_init(struct svc_rqst *rqstp, struct rpc_gss_wire_cred *gc)
1392 {
1393 	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
1394 	u32 flavor, len;
1395 	void *body;
1396 
1397 	/* Call's verf field: */
1398 	if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
1399 		return SVC_GARBAGE;
1400 	if (flavor != RPC_AUTH_NULL || len != 0) {
1401 		rqstp->rq_auth_stat = rpc_autherr_badverf;
1402 		return SVC_DENIED;
1403 	}
1404 
1405 	if (gc->gc_proc == RPC_GSS_PROC_INIT && gc->gc_ctx.len != 0) {
1406 		rqstp->rq_auth_stat = rpc_autherr_badcred;
1407 		return SVC_DENIED;
1408 	}
1409 
1410 	if (!use_gss_proxy(SVC_NET(rqstp)))
1411 		return svcauth_gss_legacy_init(rqstp, gc);
1412 	return svcauth_gss_proxy_init(rqstp, gc);
1413 }
1414 
1415 #ifdef CONFIG_PROC_FS
1416 
1417 static ssize_t write_gssp(struct file *file, const char __user *buf,
1418 			 size_t count, loff_t *ppos)
1419 {
1420 	struct net *net = pde_data(file_inode(file));
1421 	char tbuf[20];
1422 	unsigned long i;
1423 	int res;
1424 
1425 	if (*ppos || count > sizeof(tbuf)-1)
1426 		return -EINVAL;
1427 	if (copy_from_user(tbuf, buf, count))
1428 		return -EFAULT;
1429 
1430 	tbuf[count] = 0;
1431 	res = kstrtoul(tbuf, 0, &i);
1432 	if (res)
1433 		return res;
1434 	if (i != 1)
1435 		return -EINVAL;
1436 	res = set_gssp_clnt(net);
1437 	if (res)
1438 		return res;
1439 	res = set_gss_proxy(net, 1);
1440 	if (res)
1441 		return res;
1442 	return count;
1443 }
1444 
1445 static ssize_t read_gssp(struct file *file, char __user *buf,
1446 			 size_t count, loff_t *ppos)
1447 {
1448 	struct net *net = pde_data(file_inode(file));
1449 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1450 	unsigned long p = *ppos;
1451 	char tbuf[10];
1452 	size_t len;
1453 
1454 	snprintf(tbuf, sizeof(tbuf), "%d\n", sn->use_gss_proxy);
1455 	len = strlen(tbuf);
1456 	if (p >= len)
1457 		return 0;
1458 	len -= p;
1459 	if (len > count)
1460 		len = count;
1461 	if (copy_to_user(buf, (void *)(tbuf+p), len))
1462 		return -EFAULT;
1463 	*ppos += len;
1464 	return len;
1465 }
1466 
1467 static const struct proc_ops use_gss_proxy_proc_ops = {
1468 	.proc_open	= nonseekable_open,
1469 	.proc_write	= write_gssp,
1470 	.proc_read	= read_gssp,
1471 };
1472 
1473 static int create_use_gss_proxy_proc_entry(struct net *net)
1474 {
1475 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1476 	struct proc_dir_entry **p = &sn->use_gssp_proc;
1477 
1478 	sn->use_gss_proxy = -1;
1479 	*p = proc_create_data("use-gss-proxy", S_IFREG | 0600,
1480 			      sn->proc_net_rpc,
1481 			      &use_gss_proxy_proc_ops, net);
1482 	if (!*p)
1483 		return -ENOMEM;
1484 	init_gssp_clnt(sn);
1485 	return 0;
1486 }
1487 
1488 static void destroy_use_gss_proxy_proc_entry(struct net *net)
1489 {
1490 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1491 
1492 	if (sn->use_gssp_proc) {
1493 		remove_proc_entry("use-gss-proxy", sn->proc_net_rpc);
1494 		clear_gssp_clnt(sn);
1495 	}
1496 }
1497 
1498 static ssize_t read_gss_krb5_enctypes(struct file *file, char __user *buf,
1499 				      size_t count, loff_t *ppos)
1500 {
1501 	struct rpcsec_gss_oid oid = {
1502 		.len	= 9,
1503 		.data	= "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02",
1504 	};
1505 	struct gss_api_mech *mech;
1506 	ssize_t ret;
1507 
1508 	mech = gss_mech_get_by_OID(&oid);
1509 	if (!mech)
1510 		return 0;
1511 	if (!mech->gm_upcall_enctypes) {
1512 		gss_mech_put(mech);
1513 		return 0;
1514 	}
1515 
1516 	ret = simple_read_from_buffer(buf, count, ppos,
1517 				      mech->gm_upcall_enctypes,
1518 				      strlen(mech->gm_upcall_enctypes));
1519 	gss_mech_put(mech);
1520 	return ret;
1521 }
1522 
1523 static const struct proc_ops gss_krb5_enctypes_proc_ops = {
1524 	.proc_open	= nonseekable_open,
1525 	.proc_read	= read_gss_krb5_enctypes,
1526 };
1527 
1528 static int create_krb5_enctypes_proc_entry(struct net *net)
1529 {
1530 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1531 
1532 	sn->gss_krb5_enctypes =
1533 		proc_create_data("gss_krb5_enctypes", S_IFREG | 0444,
1534 				 sn->proc_net_rpc, &gss_krb5_enctypes_proc_ops,
1535 				 net);
1536 	return sn->gss_krb5_enctypes ? 0 : -ENOMEM;
1537 }
1538 
1539 static void destroy_krb5_enctypes_proc_entry(struct net *net)
1540 {
1541 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1542 
1543 	if (sn->gss_krb5_enctypes)
1544 		remove_proc_entry("gss_krb5_enctypes", sn->proc_net_rpc);
1545 }
1546 
1547 #else /* CONFIG_PROC_FS */
1548 
1549 static int create_use_gss_proxy_proc_entry(struct net *net)
1550 {
1551 	return 0;
1552 }
1553 
1554 static void destroy_use_gss_proxy_proc_entry(struct net *net) {}
1555 
1556 static int create_krb5_enctypes_proc_entry(struct net *net)
1557 {
1558 	return 0;
1559 }
1560 
1561 static void destroy_krb5_enctypes_proc_entry(struct net *net) {}
1562 
1563 #endif /* CONFIG_PROC_FS */
1564 
1565 /*
1566  * The Call's credential body should contain a struct rpc_gss_cred_t.
1567  *
1568  * RFC 2203 Section 5
1569  *
1570  *	struct rpc_gss_cred_t {
1571  *		union switch (unsigned int version) {
1572  *		case RPCSEC_GSS_VERS_1:
1573  *			struct {
1574  *				rpc_gss_proc_t gss_proc;
1575  *				unsigned int seq_num;
1576  *				rpc_gss_service_t service;
1577  *				opaque handle<>;
1578  *			} rpc_gss_cred_vers_1_t;
1579  *		}
1580  *	};
1581  */
1582 static bool
1583 svcauth_gss_decode_credbody(struct xdr_stream *xdr,
1584 			    struct rpc_gss_wire_cred *gc,
1585 			    __be32 **rpcstart)
1586 {
1587 	ssize_t handle_len;
1588 	u32 body_len;
1589 	__be32 *p;
1590 
1591 	p = xdr_inline_decode(xdr, XDR_UNIT);
1592 	if (!p)
1593 		return false;
1594 	/*
1595 	 * start of rpc packet is 7 u32's back from here:
1596 	 * xid direction rpcversion prog vers proc flavour
1597 	 */
1598 	*rpcstart = p - 7;
1599 	body_len = be32_to_cpup(p);
1600 	if (body_len > RPC_MAX_AUTH_SIZE)
1601 		return false;
1602 
1603 	/* struct rpc_gss_cred_t */
1604 	if (xdr_stream_decode_u32(xdr, &gc->gc_v) < 0)
1605 		return false;
1606 	if (xdr_stream_decode_u32(xdr, &gc->gc_proc) < 0)
1607 		return false;
1608 	if (xdr_stream_decode_u32(xdr, &gc->gc_seq) < 0)
1609 		return false;
1610 	if (xdr_stream_decode_u32(xdr, &gc->gc_svc) < 0)
1611 		return false;
1612 	handle_len = xdr_stream_decode_opaque_inline(xdr,
1613 						     (void **)&gc->gc_ctx.data,
1614 						     body_len);
1615 	if (handle_len < 0)
1616 		return false;
1617 	if (body_len != XDR_UNIT * 5 + xdr_align_size(handle_len))
1618 		return false;
1619 
1620 	gc->gc_ctx.len = handle_len;
1621 	return true;
1622 }
1623 
1624 /**
1625  * svcauth_gss_accept - Decode and validate incoming RPC_AUTH_GSS credential
1626  * @rqstp: RPC transaction
1627  *
1628  * Return values:
1629  *   %SVC_OK: Success
1630  *   %SVC_COMPLETE: GSS context lifetime event
1631  *   %SVC_DENIED: Credential or verifier is not valid
1632  *   %SVC_GARBAGE: Failed to decode credential or verifier
1633  *   %SVC_CLOSE: Temporary failure
1634  *
1635  * The rqstp->rq_auth_stat field is also set (see RFCs 2203 and 5531).
1636  */
1637 static enum svc_auth_status
1638 svcauth_gss_accept(struct svc_rqst *rqstp)
1639 {
1640 	struct gss_svc_data *svcdata = rqstp->rq_auth_data;
1641 	__be32		*rpcstart;
1642 	struct rpc_gss_wire_cred *gc;
1643 	struct rsc	*rsci = NULL;
1644 	int		ret;
1645 	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
1646 
1647 	rqstp->rq_auth_stat = rpc_autherr_badcred;
1648 	if (!svcdata)
1649 		svcdata = kmalloc(sizeof(*svcdata), GFP_KERNEL);
1650 	if (!svcdata)
1651 		goto auth_err;
1652 	rqstp->rq_auth_data = svcdata;
1653 	svcdata->gsd_databody_offset = 0;
1654 	svcdata->rsci = NULL;
1655 	gc = &svcdata->clcred;
1656 
1657 	if (!svcauth_gss_decode_credbody(&rqstp->rq_arg_stream, gc, &rpcstart))
1658 		goto auth_err;
1659 	if (gc->gc_v != RPC_GSS_VERSION)
1660 		goto auth_err;
1661 
1662 	switch (gc->gc_proc) {
1663 	case RPC_GSS_PROC_INIT:
1664 	case RPC_GSS_PROC_CONTINUE_INIT:
1665 		if (rqstp->rq_proc != 0)
1666 			goto auth_err;
1667 		return svcauth_gss_proc_init(rqstp, gc);
1668 	case RPC_GSS_PROC_DESTROY:
1669 		if (rqstp->rq_proc != 0)
1670 			goto auth_err;
1671 		fallthrough;
1672 	case RPC_GSS_PROC_DATA:
1673 		rqstp->rq_auth_stat = rpcsec_gsserr_credproblem;
1674 		rsci = gss_svc_searchbyctx(sn->rsc_cache, &gc->gc_ctx);
1675 		if (!rsci)
1676 			goto auth_err;
1677 		switch (svcauth_gss_verify_header(rqstp, rsci, rpcstart, gc)) {
1678 		case SVC_OK:
1679 			break;
1680 		case SVC_DENIED:
1681 			goto auth_err;
1682 		case SVC_DROP:
1683 			goto drop;
1684 		}
1685 		break;
1686 	default:
1687 		if (rqstp->rq_proc != 0)
1688 			goto auth_err;
1689 		rqstp->rq_auth_stat = rpc_autherr_rejectedcred;
1690 		goto auth_err;
1691 	}
1692 
1693 	/* now act upon the command: */
1694 	switch (gc->gc_proc) {
1695 	case RPC_GSS_PROC_DESTROY:
1696 		if (!svcauth_gss_encode_verf(rqstp, rsci->mechctx, gc->gc_seq))
1697 			goto auth_err;
1698 		if (!svcxdr_set_accept_stat(rqstp))
1699 			goto auth_err;
1700 		/* Delete the entry from the cache_list and call cache_put */
1701 		sunrpc_cache_unhash(sn->rsc_cache, &rsci->h);
1702 		goto complete;
1703 	case RPC_GSS_PROC_DATA:
1704 		rqstp->rq_auth_stat = rpcsec_gsserr_ctxproblem;
1705 		if (!svcauth_gss_encode_verf(rqstp, rsci->mechctx, gc->gc_seq))
1706 			goto auth_err;
1707 		if (!svcxdr_set_accept_stat(rqstp))
1708 			goto auth_err;
1709 		svcdata->gsd_databody_offset = xdr_stream_pos(&rqstp->rq_res_stream);
1710 		rqstp->rq_cred = rsci->cred;
1711 		get_group_info(rsci->cred.cr_group_info);
1712 		rqstp->rq_auth_stat = rpc_autherr_badcred;
1713 		switch (gc->gc_svc) {
1714 		case RPC_GSS_SVC_NONE:
1715 			break;
1716 		case RPC_GSS_SVC_INTEGRITY:
1717 			/* placeholders for body length and seq. number: */
1718 			xdr_reserve_space(&rqstp->rq_res_stream, XDR_UNIT * 2);
1719 			if (svcauth_gss_unwrap_integ(rqstp, gc->gc_seq,
1720 						     rsci->mechctx))
1721 				goto garbage_args;
1722 			svcxdr_set_auth_slack(rqstp, RPC_MAX_AUTH_SIZE);
1723 			break;
1724 		case RPC_GSS_SVC_PRIVACY:
1725 			/* placeholders for body length and seq. number: */
1726 			xdr_reserve_space(&rqstp->rq_res_stream, XDR_UNIT * 2);
1727 			if (svcauth_gss_unwrap_priv(rqstp, gc->gc_seq,
1728 						    rsci->mechctx))
1729 				goto garbage_args;
1730 			svcxdr_set_auth_slack(rqstp, RPC_MAX_AUTH_SIZE * 2);
1731 			break;
1732 		default:
1733 			goto auth_err;
1734 		}
1735 		svcdata->rsci = rsci;
1736 		cache_get(&rsci->h);
1737 		rqstp->rq_cred.cr_flavor = gss_svc_to_pseudoflavor(
1738 					rsci->mechctx->mech_type,
1739 					GSS_C_QOP_DEFAULT,
1740 					gc->gc_svc);
1741 		ret = SVC_OK;
1742 		trace_rpcgss_svc_authenticate(rqstp, gc);
1743 		goto out;
1744 	}
1745 garbage_args:
1746 	ret = SVC_GARBAGE;
1747 	goto out;
1748 auth_err:
1749 	xdr_truncate_encode(&rqstp->rq_res_stream, XDR_UNIT * 2);
1750 	ret = SVC_DENIED;
1751 	goto out;
1752 complete:
1753 	ret = SVC_COMPLETE;
1754 	goto out;
1755 drop:
1756 	ret = SVC_CLOSE;
1757 out:
1758 	if (rsci)
1759 		cache_put(&rsci->h, sn->rsc_cache);
1760 	return ret;
1761 }
1762 
1763 static u32
1764 svcauth_gss_prepare_to_wrap(struct svc_rqst *rqstp, struct gss_svc_data *gsd)
1765 {
1766 	u32 offset;
1767 
1768 	/* Release can be called twice, but we only wrap once. */
1769 	offset = gsd->gsd_databody_offset;
1770 	gsd->gsd_databody_offset = 0;
1771 
1772 	/* AUTH_ERROR replies are not wrapped. */
1773 	if (rqstp->rq_auth_stat != rpc_auth_ok)
1774 		return 0;
1775 
1776 	/* Also don't wrap if the accept_stat is nonzero: */
1777 	if (*rqstp->rq_accept_statp != rpc_success)
1778 		return 0;
1779 
1780 	return offset;
1781 }
1782 
1783 /*
1784  * RFC 2203, Section 5.3.2.2
1785  *
1786  *	struct rpc_gss_integ_data {
1787  *		opaque databody_integ<>;
1788  *		opaque checksum<>;
1789  *	};
1790  *
1791  *	struct rpc_gss_data_t {
1792  *		unsigned int seq_num;
1793  *		proc_req_arg_t arg;
1794  *	};
1795  *
1796  * The RPC Reply message has already been XDR-encoded. rq_res_stream
1797  * is now positioned so that the checksum can be written just past
1798  * the RPC Reply message.
1799  */
1800 static int svcauth_gss_wrap_integ(struct svc_rqst *rqstp)
1801 {
1802 	struct gss_svc_data *gsd = rqstp->rq_auth_data;
1803 	struct xdr_stream *xdr = &rqstp->rq_res_stream;
1804 	struct rpc_gss_wire_cred *gc = &gsd->clcred;
1805 	struct xdr_buf *buf = xdr->buf;
1806 	struct xdr_buf databody_integ;
1807 	struct xdr_netobj checksum;
1808 	u32 offset, maj_stat;
1809 
1810 	offset = svcauth_gss_prepare_to_wrap(rqstp, gsd);
1811 	if (!offset)
1812 		goto out;
1813 
1814 	if (xdr_buf_subsegment(buf, &databody_integ, offset + XDR_UNIT,
1815 			       buf->len - offset - XDR_UNIT))
1816 		goto wrap_failed;
1817 	/* Buffer space for these has already been reserved in
1818 	 * svcauth_gss_accept(). */
1819 	if (xdr_encode_word(buf, offset, databody_integ.len))
1820 		goto wrap_failed;
1821 	if (xdr_encode_word(buf, offset + XDR_UNIT, gc->gc_seq))
1822 		goto wrap_failed;
1823 
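	/*
	 * databody_integ now covers the seq_num word plus the encoded
	 * reply results. The MIC is computed over that region into the
	 * per-request scratch buffer and appended as the checksum field.
	 */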
	checksum.data = gsd->gsd_scratch;
	maj_stat = gss_get_mic(gsd->rsci->mechctx, &databody_integ, &checksum);
	if (maj_stat != GSS_S_COMPLETE)
		goto bad_mic;

	if (xdr_stream_encode_opaque(xdr, checksum.data, checksum.len) < 0)
		goto wrap_failed;
	xdr_commit_encode(xdr);

out:
	return 0;

bad_mic:
	trace_rpcgss_svc_get_mic(rqstp, maj_stat);
	return -EINVAL;
wrap_failed:
	trace_rpcgss_svc_wrap_failed(rqstp);
	return -EINVAL;
}

/*
 * RFC 2203, Section 5.3.2.3
 *
 *	struct rpc_gss_priv_data {
 *		opaque databody_priv<>
 *	};
 *
 *	struct rpc_gss_data_t {
 *		unsigned int seq_num;
 *		proc_req_arg_t arg;
 *	};
 *
 * gss_wrap() expands the size of the RPC message payload in the
 * response buffer. The main purpose of svcauth_gss_wrap_priv()
 * is to ensure there is adequate space in the response buffer to
 * avoid overflow during the wrap.
 */
static int svcauth_gss_wrap_priv(struct svc_rqst *rqstp)
{
	struct gss_svc_data *gsd = rqstp->rq_auth_data;
	struct rpc_gss_wire_cred *gc = &gsd->clcred;
	struct xdr_buf *buf = &rqstp->rq_res;
	struct kvec *head = buf->head;
	struct kvec *tail = buf->tail;
	u32 offset, pad, maj_stat;
	__be32 *p;

	offset = svcauth_gss_prepare_to_wrap(rqstp, gsd);
	if (!offset)
		return 0;

	/*
	 * Buffer space for this field has already been reserved
	 * in svcauth_gss_accept(). Note that the GSS sequence
	 * number is encrypted along with the RPC reply payload.
	 */
	if (xdr_encode_word(buf, offset + XDR_UNIT, gc->gc_seq))
		goto wrap_failed;

	/*
	 * If there is currently tail data, make sure there is
	 * room for the head, tail, and 2 * RPC_MAX_AUTH_SIZE in
	 * the page, and move the current tail data such that
	 * there is RPC_MAX_AUTH_SIZE slack space available in
	 * both the head and tail.
	 */
	if (tail->iov_base) {
		if (tail->iov_base >= head->iov_base + PAGE_SIZE)
			goto wrap_failed;
		if (tail->iov_base < head->iov_base)
			goto wrap_failed;
		if (tail->iov_len + head->iov_len
				+ 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
			goto wrap_failed;
		memmove(tail->iov_base + RPC_MAX_AUTH_SIZE, tail->iov_base,
			tail->iov_len);
		tail->iov_base += RPC_MAX_AUTH_SIZE;
	}
	/*
	 * If there is no current tail data, make sure there is
	 * room for the head data, and 2 * RPC_MAX_AUTH_SIZE in the
	 * allotted page, and set up tail information such that there
	 * is RPC_MAX_AUTH_SIZE slack space available in both the
	 * head and tail.
	 */
	if (!tail->iov_base) {
		if (head->iov_len + 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
			goto wrap_failed;
		tail->iov_base = head->iov_base
			+ head->iov_len + RPC_MAX_AUTH_SIZE;
		tail->iov_len = 0;
	}

	maj_stat = gss_wrap(gsd->rsci->mechctx, offset + XDR_UNIT, buf,
			    buf->pages);
	if (maj_stat != GSS_S_COMPLETE)
		goto bad_wrap;

	/* Wrapping can change the size of databody_priv. */
	if (xdr_encode_word(buf, offset, buf->len - offset - XDR_UNIT))
		goto wrap_failed;
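	/*
	 * databody_priv is an XDR opaque, so the wrapped token must be
	 * padded out to a four-byte boundary; the zero pad bytes are
	 * appended to the tail kvec.
	 */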
	pad = xdr_pad_size(buf->len - offset - XDR_UNIT);
	p = (__be32 *)(tail->iov_base + tail->iov_len);
	memset(p, 0, pad);
	tail->iov_len += pad;
	buf->len += pad;

	return 0;
wrap_failed:
	trace_rpcgss_svc_wrap_failed(rqstp);
	return -EINVAL;
bad_wrap:
	trace_rpcgss_svc_wrap(rqstp, maj_stat);
	return -ENOMEM;
}

/**
 * svcauth_gss_release - Wrap payload and release resources
 * @rqstp: RPC transaction context
 *
 * Return values:
 *    %0: the Reply is ready to be sent
 *    %-ENOMEM: failed to allocate memory
 *    %-EINVAL: encoding error
 */
static int
svcauth_gss_release(struct svc_rqst *rqstp)
{
	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
	struct gss_svc_data *gsd = rqstp->rq_auth_data;
	struct rpc_gss_wire_cred *gc;
	int stat;

	if (!gsd)
		goto out;
	gc = &gsd->clcred;
	if (gc->gc_proc != RPC_GSS_PROC_DATA)
		goto out;

	switch (gc->gc_svc) {
	case RPC_GSS_SVC_NONE:
		break;
	case RPC_GSS_SVC_INTEGRITY:
		stat = svcauth_gss_wrap_integ(rqstp);
		if (stat)
			goto out_err;
		break;
	case RPC_GSS_SVC_PRIVACY:
		stat = svcauth_gss_wrap_priv(rqstp);
		if (stat)
			goto out_err;
		break;
	/*
	 * For any other gc_svc value, svcauth_gss_accept() already set
	 * the auth_error appropriately; just fall through:
	 */
	}

out:
	stat = 0;
out_err:
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_gssclient)
		auth_domain_put(rqstp->rq_gssclient);
	rqstp->rq_gssclient = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;
	if (gsd && gsd->rsci) {
		cache_put(&gsd->rsci->h, sn->rsc_cache);
		gsd->rsci = NULL;
	}
	return stat;
}

static void
svcauth_gss_domain_release_rcu(struct rcu_head *head)
{
	struct auth_domain *dom = container_of(head, struct auth_domain, rcu_head);
	struct gss_domain *gd = container_of(dom, struct gss_domain, h);

	kfree(dom->name);
	kfree(gd);
}

static void
svcauth_gss_domain_release(struct auth_domain *dom)
{
	call_rcu(&dom->rcu_head, svcauth_gss_domain_release_rcu);
}

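/*
 * Server-side operations for the RPCSEC_GSS flavor; registered with
 * the generic svcauth layer by gss_svc_init() below.
 */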
static struct auth_ops svcauthops_gss = {
	.name		= "rpcsec_gss",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_GSS,
	.accept		= svcauth_gss_accept,
	.release	= svcauth_gss_release,
	.domain_release = svcauth_gss_domain_release,
	.set_client	= svcauth_gss_set_client,
};

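/*
 * Per-net creation and teardown of the rsi and rsc caches used by the
 * RPCSEC_GSS server code.
 */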
static int rsi_cache_create_net(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd;
	int err;

	cd = cache_create_net(&rsi_cache_template, net);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	err = cache_register_net(cd, net);
	if (err) {
		cache_destroy_net(cd, net);
		return err;
	}
	sn->rsi_cache = cd;
	return 0;
}

static void rsi_cache_destroy_net(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd = sn->rsi_cache;

	sn->rsi_cache = NULL;
	cache_purge(cd);
	cache_unregister_net(cd, net);
	cache_destroy_net(cd, net);
}

static int rsc_cache_create_net(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd;
	int err;

	cd = cache_create_net(&rsc_cache_template, net);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	err = cache_register_net(cd, net);
	if (err) {
		cache_destroy_net(cd, net);
		return err;
	}
	sn->rsc_cache = cd;
	return 0;
}

static void rsc_cache_destroy_net(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd = sn->rsc_cache;

	sn->rsc_cache = NULL;
	cache_purge(cd);
	cache_unregister_net(cd, net);
	cache_destroy_net(cd, net);
}

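/**
 * gss_svc_init_net - set up RPCSEC_GSS server state for one namespace
 * @net: network namespace to be initialized
 *
 * Creates the per-net rsc and rsi caches and the use-gss-proxy and
 * krb5 enctypes proc entries, unwinding on failure.
 *
 * Returns zero on success, or a negative errno.
 */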
int
gss_svc_init_net(struct net *net)
{
	int rv;

	rv = rsc_cache_create_net(net);
	if (rv)
		return rv;
	rv = rsi_cache_create_net(net);
	if (rv)
		goto out1;
	rv = create_use_gss_proxy_proc_entry(net);
	if (rv)
		goto out2;

	rv = create_krb5_enctypes_proc_entry(net);
	if (rv)
		goto out3;

	return 0;

out3:
	destroy_use_gss_proxy_proc_entry(net);
out2:
	rsi_cache_destroy_net(net);
out1:
	rsc_cache_destroy_net(net);
	return rv;
}

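/**
 * gss_svc_shutdown_net - tear down RPCSEC_GSS server state for one namespace
 * @net: network namespace being dismantled
 *
 * Releases the proc entries and caches created by gss_svc_init_net().
 */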
void
gss_svc_shutdown_net(struct net *net)
{
	destroy_krb5_enctypes_proc_entry(net);
	destroy_use_gss_proxy_proc_entry(net);
	rsi_cache_destroy_net(net);
	rsc_cache_destroy_net(net);
}

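/*
 * Register the "rpcsec_gss" flavor with the server-side auth framework
 * so that incoming RPC_AUTH_GSS credentials are dispatched to the
 * svcauthops_gss operations above.
 */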
int
gss_svc_init(void)
{
	return svc_auth_register(RPC_AUTH_GSS, &svcauthops_gss);
}

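/*
 * Unregister the RPC_AUTH_GSS flavor from the server-side auth
 * framework.
 */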
void
gss_svc_shutdown(void)
{
	svc_auth_unregister(RPC_AUTH_GSS);
}