net/sunrpc/auth_gss/svcauth_gss.c: lines matching "mic" and "offset"

// SPDX-License-Identifier: GPL-2.0

/*
 * Context creation is handled largely by upcalls to user-space.
 * ...
 * In particular, GSS_GetMIC, GSS_VerifyMIC, GSS_Seal, GSS_Unseal are in-kernel.
 * Context destruction is handled in-kernel:
 * GSS_Delete_sec_context is in-kernel.
 * ...
 * When user-space is happy that a context is established, it places an entry
 * in the rpcsec_context cache, keyed by context handle. The content includes:
 *   uid/gidlist - for determining access rights
 */
/* netobj_equal() */
        return a->len == b->len && 0 == memcmp(a->data, b->data, a->len);
/* rsi_free() */
        kfree(rsii->in_handle.data);
        kfree(rsii->in_token.data);
        kfree(rsii->out_handle.data);
        kfree(rsii->out_token.data);

/* rsi_put() */
        call_rcu(&rsii->rcu_head, rsi_free_rcu);
/* rsi_hash() */
        return hash_mem(item->in_handle.data, item->in_handle.len, RSI_HASHBITS)
               ^ hash_mem(item->in_token.data, item->in_token.len, RSI_HASHBITS);

/* rsi_match() */
        return netobj_equal(&item->in_handle, &tmp->in_handle) &&
               netobj_equal(&item->in_token, &tmp->in_token);
/* dup_to_netobj() */
        dst->len = len;
        dst->data = (len ? kmemdup(src, len, GFP_KERNEL) : NULL);
        if (len && !dst->data)
                return -ENOMEM;

/* dup_netobj() */
        return dup_to_netobj(dst, src->data, src->len);
/* rsi_init() */
        new->out_handle.data = NULL;
        new->out_handle.len = 0;
        new->out_token.data = NULL;
        new->out_token.len = 0;
        new->in_handle.len = item->in_handle.len;
        item->in_handle.len = 0;
        new->in_token.len = item->in_token.len;
        item->in_token.len = 0;
        new->in_handle.data = item->in_handle.data;
        item->in_handle.data = NULL;
        new->in_token.data = item->in_token.data;
        item->in_token.data = NULL;
/* update_rsi() */
        BUG_ON(new->out_handle.data || new->out_token.data);
        new->out_handle.len = item->out_handle.len;
        item->out_handle.len = 0;
        new->out_token.len = item->out_token.len;
        item->out_token.len = 0;
        new->out_handle.data = item->out_handle.data;
        item->out_handle.data = NULL;
        new->out_token.data = item->out_token.data;
        item->out_token.data = NULL;

        new->major_status = item->major_status;
        new->minor_status = item->minor_status;
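A note on the hand-off above: rsi_init() and update_rsi() do not copy the handle/token buffers; they steal the pointer from the template entry and zero the source, so a later rsi_free() on the template cannot double-free. A minimal user-space sketch of that move pattern (the struct and helper names here are illustrative, not kernel API):

        #include <stddef.h>
        #include <stdint.h>

        struct netobj {
                unsigned int len;
                uint8_t *data;
        };

        /* Steal src's buffer for dst and leave src empty, mirroring the
         * pointer hand-off in rsi_init()/update_rsi(): after the move,
         * freeing src->data is a harmless no-op. */
        static void netobj_move(struct netobj *dst, struct netobj *src)
        {
                dst->len = src->len;
                src->len = 0;
                dst->data = src->data;
                src->data = NULL;
        }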
/* rsi_alloc() */
        return &rsii->h;
/* rsi_request() */
        qword_addhex(bpp, blen, rsii->in_handle.data, rsii->in_handle.len);
        qword_addhex(bpp, blen, rsii->in_token.data, rsii->in_token.len);
        (*bpp)[-1] = '\n';
        ...
                "RPCSEC/GSS credential too large - please use gssproxy\n");
/* rsi_parse() */
        int status = -EINVAL;
        ...
        status = -ENOMEM;
        ...
        status = -EINVAL;
        ...
        status = -ENOMEM;
        ...
        status = -EINVAL;
        ...
        status = -ENOMEM;
        ...
        status = -EINVAL;
        ...
        status = -ENOMEM;
        ...
        cache_put(&rsip->h, cd);
        ...
        status = -ENOMEM;
/* rsi_lookup() */
        ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);

/* rsi_update() */
        ch = sunrpc_cache_update(cd, &new->h, &old->h, hash);
/*
 * ...
 * uid, gidlist, mechanism, service-set, mech-specific-data
 */

        /* for i such that sd_max-GSS_SEQ_WIN < i <= sd_max, the i-th bit of
         * sd_win is nonzero iff sequence number i has been seen already: */
/* rsc_free() */
        kfree(rsci->handle.data);
        if (rsci->mechctx)
                gss_delete_sec_context(&rsci->mechctx);
        free_svc_cred(&rsci->cred);

/* rsc_free_rcu() */
        kfree(rsci->handle.data);

/* rsc_put() */
        if (rsci->mechctx)
                gss_delete_sec_context(&rsci->mechctx);
        free_svc_cred(&rsci->cred);
        call_rcu(&rsci->rcu_head, rsc_free_rcu);

/* rsc_hash() */
        return hash_mem(rsci->handle.data, rsci->handle.len, RSC_HASHBITS);

/* rsc_match() */
        return netobj_equal(&new->handle, &tmp->handle);
/* rsc_init() */
        new->handle.len = tmp->handle.len;
        tmp->handle.len = 0;
        new->handle.data = tmp->handle.data;
        tmp->handle.data = NULL;
        new->mechctx = NULL;
        init_svc_cred(&new->cred);

/* update_rsc() */
        new->mechctx = tmp->mechctx;
        tmp->mechctx = NULL;
        memset(&new->seqdata, 0, sizeof(new->seqdata));
        spin_lock_init(&new->seqdata.sd_lock);
        new->cred = tmp->cred;
        init_svc_cred(&tmp->cred);

/* rsc_alloc() */
        return &rsci->h;
/* rsc_upcall() */
        return -EINVAL;
/* rsc_parse() */
        int status = -EINVAL;
        ...
        status = -ENOMEM;
        ...
        status = -EINVAL;
        ...
        if (rv == -EINVAL)
        ...
        if (rv == -ENOENT)
        ...
        /* ...
         * instead, -1 ids are later mapped to the
         * (export-specific) anonymous id by nfsd_setuser. */
        ...
        status = -ENOMEM;
        ...
        status = -EINVAL;
        ...
                rsci.cred.cr_group_info->gid[i] = kgid;
        ...
        status = -EOPNOTSUPP;
        ...
        status = -EINVAL;
        /* mech-specific data: */
        ...
        status = -ENOMEM;
        ...
        cache_put(&rscp->h, cd);
        ...
        status = -ENOMEM;
/* rsc_lookup() */
        ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);

/* rsc_update() */
        ch = sunrpc_cache_update(cd, &new->h, &old->h, hash);
/* gss_svc_searchbyctx() */
        if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
        ...
        if (cache_check(cd, &found->h, NULL))
/**
 * gss_check_seq_num - GSS sequence number window check
 * ...
 */
        struct gss_svc_seq_data *sd = &rsci->seqdata;
        ...
        spin_lock(&sd->sd_lock);
        if (seq_num > sd->sd_max) {
                if (seq_num >= sd->sd_max + GSS_SEQ_WIN) {
                        memset(sd->sd_win, 0, sizeof(sd->sd_win));
                        sd->sd_max = seq_num;
                } else while (sd->sd_max < seq_num) {
                        sd->sd_max++;
                        __clear_bit(sd->sd_max % GSS_SEQ_WIN, sd->sd_win);
                }
                __set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win);
                ...
        } else if (seq_num + GSS_SEQ_WIN <= sd->sd_max) {
                ...
        }
        if (__test_and_set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win))
        ...
        spin_unlock(&sd->sd_lock);
        ...
                sd->sd_max - GSS_SEQ_WIN, sd->sd_max);
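This is the RFC 2203 replay window: sequence numbers that have fallen more than GSS_SEQ_WIN below sd_max are dropped as too old, numbers above sd_max advance (or reset) the window, and __test_and_set_bit() catches replays inside it. A stand-alone sketch of the same algorithm, using a plain bool array in place of the kernel bitmap and omitting the sd_lock serialization:

        #include <stdbool.h>
        #include <stdint.h>
        #include <string.h>

        #define GSS_SEQ_WIN 128

        struct seq_window {
                uint32_t max;               /* highest sequence number accepted */
                bool seen[GSS_SEQ_WIN];     /* seen[i % GSS_SEQ_WIN] for recent i */
        };

        static bool check_seq_num(struct seq_window *sd, uint32_t seq_num)
        {
                if (seq_num > sd->max) {
                        if (seq_num >= sd->max + GSS_SEQ_WIN) {
                                /* Jumped past the whole window: start over. */
                                memset(sd->seen, 0, sizeof(sd->seen));
                                sd->max = seq_num;
                        } else {
                                /* Slide forward, clearing vacated slots. */
                                while (sd->max < seq_num) {
                                        sd->max++;
                                        sd->seen[sd->max % GSS_SEQ_WIN] = false;
                                }
                        }
                        sd->seen[seq_num % GSS_SEQ_WIN] = true;
                        return true;
                }
                if (seq_num + GSS_SEQ_WIN <= sd->max)
                        return false;       /* too old: left the window */
                if (sd->seen[seq_num % GSS_SEQ_WIN])
                        return false;       /* replay inside the window */
                sd->seen[seq_num % GSS_SEQ_WIN] = true;
                return true;
        }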
/* svc_safe_getnetobj() */
        if (argv->iov_len < 4)
                return -1;
        o->len = svc_getnl(argv);
        l = round_up_to_quad(o->len);
        if (argv->iov_len < l)
                return -1;
        o->data = argv->iov_base;
        argv->iov_base += l;
        argv->iov_len -= l;
/* svc_safe_putnetobj() */
        if (resv->iov_len + 4 > PAGE_SIZE)
                return -1;
        svc_putnl(resv, o->len);
        p = resv->iov_base + resv->iov_len;
        resv->iov_len += round_up_to_quad(o->len);
        if (resv->iov_len > PAGE_SIZE)
                return -1;
        memcpy(p, o->data, o->len);
        memset(p + o->len, 0, round_up_to_quad(o->len) - o->len);
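Both helpers rely on the XDR rule that opaque data is zero-padded to a 4-byte boundary; round_up_to_quad() is the kernel's "(len + 3) & ~3" helper. A self-contained sketch of the same on-the-wire netobj layout (big-endian length word, data bytes, zero padding):

        #include <arpa/inet.h>
        #include <assert.h>
        #include <stdint.h>
        #include <string.h>

        /* XDR pads opaque data to a multiple of 4 bytes. */
        static uint32_t round_up_to_quad(uint32_t len)
        {
                return (len + 3) & ~3u;
        }

        /* Encode a netobj as svc_safe_putnetobj() does: big-endian length,
         * the bytes themselves, then zero padding up to the quad boundary.
         * Returns the number of buffer bytes consumed. */
        static size_t put_netobj(uint8_t *buf, const uint8_t *data, uint32_t len)
        {
                uint32_t be = htonl(len);

                memcpy(buf, &be, 4);
                memcpy(buf + 4, data, len);
                memset(buf + 4 + len, 0, round_up_to_quad(len) - len);
                return 4 + round_up_to_quad(len);
        }

        int main(void)
        {
                uint8_t buf[16];
                const uint8_t obj[5] = "hello";

                /* 4-byte length + 5 data bytes + 3 pad bytes = 12 */
                assert(put_netobj(buf, obj, 5) == 12);
                return 0;
        }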
/* ...
 * or return SVC_DENIED and indicate error in rqstp->rq_auth_stat.
 */
/* gss_verify_header() */
        struct gss_ctx *ctx_id = rsci->mechctx;
        ...
        struct kvec *argv = &rqstp->rq_arg.head[0];
        ...
        iov.iov_len = (u8 *)argv->iov_base - (u8 *)rpcstart;
        ...
        rqstp->rq_auth_stat = rpc_autherr_badverf;
        if (argv->iov_len < 4)
        ...
        if (rqstp->rq_deferred) /* skip verification of revisited request */
        ...
        rqstp->rq_auth_stat = rpcsec_gsserr_credproblem;
        ...
        if (gc->gc_seq > MAXSEQ) {
                trace_rpcgss_svc_seqno_large(rqstp, gc->gc_seq);
                rqstp->rq_auth_stat = rpcsec_gsserr_ctxproblem;
        ...
        if (!gss_check_seq_num(rqstp, rsci, gc->gc_seq))
/* gss_write_null_verf() */
        svc_putnl(rqstp->rq_res.head, RPC_AUTH_NULL);
        p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
        /* don't really need to check if head->iov_len > PAGE_SIZE ... */
        ...
                return -1;
/* gss_write_verf() */
        struct xdr_netobj mic;
        ...
        int err = -1;
        ...
        svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS);
        ...
                return -ENOMEM;
        ...
        p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
        mic.data = (u8 *)(p + 1);
        maj_stat = gss_get_mic(ctx_id, &verf_data, &mic);
        ...
        *p++ = htonl(mic.len);
        memset((u8 *)p + mic.len, 0, round_up_to_quad(mic.len) - mic.len);
        p += XDR_QUADLEN(mic.len);
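gss_write_verf() reserves the length slot, points mic.data just past it, lets gss_get_mic() compute the checksum over the big-endian sequence number (or the sequence window, for init replies), then backfills the length word and zero-pads to a quad boundary. A flat-buffer sketch of that reserve-then-fill layout; fake_get_mic() is a stand-in, since a real MIC comes from the GSS mechanism:

        #include <arpa/inet.h>
        #include <stdint.h>
        #include <string.h>

        /* Illustrative stand-in for gss_get_mic(): only models buffer flow. */
        static uint32_t fake_get_mic(const uint8_t *msg, size_t len, uint8_t *mic)
        {
                (void)msg; (void)len;
                memset(mic, 0xAB, 8);   /* pretend an 8-byte checksum */
                return 8;
        }

        /* Verifier layout: flavor word, then a netobj holding the MIC
         * computed over the big-endian sequence number.  The MIC is
         * written past the length slot first; the length word is filled
         * in once the actual MIC size is known. */
        static size_t write_verf(uint8_t *buf, uint32_t seq)
        {
                uint32_t be_seq = htonl(seq);
                uint32_t flavor = htonl(6 /* RPC_AUTH_GSS */);
                uint8_t *mic = buf + 8;          /* past flavor + length slots */
                uint32_t mic_len = fake_get_mic((uint8_t *)&be_seq, 4, mic);
                uint32_t be_len = htonl(mic_len);

                memcpy(buf, &flavor, 4);
                memcpy(buf + 4, &be_len, 4);
                memset(mic + mic_len, 0, (4 - (mic_len & 3)) & 3); /* quad pad */
                return 8 + ((mic_len + 3) & ~3u);
        }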
/* find_gss_auth_domain() */
        name = gss_service_to_auth_domain_name(ctx->mech_type, svc);

/* svcauth_gss_flavor() */
        return gd->pseudoflavor;
/* svcauth_gss_register_pseudoflavor() */
        int stat = -ENOMEM;
        ...
        kref_init(&new->h.ref);
        new->h.name = kstrdup(name, GFP_KERNEL);
        if (!new->h.name)
        ...
        new->h.flavour = &svcauthops_gss;
        new->pseudoflavor = pseudoflavor;
        ...
        test = auth_domain_lookup(name, &new->h);
        if (test != &new->h) {
        ...
                stat = -EADDRINUSE;
        ...
        kfree(new->h.name);
/* unwrap_integ_data() */
        int stat = -EINVAL;
        struct xdr_netobj mic;
        ...
        mic.data = NULL;

        /* NFS READ normally uses splice to send data in-place. However
         * the data in cache can change after the reply's MIC is computed
         * ...
         * rejecting the server-computed MIC in this somewhat rare case,
         * ... */
        __clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
        ...
        if (rqstp->rq_deferred)
        ...
        integ_len = svc_getnl(&buf->head[0]);
        ...
        if (integ_len > buf->len)
        ...
        /* copy out mic... */
        if (read_u32_from_xdr_buf(buf, integ_len, &mic.len))
        ...
        if (mic.len > RPC_MAX_AUTH_SIZE)
        ...
        mic.data = kmalloc(mic.len, GFP_KERNEL);
        if (!mic.data)
        ...
        if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len))
        ...
        maj_stat = gss_verify_mic(ctx, &integ_buf, &mic);
        ...
        rseqno = svc_getnl(&buf->head[0]);
        ...
        /* trim off the mic and padding at the end before returning */
        xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
        ...
        kfree(mic.data);
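On the wire, an integrity-protected request body (RFC 2203 rpc_gss_integ_data) is a length word, the databody (the sequence number followed by the procedure arguments), then a netobj carrying the MIC over that databody. unwrap_integ_data() copies the MIC out, verifies it, re-reads the sequence number, and trims the checksum and padding off the tail. A flat-buffer sketch of the same layout; the kernel works on an xdr_buf and calls gss_verify_mic() instead:

        #include <arpa/inet.h>
        #include <stdint.h>
        #include <string.h>

        static uint32_t get_be32(const uint8_t *p)
        {
                uint32_t v;

                memcpy(&v, p, 4);
                return ntohl(v);
        }

        /* Locate the pieces of rpc_gss_integ_data in a flat buffer:
         *   u32 length | databody (u32 seq_num, args...) | u32 mic_len | mic
         * Returns 0 on success; MIC verification itself is out of scope. */
        static int parse_integ(const uint8_t *buf, size_t buflen,
                               uint32_t *seq_num,
                               const uint8_t **mic, uint32_t *mic_len)
        {
                uint32_t integ_len;

                if (buflen < 12)
                        return -1;
                integ_len = get_be32(buf);
                if ((integ_len & 3) || integ_len < 4 || integ_len > buflen - 8)
                        return -1;      /* must be quad-aligned and fit */
                *seq_num = get_be32(buf + 4);   /* first word of databody */
                *mic_len = get_be32(buf + 4 + integ_len);
                if (*mic_len > buflen - 8 - integ_len)
                        return -1;
                *mic = buf + 8 + integ_len;
                return 0;
        }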
/* total_buf_len() */
        return buf->head[0].iov_len + buf->page_len + buf->tail[0].iov_len;
/* fix_priv_head() */
        if (buf->page_len == 0) {
                /* We need to adjust head and buf->len in tandem in this
                 * case to make svc_defer() work--it finds the original
                 * buffer start using buf->len - buf->head[0].iov_len. */
                buf->head[0].iov_len -= pad;
/* unwrap_priv_data() */
        int pad, remaining_len, offset;
        ...
        __clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
        ...
        priv_len = svc_getnl(&buf->head[0]);
        if (rqstp->rq_deferred) {
        ...
        /* buf->len is the number of bytes from the original start of the
         * ... */
        ...
        pad = remaining_len - priv_len;
        buf->len -= pad;
        ...
        pad = priv_len - buf->len;
        /* The upper layers assume the buffer is aligned on 4-byte boundaries.
         * In the krb5p case, at least, the data ends up offset, so we need to
         * ... */
        ...
        offset = xdr_pad_size(buf->head[0].iov_len);
        if (offset) {
                buf->buflen = RPCSVC_MAXPAYLOAD;
                xdr_shift_buf(buf, offset);
        ...
        rseqno = svc_getnl(&buf->head[0]);
        ...
        return -EINVAL;
        ...
        return -EINVAL;
        ...
        return -EINVAL;
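The privacy case is analogous but simpler on the outside: the body is a length word followed by an opaque wrap token; gss_unwrap() decrypts it in place, yielding the sequence number and arguments, after which the head must be re-aligned to a 4-byte boundary (the xdr_pad_size()/xdr_shift_buf() calls above) because krb5p decryption can leave the payload offset. A flat-buffer sketch of the outer layout only; decryption itself is mechanism-specific:

        #include <arpa/inet.h>
        #include <stdint.h>
        #include <string.h>

        /* Outer layout of rpc_gss_priv_data: u32 length, then `length`
         * bytes of wrap token.  Anything after the token in the received
         * buffer is XDR padding, which unwrap_priv_data() drops by
         * shrinking buf->len before calling gss_unwrap(). */
        static int locate_priv(const uint8_t *buf, size_t buflen,
                               const uint8_t **tok, uint32_t *tok_len)
        {
                uint32_t be;

                if (buflen < 4)
                        return -1;
                memcpy(&be, buf, 4);
                *tok_len = ntohl(be);
                if (*tok_len > buflen - 4)
                        return -1;
                *tok = buf + 4;
                return 0;
        }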
/* svcauth_gss_set_client() */
        struct gss_svc_data *svcdata = rqstp->rq_auth_data;
        struct rsc *rsci = svcdata->rsci;
        struct rpc_gss_wire_cred *gc = &svcdata->clcred;
        ...
        rqstp->rq_auth_stat = rpc_autherr_badcred;
        ...
        rqstp->rq_gssclient = find_gss_auth_domain(rsci->mechctx, gc->gc_svc);
        if (rqstp->rq_gssclient == NULL)
        ...
        rqstp->rq_auth_stat = rpc_auth_ok;
/* gss_write_init_verf() */
        rc = gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN);
        cache_put(&rsci->h, cd);
/* gss_read_common_verf() */
        if (argv->iov_len < 2 * 4)
        ...
        if (gc->gc_proc == RPC_GSS_PROC_INIT && gc->gc_ctx.len != 0)
        ...
        if (dup_netobj(in_handle, &gc->gc_ctx))
/* gss_read_verf() */
        kfree(in_handle->data);
        ...
        kfree(in_handle->data);
/* gss_free_in_token_pages() */
        inlen = in_token->page_len;
        ...
                if (in_token->pages[i])
                        put_page(in_token->pages[i]);
                inlen -= inlen > PAGE_SIZE ? PAGE_SIZE : inlen;
        ...
        kfree(in_token->pages);
        in_token->pages = NULL;
/* gss_read_proxy_verf() */
        struct kvec *argv = &rqstp->rq_arg.head[0];
        ...
        res = gss_read_common_verf(gc, argv, &rqstp->rq_auth_stat, in_handle);
        ...
        if (inlen > (argv->iov_len + rqstp->rq_arg.page_len))
        ...
        in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
        if (!in_token->pages)
        ...
        in_token->page_base = 0;
        in_token->page_len = inlen;
        ...
                in_token->pages[i] = alloc_page(GFP_KERNEL);
                if (!in_token->pages[i]) {
        ...
        length = min_t(unsigned int, inlen, argv->iov_len);
        memcpy(page_address(in_token->pages[0]), argv->iov_base, length);
        inlen -= length;
        ...
        from_offs = rqstp->rq_arg.page_base;
        ...
                length = min_t(unsigned int, inlen,
                               min_t(unsigned int, PAGE_SIZE - pgto_offs,
                                     PAGE_SIZE - pgfrom_offs));
                memcpy(page_address(in_token->pages[pgto]) + pgto_offs,
                       page_address(rqstp->rq_arg.pages[pgfrom]) + pgfrom_offs,
                       length);
        ...
                inlen -= length;
/* gss_write_resv() */
        if (resv->iov_len + 4 > size_limit)
                return -1;
        ...
                return -1;
        if (resv->iov_len + 3 * 4 > size_limit)
                return -1;
        ...
                return -1;
/* svcauth_gss_legacy_init() */
        struct kvec *argv = &rqstp->rq_arg.head[0];
        struct kvec *resv = &rqstp->rq_res.head[0];
        ...
        ret = gss_read_verf(gc, argv, &rqstp->rq_auth_stat, &rsikey);
        ...
        rsip = rsi_lookup(sn->rsi_cache, &rsikey);
        ...
        if (cache_check(sn->rsi_cache, &rsip->h, &rqstp->rq_chandle) < 0)
        ...
        if (gss_write_init_verf(sn->rsc_cache, rqstp,
                                &rsip->out_handle, &rsip->major_status))
        ...
        if (gss_write_resv(resv, PAGE_SIZE,
                           &rsip->out_handle, &rsip->out_token,
                           rsip->major_status, rsip->minor_status))
        ...
        cache_put(&rsip->h, sn->rsi_cache);
/* gss_proxy_save_rsc() */
        status = -ENOMEM;
        ...
        if (!ud->found_creds) {
        ...
        rsci.cred = ud->creds;
        memset(&ud->creds, 0, sizeof(struct svc_cred));

        status = -EOPNOTSUPP;
        ...
        gm = gss_mech_get_by_OID(&ud->mech_oid);
        ...
        status = -EINVAL;
        /* mech-specific data: */
        status = gss_import_sec_context(ud->out_handle.data,
                                        ud->out_handle.len,
                                        gm, &rsci.mechctx,
                                        &expiry, GFP_KERNEL);
        ...
        expiry -= boot.tv_sec;
        ...
        cache_put(&rscp->h, cd);
        ...
        status = -ENOMEM;
/* svcauth_gss_proxy_init() */
        struct kvec *resv = &rqstp->rq_res.head[0];
        ...
        /* Perform synchronous upcall to gss-proxy */
        ...
        status = gss_proxy_save_rsc(sn->rsc_cache, &ud, &handle);
        ...
        if (gss_write_init_verf(sn->rsc_cache, rqstp,
        ...
/*
 * Try to set the sn->use_gss_proxy variable to a new value. We only allow
 * it to be changed if it's currently undefined (-1). If it's any other value
 * then return -EBUSY unless the type wouldn't have changed anyway.
 */
/* set_gss_proxy() */
        ret = cmpxchg(&sn->use_gss_proxy, -1, type);
        if (ret != -1 && ret != type)
                return -EBUSY;

/* use_gss_proxy() */
        if (sn->use_gss_proxy == -1)
        ...
        return sn->use_gss_proxy;
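The cmpxchg() makes the proxy/legacy choice a once-only latch: the first writer flips -1 to its type, and later writers succeed only if they ask for the same value. A user-space sketch of the identical semantics using C11 atomics in place of the kernel's cmpxchg():

        #include <errno.h>
        #include <stdatomic.h>

        static _Atomic int use_gss_proxy = -1;  /* -1 means "undecided" */

        static int set_gss_proxy(int type)
        {
                int expected = -1;

                /* First caller wins; expected is updated to the current
                 * value when the exchange fails. */
                if (atomic_compare_exchange_strong(&use_gss_proxy,
                                                   &expected, type))
                        return 0;
                return expected == type ? 0 : -EBUSY;
        }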
/* write_gssp() */
        if (*ppos || count > sizeof(tbuf) - 1)
                return -EINVAL;
        ...
                return -EFAULT;
        ...
                return -EINVAL;

/* read_gssp() */
        snprintf(tbuf, sizeof(tbuf), "%d\n", sn->use_gss_proxy);
        ...
        len -= p;
        ...
                return -EFAULT;
/* create_use_gss_proxy_proc_entry() */
        struct proc_dir_entry **p = &sn->use_gssp_proc;

        sn->use_gss_proxy = -1;
        *p = proc_create_data("use-gss-proxy", S_IFREG | 0600,
                              sn->proc_net_rpc,
        ...
                return -ENOMEM;

/* destroy_use_gss_proxy_proc_entry() */
        if (sn->use_gssp_proc) {
                remove_proc_entry("use-gss-proxy", sn->proc_net_rpc);
/* svcauth_gss_accept() */
        struct kvec *argv = &rqstp->rq_arg.head[0];
        struct kvec *resv = &rqstp->rq_res.head[0];
        ...
        struct gss_svc_data *svcdata = rqstp->rq_auth_data;
        ...
        __be32 *reject_stat = resv->iov_base + resv->iov_len;
        ...
        rqstp->rq_auth_stat = rpc_autherr_badcred;
        ...
        rqstp->rq_auth_data = svcdata;
        svcdata->verf_start = NULL;
        svcdata->rsci = NULL;
        gc = &svcdata->clcred;
        ...
        rpcstart = argv->iov_base;
        rpcstart -= 7;
        ...
        if (argv->iov_len < 5 * 4)
        ...
        gc->gc_proc = svc_getnl(argv);
        gc->gc_seq = svc_getnl(argv);
        gc->gc_svc = svc_getnl(argv);
        if (svc_safe_getnetobj(argv, &gc->gc_ctx))
        ...
        if (crlen != round_up_to_quad(gc->gc_ctx.len) + 5 * 4)
        ...
        if ((gc->gc_proc != RPC_GSS_PROC_DATA) && (rqstp->rq_proc != 0))
        ...
        rqstp->rq_auth_stat = rpc_autherr_badverf;
        switch (gc->gc_proc) {
        ...
        rqstp->rq_auth_stat = rpcsec_gsserr_credproblem;
        rsci = gss_svc_searchbyctx(sn->rsc_cache, &gc->gc_ctx);
        ...
        rqstp->rq_auth_stat = rpc_autherr_rejectedcred;
        ...
        switch (gc->gc_proc) {
        ...
                if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
        ...
                sunrpc_cache_unhash(sn->rsc_cache, &rsci->h);
                if (resv->iov_len + 4 > PAGE_SIZE)
        ...
                rqstp->rq_auth_stat = rpcsec_gsserr_ctxproblem;
                svcdata->verf_start = resv->iov_base + resv->iov_len;
                if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
        ...
                rqstp->rq_cred = rsci->cred;
                get_group_info(rsci->cred.cr_group_info);
                rqstp->rq_auth_stat = rpc_autherr_badcred;
                switch (gc->gc_svc) {
        ...
                        if (unwrap_integ_data(rqstp, &rqstp->rq_arg,
                                              gc->gc_seq, rsci->mechctx))
        ...
                        rqstp->rq_auth_slack = RPC_MAX_AUTH_SIZE;
        ...
                        if (unwrap_priv_data(rqstp, &rqstp->rq_arg,
                                             gc->gc_seq, rsci->mechctx))
        ...
                        rqstp->rq_auth_slack = RPC_MAX_AUTH_SIZE * 2;
        ...
        svcdata->rsci = rsci;
        cache_get(&rsci->h);
        rqstp->rq_cred.cr_flavor = gss_svc_to_pseudoflavor(
                                        rsci->mechctx->mech_type,
                                        GSS_C_QOP_DEFAULT,
                                        gc->gc_svc);
        ...
        cache_put(&rsci->h, sn->rsc_cache);
/* svcauth_gss_prepare_to_wrap() */
        p = gsd->verf_start;
        gsd->verf_start = NULL;
        ...
        if (*(p - 1) != rpc_success)
        ...
        resbuf->head[0].iov_len -= 2 * 4;
/* svcauth_gss_wrap_resp_integ() */
        struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
        struct rpc_gss_wire_cred *gc = &gsd->clcred;
        struct xdr_buf *resbuf = &rqstp->rq_res;
        ...
        struct xdr_netobj mic;
        ...
        int stat = -EINVAL;
        ...
        integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
        integ_len = resbuf->len - integ_offset;
        ...
        *p++ = htonl(gc->gc_seq);
        ...
        if (resbuf->tail[0].iov_base == NULL) {
                if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE)
        ...
                resbuf->tail[0].iov_base = resbuf->head[0].iov_base
                                                + resbuf->head[0].iov_len;
                resbuf->tail[0].iov_len = 0;
        }
        resv = &resbuf->tail[0];
        mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
        if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic))
        ...
        svc_putnl(resv, mic.len);
        memset(mic.data + mic.len, 0,
               round_up_to_quad(mic.len) - mic.len);
        resv->iov_len += XDR_QUADLEN(mic.len) << 2;
        ...
        resbuf->len += XDR_QUADLEN(mic.len) << 2;
        if (resv->iov_len > PAGE_SIZE)
/* svcauth_gss_wrap_resp_priv() */
        struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
        struct rpc_gss_wire_cred *gc = &gsd->clcred;
        struct xdr_buf *resbuf = &rqstp->rq_res;
        ...
        int offset;
        ...
        offset = (u8 *)p - (u8 *)resbuf->head[0].iov_base;
        *p++ = htonl(gc->gc_seq);
        inpages = resbuf->pages;
        ...
        if (resbuf->tail[0].iov_base) {
                if (resbuf->tail[0].iov_base >=
                    resbuf->head[0].iov_base + PAGE_SIZE)
                        return -EINVAL;
                if (resbuf->tail[0].iov_base < resbuf->head[0].iov_base)
                        return -EINVAL;
                if (resbuf->tail[0].iov_len + resbuf->head[0].iov_len
                                + 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
                        return -ENOMEM;
                memmove(resbuf->tail[0].iov_base + RPC_MAX_AUTH_SIZE,
                        resbuf->tail[0].iov_base,
                        resbuf->tail[0].iov_len);
                resbuf->tail[0].iov_base += RPC_MAX_AUTH_SIZE;
        ...
        if (resbuf->tail[0].iov_base == NULL) {
                if (resbuf->head[0].iov_len + 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
                        return -ENOMEM;
                resbuf->tail[0].iov_base = resbuf->head[0].iov_base
                        + resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE;
                resbuf->tail[0].iov_len = 0;
        }
        if (gss_wrap(gsd->rsci->mechctx, offset, resbuf, inpages))
                return -ENOMEM;
        *len = htonl(resbuf->len - offset);
        pad = 3 - ((resbuf->len - offset - 1) & 3);
        p = (__be32 *)(resbuf->tail[0].iov_base + resbuf->tail[0].iov_len);
        memset(p, 0, pad);
        resbuf->tail[0].iov_len += pad;
        resbuf->len += pad;
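The pad expression above is a branch-free way of computing the XDR padding for resbuf->len - offset bytes; for any n > 0 it equals round_up_to_quad(n) - n (e.g. n = 5 gives pad = 3, n = 8 gives pad = 0). A quick self-check of that identity:

        #include <assert.h>
        #include <stdint.h>

        int main(void)
        {
                for (uint32_t n = 1; n <= 64; n++) {
                        uint32_t pad = 3 - ((n - 1) & 3);

                        /* round_up_to_quad(n) - n, the XDR pad for n bytes */
                        assert(pad == (((n + 3) & ~3u) - n));
                }
                return 0;
        }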
/* svcauth_gss_release() */
        struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
        ...
        struct xdr_buf *resbuf = &rqstp->rq_res;
        int stat = -EINVAL;
        ...
        gc = &gsd->clcred;
        if (gc->gc_proc != RPC_GSS_PROC_DATA)
        ...
        if (gsd->verf_start == NULL)
        ...
        resbuf->len = total_buf_len(resbuf);
        switch (gc->gc_svc) {
        ...
        if (rqstp->rq_client)
                auth_domain_put(rqstp->rq_client);
        rqstp->rq_client = NULL;
        if (rqstp->rq_gssclient)
                auth_domain_put(rqstp->rq_gssclient);
        rqstp->rq_gssclient = NULL;
        if (rqstp->rq_cred.cr_group_info)
                put_group_info(rqstp->rq_cred.cr_group_info);
        rqstp->rq_cred.cr_group_info = NULL;
        if (gsd && gsd->rsci) {
                cache_put(&gsd->rsci->h, sn->rsc_cache);
                gsd->rsci = NULL;
/* svcauth_gss_domain_release_rcu() */
        kfree(dom->name);

/* svcauth_gss_domain_release() */
        call_rcu(&dom->rcu_head, svcauth_gss_domain_release_rcu);
/* rsi_cache_create_net() */
        sn->rsi_cache = cd;

/* rsi_cache_destroy_net() */
        struct cache_detail *cd = sn->rsi_cache;

        sn->rsi_cache = NULL;

/* rsc_cache_create_net() */
        sn->rsc_cache = cd;

/* rsc_cache_destroy_net() */
        struct cache_detail *cd = sn->rsc_cache;

        sn->rsc_cache = NULL;