/*
 * COPYRIGHT (c) 2008
 * The Regents of the University of Michigan
 * ALL RIGHTS RESERVED
 *
 * Permission is granted to use, copy, create derivative works
 * and redistribute this software and such derivative works
 * for any purpose, so long as the name of The University of
 * Michigan is not used in any advertising or publicity
 * pertaining to the use of distribution of this software
 * without specific, written prior authorization. If the
 * above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any
 * portion of this software, then the disclaimer below must
 * also be included.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
 * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
 * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
 * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
 * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
 * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
 * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGES.
 */

#include <crypto/skcipher.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif

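/*
 * Per RFC 1964, plaintext is padded out to a multiple of the cipher
 * blocksize before encryption. Return the number of pad bytes needed;
 * this is always between 1 and blocksize, so a message that is already
 * block-aligned still gets a full block of padding.
 */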
static inline int
gss_krb5_padding(int blocksize, int length)
{
        return blocksize - (length % blocksize);
}

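/*
 * Append self-describing padding (each pad byte holds the pad length,
 * as in RFC 1964 section 1.2.2.3) to the tail if the buffer has page
 * or tail data, otherwise directly after the head.
 */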
static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
        int padding = gss_krb5_padding(blocksize, buf->len - offset);
        char *p;
        struct kvec *iov;

        if (buf->page_len || buf->tail[0].iov_len)
                iov = &buf->tail[0];
        else
                iov = &buf->head[0];
        p = iov->iov_base + iov->iov_len;
        iov->iov_len += padding;
        buf->len += padding;
        memset(p, padding, padding);
}

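/*
 * Read the pad length from the last byte of the decrypted message,
 * wherever it landed (head, page data, or tail), validate it, and
 * shrink the buffer accordingly.
 */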
static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
        u8 *ptr;
        u8 pad;
        size_t len = buf->len;

        if (len <= buf->head[0].iov_len) {
                pad = *(u8 *)(buf->head[0].iov_base + len - 1);
                if (pad > buf->head[0].iov_len)
                        return -EINVAL;
                buf->head[0].iov_len -= pad;
                goto out;
        } else
                len -= buf->head[0].iov_len;
        if (len <= buf->page_len) {
                unsigned int last = (buf->page_base + len - 1)
                                        >> PAGE_SHIFT;
                unsigned int offset = (buf->page_base + len - 1)
                                        & (PAGE_SIZE - 1);
                ptr = kmap_atomic(buf->pages[last]);
                pad = *(ptr + offset);
                kunmap_atomic(ptr);
                goto out;
        } else
                len -= buf->page_len;
        BUG_ON(len > buf->tail[0].iov_len);
        pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
        /* XXX: NOTE: we do not adjust the page lengths--they represent
         * a range of data in the real filesystem page cache, and we need
         * to know that range so the xdr code can properly place read data.
         * However adjusting the head length, as we do above, is harmless.
         * In the case of a request that fits into a single page, the server
         * also uses length and head length together to determine the original
         * start of the request to copy the request for deferral; so it's
         * easier on the server if we adjust head and tail length in tandem.
         * It's not really a problem that we don't fool with the page and
         * tail lengths, though--at worst badly formed xdr might lead the
         * server to attempt to parse the padding.
         * XXX: Document all these weird requirements for gss mechanism
         * wrap/unwrap functions. */
        if (pad > blocksize)
                return -EINVAL;
        if (buf->len > pad)
                buf->len -= pad;
        else
                return -EINVAL;
        return 0;
}

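/*
 * Fill the first conflen bytes of the message with a confounder, the
 * random prefix RFC 1964 places before the plaintext so that identical
 * messages encrypt differently.
 */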
void
gss_krb5_make_confounder(char *p, u32 conflen)
{
        static u64 i = 0;
        u64 *q = (u64 *)p;

        /* rfc1964 claims this should be "random". But all that's really
         * necessary is that it be unique. And not even that is necessary in
         * our case since our "gssapi" implementation exists only to support
         * rpcsec_gss, so we know that the only buffers we will ever encrypt
         * already begin with a unique sequence number. Just to hedge my bets
         * I'll make a half-hearted attempt at something unique, but ensuring
         * uniqueness would mean worrying about atomicity and rollover, and I
         * don't care enough. */

        /* initialize to random value */
        if (i == 0) {
                i = prandom_u32();
                i = (i << 32) | prandom_u32();
        }

        switch (conflen) {
        case 16:
                *q++ = i++;
                /* fall through */
        case 8:
                *q++ = i++;
                break;
        default:
                BUG();
        }
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */

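/*
 * Wrap a message in a v1 token. The result has the layout described in
 * RFC 1964, section 1.2.2:
 *
 *      <mech OID wrapper> | TOK_ID (02 01) | SGN_ALG | SEAL_ALG |
 *      ff ff | SND_SEQ (8 octets) | SGN_CKSUM (cksumlength octets) |
 *      confounder | plaintext data | pad bytes
 */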
static u32
gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
                struct xdr_buf *buf, struct page **pages)
{
        char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
        struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
                                      .data = cksumdata};
        int blocksize = 0, plainlen;
        unsigned char *ptr, *msg_start;
        s32 now;
        int headlen;
        struct page **tmp_pages;
        u32 seq_send;
        u8 *cksumkey;
        u32 conflen = kctx->gk5e->conflen;

        dprintk("RPC: %s\n", __func__);

        now = get_seconds();

        blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
        gss_krb5_add_padding(buf, offset, blocksize);
        BUG_ON((buf->len - offset) % blocksize);
        plainlen = conflen + buf->len - offset;

        headlen = g_token_size(&kctx->mech_used,
                GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
                (buf->len - offset);

        ptr = buf->head[0].iov_base + offset;
        /* shift data to make room for header. */
        xdr_extend_head(buf, offset, headlen);

        /* XXX Would be cleverer to encrypt while copying. */
        BUG_ON((buf->len - offset - headlen) % blocksize);

        g_make_token_header(&kctx->mech_used,
                                GSS_KRB5_TOK_HDR_LEN +
                                kctx->gk5e->cksumlength + plainlen, &ptr);

        /* ptr now at header described in rfc 1964, section 1.2.1: */
        ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
        ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

        msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;

        /*
         * signalg and sealalg are stored as if they were converted from LE
         * to host endian, even though they're opaque pairs of bytes according
         * to the RFC.
         */
        *(__le16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
        *(__le16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);
        ptr[6] = 0xff;
        ptr[7] = 0xff;

        gss_krb5_make_confounder(msg_start, conflen);

        if (kctx->gk5e->keyed_cksum)
                cksumkey = kctx->cksum;
        else
                cksumkey = NULL;

        /* XXXJBF: UGH!: */
        tmp_pages = buf->pages;
        buf->pages = pages;
        if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
                                        cksumkey, KG_USAGE_SEAL, &md5cksum))
                return GSS_S_FAILURE;
        buf->pages = tmp_pages;

        memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);

        seq_send = atomic_fetch_inc(&kctx->seq_send);

        /* XXX would probably be more efficient to compute checksum
         * and encrypt at the same time: */
        if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
                               seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
                return GSS_S_FAILURE;

        if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
                struct crypto_sync_skcipher *cipher;
                int err;

                cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name,
                                                    0, 0);
                if (IS_ERR(cipher))
                        return GSS_S_FAILURE;

                krb5_rc4_setup_enc_key(kctx, cipher, seq_send);

                err = gss_encrypt_xdr_buf(cipher, buf,
                                          offset + headlen - conflen, pages);
                crypto_free_sync_skcipher(cipher);
                if (err)
                        return GSS_S_FAILURE;
        } else {
                if (gss_encrypt_xdr_buf(kctx->enc, buf,
                                        offset + headlen - conflen, pages))
                        return GSS_S_FAILURE;
        }

        return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

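/*
 * Verify and strip a v1 token: check the token header, sign and seal
 * algorithms, and filler bytes; verify the sequence number and send
 * direction; decrypt; verify the checksum; then slide the plaintext
 * back over the confounder and strip the padding.
 */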
static u32
gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
        int signalg;
        int sealalg;
        char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
        struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
                                      .data = cksumdata};
        s32 now;
        int direction;
        s32 seqnum;
        unsigned char *ptr;
        int bodysize;
        void *data_start, *orig_start;
        int data_len;
        int blocksize;
        u32 conflen = kctx->gk5e->conflen;
        int crypt_offset;
        u8 *cksumkey;

        dprintk("RPC: gss_unwrap_kerberos\n");

        ptr = (u8 *)buf->head[0].iov_base + offset;
        if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
                                        buf->len - offset))
                return GSS_S_DEFECTIVE_TOKEN;

        if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
            (ptr[1] != (KG_TOK_WRAP_MSG & 0xff)))
                return GSS_S_DEFECTIVE_TOKEN;

        /* XXX sanity-check bodysize?? */

        /* get the sign and seal algorithms */

        signalg = ptr[2] + (ptr[3] << 8);
        if (signalg != kctx->gk5e->signalg)
                return GSS_S_DEFECTIVE_TOKEN;

        sealalg = ptr[4] + (ptr[5] << 8);
        if (sealalg != kctx->gk5e->sealalg)
                return GSS_S_DEFECTIVE_TOKEN;

        if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * Data starts after token header and checksum. ptr points
         * to the beginning of the token header
         */
        crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
                                        (unsigned char *)buf->head[0].iov_base;

        /*
         * Need plaintext seqnum to derive encryption key for arcfour-hmac
         */
        if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
                             ptr + 8, &direction, &seqnum))
                return GSS_S_BAD_SIG;

        if ((kctx->initiate && direction != 0xff) ||
            (!kctx->initiate && direction != 0))
                return GSS_S_BAD_SIG;

        if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
                struct crypto_sync_skcipher *cipher;
                int err;

                cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name,
                                                    0, 0);
                if (IS_ERR(cipher))
                        return GSS_S_FAILURE;

                krb5_rc4_setup_enc_key(kctx, cipher, seqnum);

                err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
                crypto_free_sync_skcipher(cipher);
                if (err)
                        return GSS_S_DEFECTIVE_TOKEN;
        } else {
                if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
                        return GSS_S_DEFECTIVE_TOKEN;
        }

        if (kctx->gk5e->keyed_cksum)
                cksumkey = kctx->cksum;
        else
                cksumkey = NULL;

        if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
                                        cksumkey, KG_USAGE_SEAL, &md5cksum))
                return GSS_S_FAILURE;

        if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
                                                kctx->gk5e->cksumlength))
                return GSS_S_BAD_SIG;

        /* it got through unscathed. Make sure the context is unexpired */

        now = get_seconds();

        if (now > kctx->endtime)
                return GSS_S_CONTEXT_EXPIRED;

        /* do sequencing checks */

        /* Copy the data back to the right position. XXX: Would probably be
         * better to copy and encrypt at the same time. */

        blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
        data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
                                        conflen;
        orig_start = buf->head[0].iov_base + offset;
        data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
        memmove(orig_start, data_start, data_len);
        buf->head[0].iov_len -= (data_start - orig_start);
        buf->len -= (data_start - orig_start);

        if (gss_krb5_remove_padding(buf, blocksize))
                return GSS_S_DEFECTIVE_TOKEN;

        return GSS_S_COMPLETE;
}

/*
 * We can shift data by up to LOCAL_BUF_LEN bytes in a pass. If we need
 * to do more than that, we shift repeatedly. Kevin Coffman reports
 * seeing 28 bytes as the value used by Microsoft clients and servers
 * with AES, so this constant is chosen to allow handling 28 in one pass
 * without using too much stack space.
 *
 * If that proves to be a problem perhaps we could use a more clever
 * algorithm.
 */
#define LOCAL_BUF_LEN 32u

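/*
 * Rotate the whole buffer left by @shift bytes, where @shift is at most
 * LOCAL_BUF_LEN: stash the first @shift bytes, slide the rest of the
 * buffer down in LOCAL_BUF_LEN-sized chunks, then write the stashed
 * bytes at the end.
 */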
static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift)
{
        char head[LOCAL_BUF_LEN];
        char tmp[LOCAL_BUF_LEN];
        unsigned int this_len, i;

        BUG_ON(shift > LOCAL_BUF_LEN);

        read_bytes_from_xdr_buf(buf, 0, head, shift);
        for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {
                this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));
                read_bytes_from_xdr_buf(buf, i+shift, tmp, this_len);
                write_bytes_to_xdr_buf(buf, i, tmp, this_len);
        }
        write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);
}

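/* Rotate left by an arbitrary shift, LOCAL_BUF_LEN bytes at a time. */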
static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
{
        int shifted = 0;
        int this_shift;

        shift %= buf->len;
        while (shifted < shift) {
                this_shift = min(shift - shifted, LOCAL_BUF_LEN);
                rotate_buf_a_little(buf, this_shift);
                shifted += this_shift;
        }
}

static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
{
        struct xdr_buf subbuf;

        xdr_buf_subsegment(buf, &subbuf, base, buf->len - base);
        _rotate_left(&subbuf, shift);
}

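/*
 * Wrap a message in a v2 token. The 16-octet token header is laid out
 * as described in RFC 4121, section 4.2.6.2:
 *
 *      TOK_ID (05 04) | Flags | ff | EC (2 octets) | RRC (2 octets) |
 *      SND_SEQ (8 octets)
 *
 * This implementation always emits RRC = 0.
 */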
static u32
gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
                     struct xdr_buf *buf, struct page **pages)
{
        u8 *ptr, *plainhdr;
        s32 now;
        u8 flags = 0x00;
        __be16 *be16ptr;
        __be64 *be64ptr;
        u32 err;

        dprintk("RPC: %s\n", __func__);

        if (kctx->gk5e->encrypt_v2 == NULL)
                return GSS_S_FAILURE;

        /* make room for gss token header */
        if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
                return GSS_S_FAILURE;

        /* construct gss token header */
        ptr = plainhdr = buf->head[0].iov_base + offset;
        *ptr++ = (unsigned char) ((KG2_TOK_WRAP>>8) & 0xff);
        *ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);

        if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
                flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
        if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
                flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
        /* We always do confidentiality in wrap tokens */
        flags |= KG2_TOKEN_FLAG_SEALED;

        *ptr++ = flags;
        *ptr++ = 0xff;
        be16ptr = (__be16 *)ptr;

        *be16ptr++ = 0;
        /* "inner" token header always uses 0 for RRC */
        *be16ptr++ = 0;

        be64ptr = (__be64 *)be16ptr;
        *be64ptr = cpu_to_be64(atomic64_fetch_inc(&kctx->seq_send64));

        err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages);
        if (err)
                return err;

        now = get_seconds();
        return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

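/*
 * Unwrap a v2 token: verify the 16-octet header, undo any right
 * rotation (RRC) the peer applied, decrypt, and compare the plaintext
 * copy of the header that travels at the end of the ciphertext against
 * the one we received. Sequence-number checking is left to the
 * rpcsec_gss layer.
 */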
static u32
gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
        s32 now;
        u8 *ptr;
        u8 flags = 0x00;
        u16 ec, rrc;
        int err;
        u32 headskip, tailskip;
        u8 decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
        unsigned int movelen;

        dprintk("RPC: %s\n", __func__);

        if (kctx->gk5e->decrypt_v2 == NULL)
                return GSS_S_FAILURE;

        ptr = buf->head[0].iov_base + offset;

        if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
                return GSS_S_DEFECTIVE_TOKEN;

        flags = ptr[2];
        if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
            (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
                return GSS_S_BAD_SIG;

        if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
                dprintk("%s: token missing expected sealed flag\n", __func__);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (ptr[3] != 0xff)
                return GSS_S_DEFECTIVE_TOKEN;

        ec = be16_to_cpup((__be16 *)(ptr + 4));
        rrc = be16_to_cpup((__be16 *)(ptr + 6));

        /*
         * NOTE: the sequence number at ptr + 8 is skipped, rpcsec_gss
         * doesn't want it checked; see page 6 of rfc 2203.
         */

        if (rrc != 0)
                rotate_left(offset + 16, buf, rrc);

        err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
                                        &headskip, &tailskip);
        if (err)
                return GSS_S_FAILURE;

        /*
         * Retrieve the decrypted gss token header and verify
         * it against the original
         */
        err = read_bytes_from_xdr_buf(buf,
                                buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
                                decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
        if (err) {
                dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
                return GSS_S_FAILURE;
        }
        if (memcmp(ptr, decrypted_hdr, 6)
                                || memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
                dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
                return GSS_S_FAILURE;
        }

        /* do sequencing checks */

        /* it got through unscathed. Make sure the context is unexpired */
        now = get_seconds();
        if (now > kctx->endtime)
                return GSS_S_CONTEXT_EXPIRED;

        /*
         * Move the head data back to the right position in xdr_buf.
         * We ignore any "ec" data since it might be in the head or
         * the tail, and we really don't need to deal with it.
         * Note that buf->head[0].iov_len may indicate the available
         * head buffer space rather than that actually occupied.
         */
        movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
        movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
        if (offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
            buf->head[0].iov_len)
                return GSS_S_FAILURE;
        memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
        buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
        buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;

        /* Trim off the trailing "extra count" and checksum blob */
        buf->len -= ec + GSS_KRB5_TOK_HDR_LEN + tailskip;

        return GSS_S_COMPLETE;
}

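/*
 * Dispatch on enctype: the DES, DES3 and RC4 enctypes use the v1
 * (RFC 1964) token format; the AES enctypes use v2 (RFC 4121).
 */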
u32
gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
                  struct xdr_buf *buf, struct page **pages)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;

        switch (kctx->enctype) {
        default:
                BUG();
        case ENCTYPE_DES_CBC_RAW:
        case ENCTYPE_DES3_CBC_RAW:
        case ENCTYPE_ARCFOUR_HMAC:
                return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
        case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
        case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
                return gss_wrap_kerberos_v2(kctx, offset, buf, pages);
        }
}

u32
gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;

        switch (kctx->enctype) {
        default:
                BUG();
        case ENCTYPE_DES_CBC_RAW:
        case ENCTYPE_DES3_CBC_RAW:
        case ENCTYPE_ARCFOUR_HMAC:
                return gss_unwrap_kerberos_v1(kctx, offset, buf);
        case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
        case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
                return gss_unwrap_kerberos_v2(kctx, offset, buf);
        }
}