2 * Copyright (c) 2003, PADL Software Pty Ltd.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of PADL Software nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY PADL SOFTWARE AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL PADL SOFTWARE OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #include "gsskrb5_locl.h"
36 * Implementation of RFC 4121
/* RFC 4121 CFX token flag bits, carried in the one-octet Flags field. */
39 #define CFXSentByAcceptor (1 << 0)
40 #define CFXSealed (1 << 1)
41 #define CFXAcceptorSubkey (1 << 2)
/*
 * Compute the output sizes for a CFX Wrap of input_length bytes:
 * total token length (*output_length), the checksum size (*cksumsize)
 * and the pad length (*padlength).  The 16-byte token header always
 * leads; with confidentiality a copy of the header is appended to the
 * plaintext before encryption, otherwise a checksum follows the data.
 */
44 _gsskrb5cfx_wrap_length_cfx(const gsskrb5_ctx context_handle,
49 size_t *output_length,
56 /* 16-byte header is always first */
57 *output_length = sizeof(gss_cfx_wrap_token_desc);
/* Ask the crypto layer which checksum it uses, then its length. */
60 ret = krb5_crypto_get_checksum_type(context, crypto, &type);
64 ret = krb5_checksumsize(context, type, cksumsize);
71 /* Header is concatenated with data before encryption */
72 input_length += sizeof(gss_cfx_wrap_token_desc);
/*
 * DCE style pads to the cipher block size; plain CFX uses the
 * crypto system's pad size.  NOTE(review): the modulo below
 * assumes padsize != 0 -- presumably guaranteed by the crypto
 * layer; confirm.
 */
74 if (IS_DCE_STYLE(context_handle)) {
75 ret = krb5_crypto_getblocksize(context, crypto, &padsize);
77 ret = krb5_crypto_getpadsize(context, crypto, &padsize);
84 *padlength = padsize - (input_length % padsize);
86 /* We add the pad ourselves (noted here for completeness only) */
87 input_length += *padlength;
/* Add the encryption overhead for the (header+data+pad) payload. */
90 *output_length += krb5_get_wrapped_length(context,
91 crypto, input_length);
93 /* Checksum is concatenated with data */
94 *output_length += input_length + *cksumsize;
/* Sanity: wrapping must never produce less than the input. */
97 assert(*output_length > input_length);
/*
 * Given a maximum desired token size (req_output_size), compute the
 * largest message (*max_input_size) whose CFX Wrap token will not
 * exceed it.  The 16-byte token header is always subtracted first;
 * with confidentiality the payload size is found by probing
 * krb5_get_wrapped_length() downward, otherwise only the trailing
 * checksum is subtracted.
 */
103 _gssapi_wrap_size_cfx(OM_uint32 *minor_status,
104 const gsskrb5_ctx ctx,
105 krb5_context context,
108 OM_uint32 req_output_size,
109 OM_uint32 *max_input_size)
115 /* 16-byte header is always first */
116 if (req_output_size < 16)
118 req_output_size -= 16;
121 size_t wrapped_size, sz;
/*
 * krb5_get_wrapped_length() is not directly invertible, so
 * probe decreasing candidate plaintext sizes until the wrapped
 * result fits in the remaining budget.
 */
123 wrapped_size = req_output_size + 1;
126 sz = krb5_get_wrapped_length(context,
127 ctx->crypto, wrapped_size);
128 } while (wrapped_size && sz > req_output_size);
129 if (wrapped_size == 0)
/* NOTE(review): guards against results too small to carry the
 * appended token header -- confirm intent against callers. */
133 if (wrapped_size < 16)
138 *max_input_size = wrapped_size;
/* Integrity-only: just the checksum is appended to the data. */
143 ret = krb5_crypto_get_checksum_type(context, ctx->crypto, &type);
147 ret = krb5_checksumsize(context, type, &cksumsize);
151 if (req_output_size < cksumsize)
154 /* Checksum is concatenated with data */
155 *max_input_size = req_output_size - cksumsize;
162 * Rotate "rrc" bytes to the front or back
165 static krb5_error_code
166 rrc_rotate(void *data, size_t len, uint16_t rrc, krb5_boolean unrotate)
/* A scratch area of rrc bytes is needed; buf covers small rotations
 * so the common case avoids a heap allocation. */
168 u_char *tmp, buf[256];
181 if (rrc <= sizeof(buf)) {
/* unrotate: move the leading rrc bytes back to the tail
 * ("left" is presumably len - rrc -- declared above this view). */
190 memcpy(tmp, data, rrc);
191 memmove(data, (u_char *)data + rrc, left);
192 memcpy((u_char *)data + left, tmp, rrc);
/* rotate: move the trailing rrc bytes to the front. */
194 memcpy(tmp, (u_char *)data + left, rrc);
195 memmove((u_char *)data + rrc, data, left);
196 memcpy(data, tmp, rrc);
/* Free tmp only when it was heap-allocated (large rrc). */
199 if (rrc > sizeof(buf))
/*
 * Return the first iov element whose buffer type matches "type".
 * Callers in this file treat a NULL result as "buffer not supplied".
 */
205 gss_iov_buffer_desc *
206 _gk_find_buffer(gss_iov_buffer_desc *iov, int iov_count, OM_uint32 type)
210 for (i = 0; i < iov_count; i++)
211 if (type == GSS_IOV_BUFFER_TYPE(iov[i].type))
/*
 * Ensure "buffer" holds exactly "size" bytes, allocating or
 * re-allocating as needed, and mark it with FLAG_ALLOCATED so that
 * gss_release_iov_buffer() knows to free it on the error paths.
 */
217 allocate_buffer(OM_uint32 *minor_status, gss_iov_buffer_desc *buffer, size_t size)
/* Already library-owned: reuse the allocation when the size matches. */
219 if (buffer->type & GSS_IOV_BUFFER_TYPE_FLAG_ALLOCATED) {
220 if (buffer->buffer.length == size)
221 return GSS_S_COMPLETE;
222 free(buffer->buffer.value);
225 buffer->buffer.value = malloc(size);
/* NOTE(review): length is assigned before the NULL check, so on
 * ENOMEM the descriptor is left with length==size and value==NULL;
 * harmless only if every caller bails out on GSS_S_FAILURE -- confirm. */
226 buffer->buffer.length = size;
227 if (buffer->buffer.value == NULL) {
228 *minor_status = ENOMEM;
229 return GSS_S_FAILURE;
231 buffer->type |= GSS_IOV_BUFFER_TYPE_FLAG_ALLOCATED;
233 return GSS_S_COMPLETE;
/*
 * IOV (scatter/gather) variant of RFC 4121 Wrap.  Builds the 16-byte
 * CFX token in the caller's HEADER buffer, then either encrypts the
 * payload in place (conf_req_flag set) with an encrypted copy of the
 * token in the TRAILER (or folded into the HEADER for DCE style), or
 * appends a keyed checksum in the TRAILER.  Buffers flagged ALLOCATE
 * are sized and allocated here via allocate_buffer().
 */
239 _gssapi_wrap_cfx_iov(OM_uint32 *minor_status,
241 krb5_context context,
244 gss_iov_buffer_desc *iov,
247 OM_uint32 major_status, junk;
248 gss_iov_buffer_desc *header, *trailer, *padding;
249 size_t gsshsize, k5hsize;
250 size_t gsstsize, k5tsize;
251 size_t i, padlength, rrc = 0, ec = 0;
252 gss_cfx_wrap_token token;
256 krb5_crypto_iov *data = NULL;
257 int paddingoffset = 0;
259 header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
260 if (header == NULL) {
261 *minor_status = EINVAL;
262 return GSS_S_FAILURE;
/* A PADDING buffer is mandatory whenever the crypto system pads. */
265 krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_PADDING, &padlength);
267 padding = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
268 if (padlength != 0 && padding == NULL) {
269 *minor_status = EINVAL;
270 return GSS_S_FAILURE;
273 trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
/*
 * Sealed: GSS header = krb5 crypto header + token; GSS trailer =
 * krb5 crypto trailer + encrypted copy of the token.
 */
278 krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_TRAILER, &k5tsize);
279 krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_HEADER, &k5hsize);
281 gsshsize = k5hsize + sizeof(*token);
282 gsstsize = k5tsize + sizeof(*token); /* encrypted token stored in trailer */
/* Unsealed: header is just the token, trailer is the checksum. */
286 krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_CHECKSUM, &k5tsize);
288 gsshsize = sizeof(*token);
296 if (trailer == NULL) {
297 /* conf_req_flag=0 doesn't support DCE_STYLE */
298 if (conf_req_flag == 0) {
299 *minor_status = EINVAL;
300 major_status = GSS_S_FAILURE;
/* DCE style with no trailer buffer: fold trailer bytes into header. */
304 if (IS_DCE_STYLE(ctx))
306 gsshsize += gsstsize;
308 } else if (GSS_IOV_BUFFER_FLAGS(trailer->type) & GSS_IOV_BUFFER_TYPE_FLAG_ALLOCATE) {
309 major_status = allocate_buffer(minor_status, trailer, gsstsize);
312 } else if (trailer->buffer.length < gsstsize) {
313 *minor_status = KRB5_BAD_MSIZE;
314 major_status = GSS_S_FAILURE;
317 trailer->buffer.length = gsstsize;
323 if (GSS_IOV_BUFFER_FLAGS(header->type) & GSS_IOV_BUFFER_TYPE_FLAG_ALLOCATE) {
324 major_status = allocate_buffer(minor_status, header, gsshsize);
325 if (major_status != GSS_S_COMPLETE)
327 } else if (header->buffer.length < gsshsize) {
328 *minor_status = KRB5_BAD_MSIZE;
329 major_status = GSS_S_FAILURE;
332 header->buffer.length = gsshsize;
/* Lay down the RFC 4121 token header at the front of the buffer. */
334 token = (gss_cfx_wrap_token)header->buffer.value;
336 token->TOK_ID[0] = 0x05;
337 token->TOK_ID[1] = 0x04;
339 token->Filler = 0xFF;
341 if (ctx->more_flags & ACCEPTOR_SUBKEY)
342 token->Flags |= CFXAcceptorSubkey;
/* Key usage depends on which side of the context we are. */
344 if (ctx->more_flags & LOCAL)
345 usage = KRB5_KU_USAGE_INITIATOR_SEAL;
347 usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
351 * In Wrap tokens with confidentiality, the EC field is
352 * used to encode the size (in bytes) of the random filler.
354 token->Flags |= CFXSealed;
355 token->EC[0] = (padlength >> 8) & 0xFF;
356 token->EC[1] = (padlength >> 0) & 0xFF;
360 * In Wrap tokens without confidentiality, the EC field is
361 * used to encode the size (in bytes) of the trailing
364 * This is not used in the checksum calculation itself,
365 * because the checksum length could potentially vary
366 * depending on the data length.
373 * In Wrap tokens that provide for confidentiality, the RRC
374 * field in the header contains the hex value 00 00 before
377 * In Wrap tokens that do not provide for confidentiality,
378 * both the EC and RRC fields in the appended checksum
379 * contain the hex value 00 00 for the purpose of calculating
/* Stamp the 64-bit sequence number; upper 32 bits are always zero. */
385 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
386 krb5_auth_con_getlocalseqnumber(context,
389 _gsskrb5_encode_be_om_uint32(0, &token->SND_SEQ[0]);
390 _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
391 krb5_auth_con_setlocalseqnumber(context,
394 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
/* iov_count payload slots plus krb5 header, E"header" and trailer. */
396 data = calloc(iov_count + 3, sizeof(data[0]));
398 *minor_status = ENOMEM;
399 major_status = GSS_S_FAILURE;
407 {"header" | encrypt(plaintext-data | padding | E"header")}
409 Expanded, this is with RRC = 0:
411 {"header" | krb5-header | plaintext-data | padding | E"header" | krb5-trailer }
413 In DCE-RPC mode == no trailer: RRC = gss "trailer" == length(padding | E"header" | krb5-trailer)
415 {"header" | padding | E"header" | krb5-trailer | krb5-header | plaintext-data }
/* Slot 0: krb5 crypto header, placed at the tail of the GSS header. */
419 data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
420 data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
421 data[i].data.length = k5hsize;
/* Map each caller buffer onto the matching krb5 crypto iov slot. */
423 for (i = 1; i < iov_count + 1; i++) {
424 switch (GSS_IOV_BUFFER_TYPE(iov[i - 1].type)) {
425 case GSS_IOV_BUFFER_TYPE_DATA:
426 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
428 case GSS_IOV_BUFFER_TYPE_PADDING:
429 data[i].flags = KRB5_CRYPTO_TYPE_PADDING;
432 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
433 data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
436 data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
439 data[i].data.length = iov[i - 1].buffer.length;
440 data[i].data.data = iov[i - 1].buffer.value;
444 * Any necessary padding is added here to ensure that the
445 * encrypted token header is always at the end of the
449 /* XXX KRB5_CRYPTO_TYPE_PADDING */
451 /* encrypted CFX header in trailer (or after the header if in
452 DCE mode). Copy in header into E"header"
454 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
456 data[i].data.data = trailer->buffer.value;
458 data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize - k5tsize - sizeof(*token);
460 data[i].data.length = sizeof(*token);
461 memcpy(data[i].data.data, token, sizeof(*token));
464 /* Kerberos trailer comes after the gss trailer */
465 data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
466 data[i].data.data = ((uint8_t *)data[i-1].data.data) + sizeof(*token);
467 data[i].data.length = k5tsize;
470 ret = krb5_encrypt_iov_ivec(context, ctx->crypto, usage, data, i, NULL);
473 major_status = GSS_S_FAILURE;
/* RRC is written after encryption -- it is not itself protected. */
478 token->RRC[0] = (rrc >> 8) & 0xFF;
479 token->RRC[1] = (rrc >> 0) & 0xFF;
/* Report the pad length the crypto layer actually used. */
483 padding->buffer.length = data[paddingoffset].data.length;
489 {data | "header" | gss-trailer (krb5 checksum)
/* Integrity-only path: checksum payload, then header, into trailer. */
495 for (i = 0; i < iov_count; i++) {
496 switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
497 case GSS_IOV_BUFFER_TYPE_DATA:
498 case GSS_IOV_BUFFER_TYPE_PADDING:
499 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
501 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
502 data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
505 data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
508 data[i].data.length = iov[i].buffer.length;
509 data[i].data.data = iov[i].buffer.value;
512 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
513 data[i].data.data = header->buffer.value;
514 data[i].data.length = header->buffer.length;
517 data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
518 data[i].data.data = trailer->buffer.value;
519 data[i].data.length = trailer->buffer.length;
522 ret = krb5_create_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
525 major_status = GSS_S_FAILURE;
/* EC records the checksum length for integrity-only tokens. */
529 token->EC[0] = (trailer->buffer.length >> 8) & 0xFF;
530 token->EC[1] = (trailer->buffer.length >> 0) & 0xFF;
533 if (conf_state != NULL)
534 *conf_state = conf_req_flag;
539 return GSS_S_COMPLETE;
/* Error exit: free any buffers this call allocated. */
545 gss_release_iov_buffer(&junk, iov, iov_count);
550 /* This is slowpath */
/*
 * Undo an RRC rotation spread across several iov buffers: flatten the
 * DATA/PADDING/TRAILER buffers into one contiguous scratch copy,
 * un-rotate by rrc, and scatter the bytes back into the same buffers.
 */
552 unrotate_iov(OM_uint32 *minor_status, size_t rrc, gss_iov_buffer_desc *iov, int iov_count)
555 size_t len = 0, skip;
/* Total length of the rotated region (everything but the header). */
558 for (i = 0; i < iov_count; i++)
559 if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
560 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
561 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
562 len += iov[i].buffer.length;
566 *minor_status = ENOMEM;
567 return GSS_S_FAILURE;
/* Gather all rotated bytes contiguously into the scratch buffer. */
573 for (i = 0; i < iov_count; i++) {
574 if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
575 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
576 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
578 memcpy(q, iov[i].buffer.value, iov[i].buffer.length);
579 q += iov[i].buffer.length;
582 assert((q - p) == len);
584 /* unrotate first part */
/* Scatter the tail of the scratch copy into the leading buffers,
 * skipping the first rrc destination bytes. */
587 for (i = 0; i < iov_count; i++) {
588 if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
589 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
590 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
592 if (iov[i].buffer.length <= skip) {
593 skip -= iov[i].buffer.length;
595 memcpy(((uint8_t *)iov[i].buffer.value) + skip, q, iov[i].buffer.length - skip);
596 q += iov[i].buffer.length - skip;
/* Then place the rotated head bytes at the very front. */
604 for (i = 0; i < iov_count; i++) {
605 if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
606 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
607 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
609 memcpy(q, iov[i].buffer.value, MIN(iov[i].buffer.length, skip));
610 if (iov[i].buffer.length > skip)
612 skip -= iov[i].buffer.length;
613 q += iov[i].buffer.length;
616 return GSS_S_COMPLETE;
/*
 * IOV (scatter/gather) variant of RFC 4121 Unwrap: validate the CFX
 * token header, check the sequence number, then either decrypt the
 * payload in place and compare the embedded header copy (sealed), or
 * verify the appended checksum (unsealed).
 */
621 _gssapi_unwrap_cfx_iov(OM_uint32 *minor_status,
623 krb5_context context,
625 gss_qop_t *qop_state,
626 gss_iov_buffer_desc *iov,
629 OM_uint32 seq_number_lo, seq_number_hi, major_status, junk;
630 gss_iov_buffer_desc *header, *trailer;
631 gss_cfx_wrap_token token, ttoken;
636 krb5_crypto_iov *data = NULL;
641 header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
642 if (header == NULL) {
643 *minor_status = EINVAL;
644 return GSS_S_FAILURE;
647 if (header->buffer.length < sizeof(*token)) /* we check exact below */
648 return GSS_S_DEFECTIVE_TOKEN;
650 trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
652 token = (gss_cfx_wrap_token)header->buffer.value;
/* Wrap tokens carry TOK_ID 05 04 (RFC 4121). */
654 if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04)
655 return GSS_S_DEFECTIVE_TOKEN;
657 /* Ignore unknown flags */
658 token_flags = token->Flags &
659 (CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);
/* Direction check: an acceptor-sent token must arrive at an initiator. */
661 if (token_flags & CFXSentByAcceptor) {
662 if ((ctx->more_flags & LOCAL) == 0)
663 return GSS_S_DEFECTIVE_TOKEN;
/* The subkey flag must agree with the negotiated context state. */
666 if (ctx->more_flags & ACCEPTOR_SUBKEY) {
667 if ((token_flags & CFXAcceptorSubkey) == 0)
668 return GSS_S_DEFECTIVE_TOKEN;
670 if (token_flags & CFXAcceptorSubkey)
671 return GSS_S_DEFECTIVE_TOKEN;
674 if (token->Filler != 0xFF)
675 return GSS_S_DEFECTIVE_TOKEN;
677 if (conf_state != NULL)
678 *conf_state = (token_flags & CFXSealed) ? 1 : 0;
/* EC and RRC are big-endian 16-bit fields. */
680 ec = (token->EC[0] << 8) | token->EC[1];
681 rrc = (token->RRC[0] << 8) | token->RRC[1];
684 * Check sequence number
686 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
687 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
689 /* no support for 64-bit sequence numbers */
690 *minor_status = ERANGE;
691 return GSS_S_UNSEQ_TOKEN;
694 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
695 ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
698 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
701 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
704 * Decrypt and/or verify checksum
/* Mirror of wrap: we verify with the peer's sealing key usage. */
707 if (ctx->more_flags & LOCAL) {
708 usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
710 usage = KRB5_KU_USAGE_INITIATOR_SEAL;
713 data = calloc(iov_count + 3, sizeof(data[0]));
715 *minor_status = ENOMEM;
716 major_status = GSS_S_FAILURE;
720 if (token_flags & CFXSealed) {
721 size_t k5tsize, k5hsize;
723 krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_HEADER, &k5hsize);
724 krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_TRAILER, &k5tsize);
726 /* Rotate by RRC; bogus to do this in-place XXX */
/* With no trailer buffer (DCE), the RRC must equal the full GSS
 * trailer length and the header must carry the folded trailer. */
729 if (trailer == NULL) {
730 size_t gsstsize = k5tsize + sizeof(*token);
731 size_t gsshsize = k5hsize + sizeof(*token);
733 if (IS_DCE_STYLE(ctx))
735 gsshsize += gsstsize;
737 if (rrc != gsstsize) {
738 major_status = GSS_S_DEFECTIVE_TOKEN;
741 if (header->buffer.length != gsshsize) {
742 major_status = GSS_S_DEFECTIVE_TOKEN;
745 } else if (trailer->buffer.length != sizeof(*token) + k5tsize) {
746 major_status = GSS_S_DEFECTIVE_TOKEN;
748 } else if (header->buffer.length != sizeof(*token) + k5hsize) {
749 major_status = GSS_S_DEFECTIVE_TOKEN;
751 } else if (rrc != 0) {
752 /* go though slowpath */
753 major_status = unrotate_iov(minor_status, rrc, iov, iov_count);
/* Build the krb5 crypto iov: slot 0 is the krb5 header at the
 * tail of the GSS header buffer. */
759 data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
760 data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
761 data[i].data.length = k5hsize;
764 for (j = 0; j < iov_count; i++, j++) {
765 switch (GSS_IOV_BUFFER_TYPE(iov[j].type)) {
766 case GSS_IOV_BUFFER_TYPE_DATA:
767 case GSS_IOV_BUFFER_TYPE_PADDING:
768 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
770 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
771 data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
774 data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
777 data[i].data.length = iov[j].buffer.length;
778 data[i].data.data = iov[j].buffer.value;
781 /* encrypted CFX header in trailer (or after the header if in
782 DCE mode). Copy in header into E"header"
784 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
786 data[i].data.data = trailer->buffer.value;
788 data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize - k5tsize - sizeof(*token);
789 data[i].data.length = sizeof(*token);
790 ttoken = (gss_cfx_wrap_token)data[i].data.data;
793 /* Kerberos trailer comes after the gss trailer */
794 data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
795 data[i].data.data = ((uint8_t *)data[i-1].data.data) + sizeof(*token);
796 data[i].data.length = k5tsize;
799 ret = krb5_decrypt_iov_ivec(context, ctx->crypto, usage, data, i, NULL);
802 major_status = GSS_S_FAILURE;
/* RRC is not covered by encryption; copy it into the decrypted
 * header copy so the memcmp below compares only protected fields. */
806 ttoken->RRC[0] = token->RRC[0];
807 ttoken->RRC[1] = token->RRC[1];
809 /* Check the integrity of the header */
810 if (memcmp(ttoken, token, sizeof(*token)) != 0) {
811 major_status = GSS_S_BAD_MIC;
817 *minor_status = EINVAL;
818 major_status = GSS_S_FAILURE;
/* Unsealed tokens require a trailer holding exactly EC checksum bytes. */
822 if (trailer == NULL) {
823 *minor_status = EINVAL;
824 major_status = GSS_S_FAILURE;
828 if (trailer->buffer.length != ec) {
829 *minor_status = EINVAL;
830 major_status = GSS_S_FAILURE;
834 for (i = 0; i < iov_count; i++) {
835 switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
836 case GSS_IOV_BUFFER_TYPE_DATA:
837 case GSS_IOV_BUFFER_TYPE_PADDING:
838 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
840 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
841 data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
844 data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
847 data[i].data.length = iov[i].buffer.length;
848 data[i].data.data = iov[i].buffer.value;
/* Header is checksummed after the payload, matching the wrap side. */
851 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
852 data[i].data.data = header->buffer.value;
853 data[i].data.length = header->buffer.length;
856 data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
857 data[i].data.data = trailer->buffer.value;
858 data[i].data.length = trailer->buffer.length;
861 token = (gss_cfx_wrap_token)header->buffer.value;
867 ret = krb5_verify_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
870 major_status = GSS_S_FAILURE;
875 if (qop_state != NULL) {
876 *qop_state = GSS_C_QOP_DEFAULT;
882 return GSS_S_COMPLETE;
/* Error exit: free anything allocated into the iov. */
888 gss_release_iov_buffer(&junk, iov, iov_count);
/*
 * Fill in the required buffer lengths for a subsequent IOV wrap:
 * HEADER/TRAILER get the crypto layer's sizes, PADDING gets the pad
 * needed for the accumulated DATA length.  Only the lengths are set;
 * no allocation happens here.
 */
894 _gssapi_wrap_iov_length_cfx(OM_uint32 *minor_status,
896 krb5_context context,
900 gss_iov_buffer_desc *iov,
905 size_t *padding = NULL;
907 GSSAPI_KRB5_INIT (&context);
910 for (size = 0, i = 0; i < iov_count; i++) {
911 switch(GSS_IOV_BUFFER_TYPE(iov[i].type)) {
912 case GSS_IOV_BUFFER_TYPE_EMPTY:
914 case GSS_IOV_BUFFER_TYPE_DATA:
915 size += iov[i].buffer.length;
917 case GSS_IOV_BUFFER_TYPE_HEADER:
918 *minor_status = krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_HEADER, &iov[i].buffer.length);
920 return GSS_S_FAILURE;
922 case GSS_IOV_BUFFER_TYPE_TRAILER:
923 *minor_status = krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_TRAILER, &iov[i].buffer.length);
925 return GSS_S_FAILURE;
/* Only one PADDING buffer is allowed per iov array. */
927 case GSS_IOV_BUFFER_TYPE_PADDING:
928 if (padding != NULL) {
930 return GSS_S_FAILURE;
/* Remember where to store the pad length once "size" is known. */
932 padding = &iov[i].buffer.length;
934 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
937 *minor_status = EINVAL;
938 return GSS_S_FAILURE;
/* Pad the accumulated data length up to the crypto pad size. */
943 krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_PADDING, &pad);
945 *padding = pad - (size % pad);
952 return GSS_S_COMPLETE;
/*
 * Non-IOV RFC 4121 Wrap: produce a single contiguous output token.
 * With confidentiality the plaintext, pad, and a copy of the token
 * header are encrypted and the result rotated to the front (RRC);
 * otherwise a checksum over (plaintext | header) is appended and
 * rotated likewise.
 */
958 OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
959 const gsskrb5_ctx ctx,
960 krb5_context context,
962 const gss_buffer_t input_message_buffer,
964 gss_buffer_t output_message_buffer)
966 gss_cfx_wrap_token token;
970 size_t wrapped_len, cksumsize;
971 uint16_t padlength, rrc = 0;
/* Size the output first so it can be allocated in one shot. */
975 ret = _gsskrb5cfx_wrap_length_cfx(ctx, context,
976 ctx->crypto, conf_req_flag,
977 input_message_buffer->length,
978 &wrapped_len, &cksumsize, &padlength);
981 return GSS_S_FAILURE;
984 /* Always rotate encrypted token (if any) and checksum to header */
985 rrc = (conf_req_flag ? sizeof(*token) : 0) + (uint16_t)cksumsize;
987 output_message_buffer->length = wrapped_len;
988 output_message_buffer->value = malloc(output_message_buffer->length);
989 if (output_message_buffer->value == NULL) {
990 *minor_status = ENOMEM;
991 return GSS_S_FAILURE;
/* Build the 16-byte token header at the front of the output. */
994 p = output_message_buffer->value;
995 token = (gss_cfx_wrap_token)p;
996 token->TOK_ID[0] = 0x05;
997 token->TOK_ID[1] = 0x04;
999 token->Filler = 0xFF;
1000 if ((ctx->more_flags & LOCAL) == 0)
1001 token->Flags |= CFXSentByAcceptor;
1002 if (ctx->more_flags & ACCEPTOR_SUBKEY)
1003 token->Flags |= CFXAcceptorSubkey;
1004 if (conf_req_flag) {
1006 * In Wrap tokens with confidentiality, the EC field is
1007 * used to encode the size (in bytes) of the random filler.
1009 token->Flags |= CFXSealed;
1010 token->EC[0] = (padlength >> 8) & 0xFF;
1011 token->EC[1] = (padlength >> 0) & 0xFF;
1014 * In Wrap tokens without confidentiality, the EC field is
1015 * used to encode the size (in bytes) of the trailing
1018 * This is not used in the checksum calculation itself,
1019 * because the checksum length could potentially vary
1020 * depending on the data length.
1027 * In Wrap tokens that provide for confidentiality, the RRC
1028 * field in the header contains the hex value 00 00 before
1031 * In Wrap tokens that do not provide for confidentiality,
1032 * both the EC and RRC fields in the appended checksum
1033 * contain the hex value 00 00 for the purpose of calculating
/* Stamp the 64-bit sequence number; upper 32 bits always zero. */
1039 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1040 krb5_auth_con_getlocalseqnumber(context,
1043 _gsskrb5_encode_be_om_uint32(0, &token->SND_SEQ[0]);
1044 _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
1045 krb5_auth_con_setlocalseqnumber(context,
1048 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1051 * If confidentiality is requested, the token header is
1052 * appended to the plaintext before encryption; the resulting
1053 * token is {"header" | encrypt(plaintext | pad | "header")}.
1055 * If no confidentiality is requested, the checksum is
1056 * calculated over the plaintext concatenated with the
1059 if (ctx->more_flags & LOCAL) {
1060 usage = KRB5_KU_USAGE_INITIATOR_SEAL;
1062 usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
1065 if (conf_req_flag) {
1067 * Any necessary padding is added here to ensure that the
1068 * encrypted token header is always at the end of the
1071 * The specification does not require that the padding
1072 * bytes are initialized.
/* Assemble plaintext | pad | token-copy after the header, then
 * encrypt that region in place via the cipher buffer. */
1074 p += sizeof(*token);
1075 memcpy(p, input_message_buffer->value, input_message_buffer->length);
1076 memset(p + input_message_buffer->length, 0xFF, padlength);
1077 memcpy(p + input_message_buffer->length + padlength,
1078 token, sizeof(*token));
1080 ret = krb5_encrypt(context, ctx->crypto,
1082 input_message_buffer->length + padlength +
1086 *minor_status = ret;
1087 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1088 return GSS_S_FAILURE;
1090 assert(sizeof(*token) + cipher.length == wrapped_len);
/* RRC is written after encryption -- it is not itself protected. */
1091 token->RRC[0] = (rrc >> 8) & 0xFF;
1092 token->RRC[1] = (rrc >> 0) & 0xFF;
1095 * this is really ugly, but needed against windows
1096 * for DCERPC, as windows rotates by EC+RRC.
1098 if (IS_DCE_STYLE(ctx)) {
1099 ret = rrc_rotate(cipher.data, cipher.length, rrc+padlength, FALSE);
1101 ret = rrc_rotate(cipher.data, cipher.length, rrc, FALSE);
1104 *minor_status = ret;
1105 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1106 return GSS_S_FAILURE;
1108 memcpy(p, cipher.data, cipher.length);
1109 krb5_data_free(&cipher);
/* Integrity-only: checksum is computed over plaintext | token. */
1114 buf = malloc(input_message_buffer->length + sizeof(*token));
1116 *minor_status = ENOMEM;
1117 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1118 return GSS_S_FAILURE;
1120 memcpy(buf, input_message_buffer->value, input_message_buffer->length);
1121 memcpy(buf + input_message_buffer->length, token, sizeof(*token));
1123 ret = krb5_create_checksum(context, ctx->crypto,
1125 input_message_buffer->length +
1129 *minor_status = ret;
1130 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1132 return GSS_S_FAILURE;
/* EC = checksum length; EC and RRC were zero while checksumming. */
1137 assert(cksum.checksum.length == cksumsize);
1138 token->EC[0] = (cksum.checksum.length >> 8) & 0xFF;
1139 token->EC[1] = (cksum.checksum.length >> 0) & 0xFF;
1140 token->RRC[0] = (rrc >> 8) & 0xFF;
1141 token->RRC[1] = (rrc >> 0) & 0xFF;
/* Emit plaintext | checksum after the header, rotated by RRC. */
1143 p += sizeof(*token);
1144 memcpy(p, input_message_buffer->value, input_message_buffer->length);
1145 memcpy(p + input_message_buffer->length,
1146 cksum.checksum.data, cksum.checksum.length);
1149 input_message_buffer->length + cksum.checksum.length, rrc, FALSE);
1151 *minor_status = ret;
1152 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1153 free_Checksum(&cksum);
1154 return GSS_S_FAILURE;
1156 free_Checksum(&cksum);
1159 if (conf_state != NULL) {
1160 *conf_state = conf_req_flag;
1164 return GSS_S_COMPLETE;
/*
 * Non-IOV RFC 4121 Unwrap: validate the CFX token header and sequence
 * number, undo the RRC rotation, then decrypt and compare the embedded
 * header copy (sealed) or verify the trailing checksum (unsealed).
 * On success *output_message_buffer receives the plaintext, which the
 * caller must release.
 */
1167 OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
1168 const gsskrb5_ctx ctx,
1169 krb5_context context,
1170 const gss_buffer_t input_message_buffer,
1171 gss_buffer_t output_message_buffer,
1173 gss_qop_t *qop_state)
1175 gss_cfx_wrap_token token;
1177 krb5_error_code ret;
1181 OM_uint32 seq_number_lo, seq_number_hi;
1187 if (input_message_buffer->length < sizeof(*token)) {
1188 return GSS_S_DEFECTIVE_TOKEN;
1191 p = input_message_buffer->value;
1193 token = (gss_cfx_wrap_token)p;
/* Wrap tokens carry TOK_ID 05 04 (RFC 4121). */
1195 if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04) {
1196 return GSS_S_DEFECTIVE_TOKEN;
1199 /* Ignore unknown flags */
1200 token_flags = token->Flags &
1201 (CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);
/* Direction check: an acceptor-sent token must arrive at an initiator. */
1203 if (token_flags & CFXSentByAcceptor) {
1204 if ((ctx->more_flags & LOCAL) == 0)
1205 return GSS_S_DEFECTIVE_TOKEN;
/* The subkey flag must agree with the negotiated context state. */
1208 if (ctx->more_flags & ACCEPTOR_SUBKEY) {
1209 if ((token_flags & CFXAcceptorSubkey) == 0)
1210 return GSS_S_DEFECTIVE_TOKEN;
1212 if (token_flags & CFXAcceptorSubkey)
1213 return GSS_S_DEFECTIVE_TOKEN;
1216 if (token->Filler != 0xFF) {
1217 return GSS_S_DEFECTIVE_TOKEN;
1220 if (conf_state != NULL) {
1221 *conf_state = (token_flags & CFXSealed) ? 1 : 0;
/* EC and RRC are big-endian 16-bit fields. */
1224 ec = (token->EC[0] << 8) | token->EC[1];
1225 rrc = (token->RRC[0] << 8) | token->RRC[1];
1228 * Check sequence number
1230 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
1231 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
1232 if (seq_number_hi) {
1233 /* no support for 64-bit sequence numbers */
1234 *minor_status = ERANGE;
1235 return GSS_S_UNSEQ_TOKEN;
1238 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1239 ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
1242 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1243 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1246 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1249 * Decrypt and/or verify checksum
/* Mirror of wrap: verify with the peer's sealing key usage. */
1252 if (ctx->more_flags & LOCAL) {
1253 usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
1255 usage = KRB5_KU_USAGE_INITIATOR_SEAL;
/* p/len now cover the token body after the 16-byte header. */
1258 p += sizeof(*token);
1259 len = input_message_buffer->length;
1260 len -= (p - (u_char *)input_message_buffer->value);
1262 if (token_flags & CFXSealed) {
1264 * this is really ugly, but needed against windows
1265 * for DCERPC, as windows rotates by EC+RRC.
1267 if (IS_DCE_STYLE(ctx)) {
1268 *minor_status = rrc_rotate(p, len, rrc+ec, TRUE);
1270 *minor_status = rrc_rotate(p, len, rrc, TRUE);
1272 if (*minor_status != 0) {
1273 return GSS_S_FAILURE;
1276 ret = krb5_decrypt(context, ctx->crypto, usage,
1279 *minor_status = ret;
1280 return GSS_S_BAD_MIC;
1283 /* Check that there is room for the pad and token header */
1284 if (data.length < ec + sizeof(*token)) {
1285 krb5_data_free(&data);
1286 return GSS_S_DEFECTIVE_TOKEN;
/* p now points at the decrypted copy of the token header. */
1289 p += data.length - sizeof(*token);
1291 /* RRC is unprotected; don't modify input buffer */
1292 ((gss_cfx_wrap_token)p)->RRC[0] = token->RRC[0];
1293 ((gss_cfx_wrap_token)p)->RRC[1] = token->RRC[1];
1295 /* Check the integrity of the header */
1296 if (memcmp(p, token, sizeof(*token)) != 0) {
1297 krb5_data_free(&data);
1298 return GSS_S_BAD_MIC;
/* Plaintext is the decrypted data minus the pad and token copy. */
1301 output_message_buffer->value = data.data;
1302 output_message_buffer->length = data.length - ec - sizeof(*token);
1306 /* Rotate by RRC; bogus to do this in-place XXX */
1307 *minor_status = rrc_rotate(p, len, rrc, TRUE);
1308 if (*minor_status != 0) {
1309 return GSS_S_FAILURE;
1312 /* Determine checksum type */
1313 ret = krb5_crypto_get_checksum_type(context,
1317 *minor_status = ret;
1318 return GSS_S_FAILURE;
/* Integrity-only tokens carry EC checksum bytes at the tail. */
1321 cksum.checksum.length = ec;
1323 /* Check we have at least as much data as the checksum */
1324 if (len < cksum.checksum.length) {
1325 *minor_status = ERANGE;
1326 return GSS_S_BAD_MIC;
1329 /* Length now is of the plaintext only, no checksum */
1330 len -= cksum.checksum.length;
1331 cksum.checksum.data = p + len;
1333 output_message_buffer->length = len; /* for later */
1334 output_message_buffer->value = malloc(len + sizeof(*token));
1335 if (output_message_buffer->value == NULL) {
1336 *minor_status = ENOMEM;
1337 return GSS_S_FAILURE;
1340 /* Checksum is over (plaintext-data | "header") */
1341 memcpy(output_message_buffer->value, p, len);
1342 memcpy((u_char *)output_message_buffer->value + len,
1343 token, sizeof(*token));
1345 /* EC is not included in checksum calculation */
1346 token = (gss_cfx_wrap_token)((u_char *)output_message_buffer->value +
1353 ret = krb5_verify_checksum(context, ctx->crypto,
1355 output_message_buffer->value,
1356 len + sizeof(*token),
1359 *minor_status = ret;
1360 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1361 return GSS_S_BAD_MIC;
1365 if (qop_state != NULL) {
1366 *qop_state = GSS_C_QOP_DEFAULT;
1370 return GSS_S_COMPLETE;
/*
 * RFC 4121 GetMIC: emit a token of { 16-byte MIC header |
 * checksum(plaintext | header) }.  The checksum input is built in a
 * scratch buffer with the plaintext first and the header appended.
 */
1373 OM_uint32 _gssapi_mic_cfx(OM_uint32 *minor_status,
1374 const gsskrb5_ctx ctx,
1375 krb5_context context,
1377 const gss_buffer_t message_buffer,
1378 gss_buffer_t message_token)
1380 gss_cfx_mic_token token;
1381 krb5_error_code ret;
1388 len = message_buffer->length + sizeof(*token);
1391 *minor_status = ENOMEM;
1392 return GSS_S_FAILURE;
/* Scratch layout: plaintext first, then the MIC header. */
1395 memcpy(buf, message_buffer->value, message_buffer->length);
1397 token = (gss_cfx_mic_token)(buf + message_buffer->length);
/* MIC tokens carry TOK_ID 04 04 (RFC 4121). */
1398 token->TOK_ID[0] = 0x04;
1399 token->TOK_ID[1] = 0x04;
1401 if ((ctx->more_flags & LOCAL) == 0)
1402 token->Flags |= CFXSentByAcceptor;
1403 if (ctx->more_flags & ACCEPTOR_SUBKEY)
1404 token->Flags |= CFXAcceptorSubkey;
1405 memset(token->Filler, 0xFF, 5);
/* Stamp the 64-bit sequence number; upper 32 bits always zero. */
1407 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1408 krb5_auth_con_getlocalseqnumber(context,
1411 _gsskrb5_encode_be_om_uint32(0, &token->SND_SEQ[0]);
1412 _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
1413 krb5_auth_con_setlocalseqnumber(context,
1416 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
/* Signing key usage depends on which side of the context we are. */
1418 if (ctx->more_flags & LOCAL) {
1419 usage = KRB5_KU_USAGE_INITIATOR_SIGN;
1421 usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
1424 ret = krb5_create_checksum(context, ctx->crypto,
1425 usage, 0, buf, len, &cksum);
1427 *minor_status = ret;
1429 return GSS_S_FAILURE;
1432 /* Determine MIC length */
1433 message_token->length = sizeof(*token) + cksum.checksum.length;
1434 message_token->value = malloc(message_token->length);
1435 if (message_token->value == NULL) {
1436 *minor_status = ENOMEM;
1437 free_Checksum(&cksum);
1439 return GSS_S_FAILURE;
1442 /* Token is { "header" | get_mic("header" | plaintext-data) } */
1443 memcpy(message_token->value, token, sizeof(*token));
1444 memcpy((u_char *)message_token->value + sizeof(*token),
1445 cksum.checksum.data, cksum.checksum.length);
1447 free_Checksum(&cksum);
1451 return GSS_S_COMPLETE;
/*
 * RFC 4121 VerifyMIC: validate the MIC token header and sequence
 * number, then verify the checksum over (plaintext | header) using
 * the peer's signing key usage.
 */
1454 OM_uint32 _gssapi_verify_mic_cfx(OM_uint32 *minor_status,
1455 const gsskrb5_ctx ctx,
1456 krb5_context context,
1457 const gss_buffer_t message_buffer,
1458 const gss_buffer_t token_buffer,
1459 gss_qop_t *qop_state)
1461 gss_cfx_mic_token token;
1463 krb5_error_code ret;
1465 OM_uint32 seq_number_lo, seq_number_hi;
1471 if (token_buffer->length < sizeof(*token)) {
1472 return GSS_S_DEFECTIVE_TOKEN;
1475 p = token_buffer->value;
1477 token = (gss_cfx_mic_token)p;
/* MIC tokens carry TOK_ID 04 04 (RFC 4121). */
1479 if (token->TOK_ID[0] != 0x04 || token->TOK_ID[1] != 0x04) {
1480 return GSS_S_DEFECTIVE_TOKEN;
1483 /* Ignore unknown flags */
1484 token_flags = token->Flags & (CFXSentByAcceptor | CFXAcceptorSubkey);
/* Direction check: an acceptor-sent token must arrive at an initiator. */
1486 if (token_flags & CFXSentByAcceptor) {
1487 if ((ctx->more_flags & LOCAL) == 0)
1488 return GSS_S_DEFECTIVE_TOKEN;
/* The subkey flag must agree with the negotiated context state. */
1490 if (ctx->more_flags & ACCEPTOR_SUBKEY) {
1491 if ((token_flags & CFXAcceptorSubkey) == 0)
1492 return GSS_S_DEFECTIVE_TOKEN;
1494 if (token_flags & CFXAcceptorSubkey)
1495 return GSS_S_DEFECTIVE_TOKEN;
1498 if (memcmp(token->Filler, "\xff\xff\xff\xff\xff", 5) != 0) {
1499 return GSS_S_DEFECTIVE_TOKEN;
1503 * Check sequence number
1505 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
1506 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
1507 if (seq_number_hi) {
1508 *minor_status = ERANGE;
1509 return GSS_S_UNSEQ_TOKEN;
1512 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1513 ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
1516 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1519 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1524 ret = krb5_crypto_get_checksum_type(context, ctx->crypto,
1527 *minor_status = ret;
1528 return GSS_S_FAILURE;
/* The checksum bytes follow the 16-byte MIC header in the token. */
1531 cksum.checksum.data = p + sizeof(*token);
1532 cksum.checksum.length = token_buffer->length - sizeof(*token);
/* Mirror of GetMIC: verify with the peer's signing key usage. */
1534 if (ctx->more_flags & LOCAL) {
1535 usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
1537 usage = KRB5_KU_USAGE_INITIATOR_SIGN;
/* Rebuild the checksummed input: plaintext followed by the header. */
1540 buf = malloc(message_buffer->length + sizeof(*token));
1542 *minor_status = ENOMEM;
1543 return GSS_S_FAILURE;
1545 memcpy(buf, message_buffer->value, message_buffer->length);
1546 memcpy(buf + message_buffer->length, token, sizeof(*token));
1548 ret = krb5_verify_checksum(context, ctx->crypto,
1551 sizeof(*token) + message_buffer->length,
1554 *minor_status = ret;
1556 return GSS_S_BAD_MIC;
1561 if (qop_state != NULL) {
1562 *qop_state = GSS_C_QOP_DEFAULT;
1565 return GSS_S_COMPLETE;