gss: allow partial accept context export in SPNEGO

Add support for exporting partially established acceptor security contexts. With
this, an acceptor can hand the initiator an encrypted state cookie containing the
exported context token, instead of keeping per-initiator negotiation state in
memory.

(The concrete mechanism must, of course, either complete in a single round trip
or itself support partial context export. Kerberos and GSS EAP would work, but
Kerberos with GSS_C_DCE_STYLE, as currently implemented, would not.)

Partial context export is not permitted for initiators.
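To make the intended flow concrete, here is a minimal sketch (not part of this
commit) of one leg of a stateless acceptor. accept_leg(), encrypt_cookie() and
decrypt_cookie() are hypothetical names; only gss_import_sec_context(),
gss_accept_sec_context() and gss_export_sec_context() are real GSS-API calls.

/*
 * Sketch only, under the assumptions above: one leg of a stateless acceptor.
 * encrypt_cookie()/decrypt_cookie() are hypothetical placeholders for
 * whatever protects the state cookie in transit.
 */
#include <gssapi/gssapi.h>

extern void encrypt_cookie(gss_buffer_t state, gss_buffer_t cookie);
extern void decrypt_cookie(gss_buffer_t cookie, gss_buffer_t state);

static OM_uint32
accept_leg(gss_buffer_t input_token,  /* initiator token from the wire */
           gss_buffer_t cookie_in,    /* state cookie from previous leg, if any */
           gss_buffer_t output_token, /* response token for the initiator */
           gss_buffer_t cookie_out)   /* refreshed state cookie */
{
    OM_uint32 major, minor, tmp;
    gss_ctx_id_t ctx = GSS_C_NO_CONTEXT;
    gss_buffer_desc state = GSS_C_EMPTY_BUFFER;

    if (cookie_in->length) {
        /* Resume the partially established context from the cookie. */
        decrypt_cookie(cookie_in, &state);
        major = gss_import_sec_context(&minor, &state, &ctx);
        gss_release_buffer(&tmp, &state);
        if (GSS_ERROR(major))
            return major;
    }

    major = gss_accept_sec_context(&minor, &ctx, GSS_C_NO_CREDENTIAL,
                                   input_token, GSS_C_NO_CHANNEL_BINDINGS,
                                   NULL, NULL, output_token, NULL, NULL, NULL);

    if (major == GSS_S_CONTINUE_NEEDED) {
        /* New with this commit: the partial acceptor context can be exported. */
        major = gss_export_sec_context(&minor, &ctx, &state);
        if (major == GSS_S_COMPLETE) {
            encrypt_cookie(&state, cookie_out);
            gss_release_buffer(&tmp, &state);
            major = GSS_S_CONTINUE_NEEDED;
        }
    }
    return major;
}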
Author: Luke Howard
Date:   2021-08-05 07:57:40 +10:00
parent fe426f7a28
commit 6554dc69b0
7 changed files with 695 additions and 105 deletions

lib/gssapi/Makefile.am

@@ -171,6 +171,7 @@ mechsrc = \
 spnegosrc = \
 	spnego/accept_sec_context.c \
 	spnego/compat.c \
+	spnego/context_storage.c \
 	spnego/context_stubs.c \
 	spnego/external.c \
 	spnego/init_sec_context.c \

lib/gssapi/NTMakefile

@@ -186,6 +186,7 @@ mechsrc = \
 spnegosrc = \
 	spnego/accept_sec_context.c \
 	spnego/compat.c \
+	spnego/context_storage.c \
 	spnego/context_stubs.c \
 	spnego/external.c \
 	spnego/init_sec_context.c \
@@ -465,6 +466,7 @@ libgssapi_OBJs = \
 	$(OBJ)\mech/gssspi_query_meta_data.obj \
 	$(OBJ)\spnego/accept_sec_context.obj \
 	$(OBJ)\spnego/compat.obj \
+	$(OBJ)\spnego/context_storage.obj \
 	$(OBJ)\spnego/context_stubs.obj \
 	$(OBJ)\spnego/external.obj \
 	$(OBJ)\spnego/init_sec_context.obj \

lib/gssapi/spnego/context_storage.c (new file)

@@ -0,0 +1,584 @@
/*
* Copyright (C) 2021, PADL Software Pty Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "spnego_locl.h"
#include "mech/mech_locl.h"
#define SC_MECH_TYPES 0x0001
#define SC_PREFERRED_MECH_TYPE 0x0002
#define SC_SELECTED_MECH_TYPE 0x0004
#define SC_NEGOTIATED_MECH_TYPE 0x0008
#define SC_NEGOTIATED_CTX_ID 0x0010
#define SC_MECH_FLAGS 0x0020
#define SC_MECH_TIME_REC 0x0040
#define SC_MECH_SRC_NAME 0x0080
#define SC_TARGET_NAME 0x0100
#define SC_NEGOEX 0x0200
#define SNC_OID 0x01
#define SNC_MECH_CONTEXT 0x02
#define SNC_METADATA 0x04
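/*
 * The SC_* bits record which optional fields of the SPNEGO context are
 * present in an exported token; the SNC_* bits do the same for each
 * serialized NegoEx auth mech entry.
 */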
static krb5_error_code
ret_spnego_context(krb5_storage *sp, gssspnego_ctx *ctxp);
static krb5_error_code
store_spnego_context(krb5_storage *sp, gssspnego_ctx ctx);
static krb5_error_code
ret_negoex_auth_mech(krb5_storage *sp, struct negoex_auth_mech **mechp);
static krb5_error_code
store_negoex_auth_mech(krb5_storage *sp, struct negoex_auth_mech *mech);
static krb5_error_code
ret_gss_oid(krb5_storage *sp, gss_OID *oidp);
static krb5_error_code
store_gss_oid(krb5_storage *sp, gss_OID oid);
static krb5_error_code
ret_gss_buffer(krb5_storage *sp, gss_buffer_t buffer);
static krb5_error_code
store_gss_buffer(krb5_storage *sp, gss_const_buffer_t buffer);
static uint16_t
spnego_flags_to_int(struct spnego_flags flags);
static struct spnego_flags
int_to_spnego_flags(uint16_t f);
OM_uint32 GSSAPI_CALLCONV
_gss_spnego_import_sec_context_internal(OM_uint32 *minor,
gss_const_buffer_t buffer,
gssspnego_ctx *ctxp)
{
krb5_error_code ret;
krb5_storage *sp;
sp = krb5_storage_from_readonly_mem(buffer->value, buffer->length);
if (sp == NULL) {
*minor = ENOMEM;
return GSS_S_FAILURE;
}
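/*
 * Packed byte order stores integers in a compact variable-length
 * encoding, keeping exported state cookies small.
 */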
krb5_storage_set_byteorder(sp, KRB5_STORAGE_BYTEORDER_PACKED);
ret = ret_spnego_context(sp, ctxp);
krb5_storage_free(sp);
*minor = ret;
return ret ? GSS_S_FAILURE : GSS_S_COMPLETE;
}
OM_uint32 GSSAPI_CALLCONV
_gss_spnego_export_sec_context_internal(OM_uint32 *minor,
gssspnego_ctx ctx,
gss_buffer_t buffer)
{
krb5_error_code ret;
krb5_storage *sp;
krb5_data data;
sp = krb5_storage_emem();
if (sp == NULL) {
*minor = ENOMEM;
return GSS_S_FAILURE;
}
krb5_data_zero(&data);
krb5_storage_set_byteorder(sp, KRB5_STORAGE_BYTEORDER_PACKED);
ret = store_spnego_context(sp, ctx);
if (ret == 0)
ret = krb5_storage_to_data(sp, &data);
if (ret == 0) {
buffer->length = data.length;
buffer->value = data.data;
}
krb5_storage_free(sp);
*minor = ret;
return ret ? GSS_S_FAILURE : GSS_S_COMPLETE;
}
static krb5_error_code
ret_spnego_context(krb5_storage *sp, gssspnego_ctx *ctxp)
{
OM_uint32 major = GSS_S_COMPLETE, minor;
gssspnego_ctx ctx = NULL;
krb5_error_code ret = 0;
krb5_data data;
gss_buffer_desc buf = GSS_C_EMPTY_BUFFER;
uint16_t sc_flags, spnego_flags;
*ctxp = NULL;
krb5_data_zero(&data);
CHECK(major, _gss_spnego_alloc_sec_context(&minor, (gss_ctx_id_t *)&ctx));
CHECK(ret, krb5_ret_uint16(sp, &sc_flags));
CHECK(ret, krb5_ret_uint16(sp, &spnego_flags));
ctx->flags = int_to_spnego_flags(spnego_flags);
if (sc_flags & SC_MECH_TYPES)
CHECK(ret, ret_gss_buffer(sp, &ctx->NegTokenInit_mech_types));
if (sc_flags & SC_PREFERRED_MECH_TYPE)
CHECK(ret, ret_gss_oid(sp, &ctx->preferred_mech_type));
if (sc_flags & SC_SELECTED_MECH_TYPE)
CHECK(ret, ret_gss_oid(sp, &ctx->selected_mech_type));
if (sc_flags & SC_NEGOTIATED_MECH_TYPE)
CHECK(ret, ret_gss_oid(sp, &ctx->negotiated_mech_type));
if (sc_flags & SC_NEGOTIATED_CTX_ID) {
CHECK(ret, ret_gss_buffer(sp, &buf));
CHECK(major, gss_import_sec_context(&minor, &buf,
&ctx->negotiated_ctx_id));
gss_release_buffer(&minor, &buf);
}
if (sc_flags & SC_MECH_FLAGS)
CHECK(ret, krb5_ret_uint32(sp, &ctx->mech_flags));
if (sc_flags & SC_MECH_TIME_REC)
CHECK(ret, krb5_ret_uint32(sp, &ctx->mech_time_rec));
else
ctx->mech_time_rec = GSS_C_INDEFINITE;
if (sc_flags & SC_MECH_SRC_NAME) {
CHECK(ret, ret_gss_buffer(sp, &buf));
CHECK(major, gss_import_name(&minor, &buf, GSS_C_NT_EXPORT_NAME,
&ctx->mech_src_name));
gss_release_buffer(&minor, &buf);
}
if (sc_flags & SC_TARGET_NAME) {
CHECK(ret, ret_gss_buffer(sp, &buf));
CHECK(major, gss_import_name(&minor, &buf, GSS_C_NT_EXPORT_NAME,
&ctx->target_name));
gss_release_buffer(&minor, &buf);
}
if (sc_flags & SC_NEGOEX) {
uint8_t i, nschemes;
CHECK(ret, krb5_ret_uint8(sp, &ctx->negoex_step));
CHECK(ret, krb5_ret_data(sp, &data));
ctx->negoex_transcript = krb5_storage_emem();
if (ctx->negoex_transcript == NULL) {
ret = ENOMEM;
goto fail;
}
krb5_storage_set_byteorder(ctx->negoex_transcript,
KRB5_STORAGE_BYTEORDER_LE);
if (krb5_storage_write(ctx->negoex_transcript,
data.data, data.length) != data.length) {
ret = ENOMEM;
goto fail;
}
krb5_data_free(&data);
CHECK(ret, krb5_ret_uint32(sp, &ctx->negoex_seqnum));
if (krb5_storage_read(sp, ctx->negoex_conv_id,
GUID_LENGTH) != GUID_LENGTH) {
ret = KRB5_BAD_MSIZE;
goto fail;
}
CHECK(ret, krb5_ret_uint8(sp, &nschemes));
for (i = 0; i < nschemes; i++) {
struct negoex_auth_mech *mech;
CHECK(ret, ret_negoex_auth_mech(sp, &mech));
HEIM_TAILQ_INSERT_TAIL(&ctx->negoex_mechs, mech, links);
}
}
*ctxp = ctx;
fail:
if (ret == 0 && GSS_ERROR(major))
ret = minor ? minor : KRB5_BAD_MSIZE;
if (ret)
_gss_spnego_delete_sec_context(&minor, (gss_ctx_id_t *)&ctx,
GSS_C_NO_BUFFER);
krb5_data_free(&data);
gss_release_buffer(&minor, &buf);
return ret;
}
static krb5_error_code
store_spnego_context(krb5_storage *sp, gssspnego_ctx ctx)
{
OM_uint32 major = GSS_S_COMPLETE, minor;
krb5_error_code ret = 0;
krb5_data data;
gss_buffer_desc buf = GSS_C_EMPTY_BUFFER;
uint16_t sc_flags = 0, spnego_flags;
krb5_data_zero(&data);
if (ctx->NegTokenInit_mech_types.length)
sc_flags |= SC_MECH_TYPES;
if (ctx->preferred_mech_type)
sc_flags |= SC_PREFERRED_MECH_TYPE;
if (ctx->selected_mech_type)
sc_flags |= SC_SELECTED_MECH_TYPE;
if (ctx->negotiated_mech_type)
sc_flags |= SC_NEGOTIATED_MECH_TYPE;
if (ctx->negotiated_ctx_id)
sc_flags |= SC_NEGOTIATED_CTX_ID;
if (ctx->mech_flags)
sc_flags |= SC_MECH_FLAGS;
if (ctx->mech_time_rec != GSS_C_INDEFINITE)
sc_flags |= SC_MECH_TIME_REC;
if (ctx->mech_src_name)
sc_flags |= SC_MECH_SRC_NAME;
if (ctx->target_name)
sc_flags |= SC_TARGET_NAME;
if (ctx->negoex_step)
sc_flags |= SC_NEGOEX;
CHECK(ret, krb5_store_uint16(sp, sc_flags));
spnego_flags = spnego_flags_to_int(ctx->flags);
CHECK(ret, krb5_store_uint16(sp, spnego_flags));
if (sc_flags & SC_MECH_TYPES)
CHECK(ret, store_gss_buffer(sp, &ctx->NegTokenInit_mech_types));
if (sc_flags & SC_PREFERRED_MECH_TYPE)
CHECK(ret, store_gss_oid(sp, ctx->preferred_mech_type));
if (sc_flags & SC_SELECTED_MECH_TYPE)
CHECK(ret, store_gss_oid(sp, ctx->selected_mech_type));
if (sc_flags & SC_NEGOTIATED_MECH_TYPE)
CHECK(ret, store_gss_oid(sp, ctx->negotiated_mech_type));
if (sc_flags & SC_NEGOTIATED_CTX_ID) {
CHECK(major, gss_export_sec_context(&minor, &ctx->negotiated_ctx_id,
&buf));
CHECK(ret, store_gss_buffer(sp, &buf));
gss_release_buffer(&minor, &buf);
}
if (sc_flags & SC_MECH_FLAGS)
CHECK(ret, krb5_store_uint32(sp, ctx->mech_flags));
if (sc_flags & SC_MECH_TIME_REC)
CHECK(ret, krb5_store_uint32(sp, ctx->mech_time_rec));
if (sc_flags & SC_MECH_SRC_NAME) {
CHECK(major, gss_export_name(&minor, ctx->mech_src_name, &buf));
CHECK(ret, store_gss_buffer(sp, &buf));
gss_release_buffer(&minor, &buf);
}
if (sc_flags & SC_TARGET_NAME) {
CHECK(major, gss_export_name(&minor, ctx->target_name, &buf));
CHECK(ret, store_gss_buffer(sp, &buf));
gss_release_buffer(&minor, &buf);
}
if (sc_flags & SC_NEGOEX) {
uint32_t nschemes;
struct negoex_auth_mech *mech;
if (ctx->negoex_step > 0xff) {
ret = ERANGE;
goto fail;
}
CHECK(ret, krb5_store_uint8(sp, ctx->negoex_step));
if (ctx->negoex_transcript) {
CHECK(ret, krb5_storage_to_data(ctx->negoex_transcript, &data));
}
CHECK(ret, krb5_store_data(sp, data));
krb5_data_free(&data);
CHECK(ret, krb5_store_uint32(sp, ctx->negoex_seqnum));
CHECK(ret, krb5_store_bytes(sp, ctx->negoex_conv_id, GUID_LENGTH));
nschemes = 0;
HEIM_TAILQ_FOREACH(mech, &ctx->negoex_mechs, links)
nschemes++;
if (nschemes > 0xff) {
ret = ERANGE;
goto fail;
}
CHECK(ret, krb5_store_uint8(sp, nschemes));
HEIM_TAILQ_FOREACH(mech, &ctx->negoex_mechs, links)
CHECK(ret, store_negoex_auth_mech(sp, mech));
}
fail:
if (ret == 0 && GSS_ERROR(major))
ret = minor ? minor : KRB5_BAD_MSIZE;
krb5_data_free(&data);
gss_release_buffer(&minor, &buf);
return ret;
}
static krb5_error_code
ret_negoex_auth_mech(krb5_storage *sp, struct negoex_auth_mech **mechp)
{
krb5_error_code ret;
OM_uint32 major = GSS_S_COMPLETE, minor;
gss_buffer_desc buf = GSS_C_EMPTY_BUFFER;
struct negoex_auth_mech *mech;
krb5_context context = _gss_mg_krb5_context();
uint8_t snc_flags, negoex_flags;
*mechp = NULL;
mech = calloc(1, sizeof(*mech));
if (mech == NULL) {
ret = ENOMEM;
goto fail;
}
CHECK(ret, krb5_ret_uint8(sp, &snc_flags));
CHECK(ret, krb5_ret_uint8(sp, &negoex_flags));
if (negoex_flags & (1 << 0))
mech->complete = 1;
if (negoex_flags & (1 << 1))
mech->sent_checksum = 1;
if (negoex_flags & (1 << 2))
mech->verified_checksum = 1;
if (snc_flags & SNC_OID)
CHECK(ret, ret_gss_oid(sp, &mech->oid));
if (krb5_storage_read(sp, mech->scheme, GUID_LENGTH) != GUID_LENGTH) {
ret = KRB5_BAD_MSIZE;
goto fail;
}
if (snc_flags & SNC_MECH_CONTEXT) {
CHECK(ret, ret_gss_buffer(sp, &buf));
CHECK(major, gss_import_sec_context(&minor, &buf,
&mech->mech_context));
gss_release_buffer(&minor, &buf);
}
if (snc_flags & SNC_METADATA)
CHECK(ret, ret_gss_buffer(sp, &mech->metadata));
*mechp = mech;
fail:
if (ret == 0 && GSS_ERROR(major))
ret = minor ? minor : KRB5_BAD_MSIZE;
if (ret)
_gss_negoex_release_auth_mech(context, mech);
gss_release_buffer(&minor, &buf);
return ret;
}
static krb5_error_code
store_negoex_auth_mech(krb5_storage *sp, struct negoex_auth_mech *mech)
{
krb5_error_code ret;
OM_uint32 major = GSS_S_COMPLETE, minor;
gss_buffer_desc buf = GSS_C_EMPTY_BUFFER;
uint8_t negoex_flags = 0, snc_flags = 0;
negoex_flags = 0;
if (mech->complete)
negoex_flags |= (1 << 0);
if (mech->sent_checksum)
negoex_flags |= (1 << 1);
if (mech->verified_checksum)
negoex_flags |= (1 << 2);
if (mech->oid)
snc_flags |= SNC_OID;
if (mech->mech_context)
snc_flags |= SNC_MECH_CONTEXT;
if (mech->metadata.length)
snc_flags |= SNC_METADATA;
CHECK(ret, krb5_store_uint8(sp, snc_flags));
CHECK(ret, krb5_store_uint8(sp, negoex_flags));
if (snc_flags & SNC_OID)
CHECK(ret, store_gss_oid(sp, mech->oid));
CHECK(ret, krb5_store_bytes(sp, mech->scheme, GUID_LENGTH));
if (snc_flags & SNC_MECH_CONTEXT) {
CHECK(major, gss_export_sec_context(&minor, &mech->mech_context,
&buf));
CHECK(ret, store_gss_buffer(sp, &buf));
}
if (snc_flags & SNC_METADATA)
CHECK(ret, store_gss_buffer(sp, &mech->metadata));
fail:
if (ret == 0 && GSS_ERROR(major))
ret = minor ? minor : KRB5_BAD_MSIZE;
gss_release_buffer(&minor, &buf);
return ret;
}
static krb5_error_code
ret_gss_oid(krb5_storage *sp, gss_OID *oidp)
{
krb5_data data;
krb5_error_code ret;
gss_OID_desc oid;
OM_uint32 major, minor;
*oidp = GSS_C_NO_OID;
ret = krb5_ret_data(sp, &data);
if (ret)
return ret;
if (data.length) {
oid.length = data.length;
oid.elements = data.data;
major = _gss_intern_oid(&minor, &oid, oidp);
} else
major = GSS_S_COMPLETE;
krb5_data_free(&data);
return GSS_ERROR(major) ? ENOMEM : 0;
}
static krb5_error_code
store_gss_oid(krb5_storage *sp, gss_OID oid)
{
krb5_data data;
krb5_data_zero(&data);
if (oid) {
data.length = oid->length;
data.data = oid->elements;
}
return krb5_store_data(sp, data);
}
static krb5_error_code
ret_gss_buffer(krb5_storage *sp, gss_buffer_t buffer)
{
krb5_error_code ret;
krb5_data data;
_mg_buffer_zero(buffer);
ret = krb5_ret_data(sp, &data);
if (ret)
return ret;
if (data.length) {
buffer->length = data.length;
buffer->value = data.data;
} else
krb5_data_free(&data);
return 0;
}
static krb5_error_code
store_gss_buffer(krb5_storage *sp, gss_const_buffer_t buffer)
{
krb5_data data;
krb5_data_zero(&data);
if (buffer) {
data.length = buffer->length;
data.data = buffer->value;
}
return krb5_store_data(sp, data);
}
static uint16_t
spnego_flags_to_int(struct spnego_flags flags)
{
uint16_t f = 0;
if (flags.open)
f |= (1 << 0);
if (flags.local)
f |= (1 << 1);
if (flags.require_mic)
f |= (1 << 2);
if (flags.peer_require_mic)
f |= (1 << 3);
if (flags.sent_mic)
f |= (1 << 4);
if (flags.verified_mic)
f |= (1 << 5);
if (flags.safe_omit)
f |= (1 << 6);
if (flags.maybe_open)
f |= (1 << 7);
if (flags.seen_supported_mech)
f |= (1 << 8);
return f;
}
static struct spnego_flags
int_to_spnego_flags(uint16_t f)
{
struct spnego_flags flags;
memset(&flags, 0, sizeof(flags));
if (f & (1 << 0))
flags.open = 1;
if (f & (1 << 1))
flags.local = 1;
if (f & (1 << 2))
flags.require_mic = 1;
if (f & (1 << 3))
flags.peer_require_mic = 1;
if (f & (1 << 4))
flags.sent_mic = 1;
if (f & (1 << 5))
flags.verified_mic = 1;
if (f & (1 << 6))
flags.safe_omit = 1;
if (f & (1 << 7))
flags.maybe_open = 1;
if (f & (1 << 8))
flags.seen_supported_mech = 1;
return flags;
}
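For reference, the exported token laid out by store_spnego_context() above is,
in order, with each optional field gated by its sc_flags bit:

uint16  sc_flags                  presence mask (SC_* bits)
uint16  spnego_flags              spnego_flags_to_int(ctx->flags)
data    NegTokenInit_mech_types   if SC_MECH_TYPES
data    preferred_mech_type       if SC_PREFERRED_MECH_TYPE (OID)
data    selected_mech_type        if SC_SELECTED_MECH_TYPE (OID)
data    negotiated_mech_type      if SC_NEGOTIATED_MECH_TYPE (OID)
data    negotiated_ctx_id         if SC_NEGOTIATED_CTX_ID (exported mech context)
uint32  mech_flags                if SC_MECH_FLAGS
uint32  mech_time_rec             if SC_MECH_TIME_REC
data    mech_src_name             if SC_MECH_SRC_NAME (exported name)
data    target_name               if SC_TARGET_NAME (exported name)
NegoEx  step, transcript, seqnum, conversation ID,
        and auth mech list        if SC_NEGOEX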

lib/gssapi/spnego/context_stubs.c

@@ -263,6 +263,9 @@ OM_uint32 GSSAPI_CALLCONV _gss_spnego_inquire_context (
                                    locally_initiated,
                                    open_context);

+    if (open_context)
+        *open_context = gssspnego_ctx_complete_p(ctx);
+
     return maj_stat;
 }
@@ -304,13 +307,12 @@ OM_uint32 GSSAPI_CALLCONV _gss_spnego_export_sec_context (
 )
 {
     gssspnego_ctx ctx;
-    OM_uint32 ret;
+    OM_uint32 major_status;
     *minor_status = 0;
-    if (context_handle == NULL) {
+    if (context_handle == NULL)
         return GSS_S_NO_CONTEXT;
-    }
     ctx = (gssspnego_ctx)*context_handle;
@@ -319,25 +321,30 @@ OM_uint32 GSSAPI_CALLCONV _gss_spnego_export_sec_context (
     HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);

-    if (ctx->negotiated_ctx_id == GSS_C_NO_CONTEXT) {
-        HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
-        return GSS_S_NO_CONTEXT;
-    }
-
-    ret = gss_export_sec_context(minor_status,
-                                 &ctx->negotiated_ctx_id,
-                                 interprocess_token);
-    if (ret == GSS_S_COMPLETE) {
-        ret = _gss_spnego_internal_delete_sec_context(minor_status,
-                                                      context_handle,
-                                                      GSS_C_NO_BUFFER);
-        if (ret == GSS_S_COMPLETE)
-            return GSS_S_COMPLETE;
-    }
-    HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
-    return ret;
+    /*
+     * Partial context export is only supported on the acceptor side, as we
+     * cannot represent the initiator function pointer state in an exported
+     * token, and also because it is mostly useful for acceptors which need
+     * to manage multiple initiator states.
+     */
+    if (ctx->flags.local && !gssspnego_ctx_complete_p(ctx)) {
+        major_status = GSS_S_NO_CONTEXT;
+        goto out;
+    }
+
+    major_status = _gss_spnego_export_sec_context_internal(minor_status,
+                                                           ctx,
+                                                           interprocess_token);
+
+out:
+    if (major_status == GSS_S_COMPLETE)
+        major_status = _gss_spnego_internal_delete_sec_context(minor_status,
+                                                               context_handle,
+                                                               GSS_C_NO_BUFFER);
+    else
+        HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
+
+    return major_status;
 }
OM_uint32 GSSAPI_CALLCONV _gss_spnego_import_sec_context (
@@ -346,35 +353,9 @@ OM_uint32 GSSAPI_CALLCONV _gss_spnego_import_sec_context (
     gss_ctx_id_t *context_handle
 )
 {
-    OM_uint32 ret, minor;
-    gss_ctx_id_t context;
-    gssspnego_ctx ctx;
-
-    *context_handle = GSS_C_NO_CONTEXT;
-
-    ret = _gss_spnego_alloc_sec_context(minor_status, &context);
-    if (ret != GSS_S_COMPLETE) {
-        return ret;
-    }
-    ctx = (gssspnego_ctx)context;
-
-    HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
-
-    ret = gss_import_sec_context(minor_status,
-                                 interprocess_token,
-                                 &ctx->negotiated_ctx_id);
-    if (ret != GSS_S_COMPLETE) {
-        _gss_spnego_internal_delete_sec_context(&minor, &context, GSS_C_NO_BUFFER);
-        return ret;
-    }
-
-    ctx->flags.open = 1;
-    /* don't bother filling in the rest of the fields */
-
-    HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
-
-    *context_handle = (gss_ctx_id_t)ctx;
-
-    return GSS_S_COMPLETE;
+    return _gss_spnego_import_sec_context_internal(minor_status,
+                                                   interprocess_token,
+                                                   (gssspnego_ctx *)context_handle);
 }
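(Importing now rebuilds the full SPNEGO-layer state via
_gss_spnego_import_sec_context_internal(), rather than hardwiring flags.open
and discarding everything except the negotiated mech context as before.)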
OM_uint32 GSSAPI_CALLCONV _gss_spnego_inquire_names_for_mech (

lib/gssapi/spnego/negoex_ctx.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011-2019 PADL Software Pty Ltd.
+ * Copyright (C) 2011-2021 PADL Software Pty Ltd.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -88,33 +88,42 @@ buffer_set_to_crypto(OM_uint32 *minor,
     return GSS_S_COMPLETE;
 }

+#define NEGOEX_SIGN_KEY   1
+#define NEGOEX_VERIFY_KEY 2
+#define NEGOEX_BOTH_KEYS  (NEGOEX_SIGN_KEY|NEGOEX_VERIFY_KEY)
+
 static OM_uint32
 get_session_keys(OM_uint32 *minor,
                  krb5_context context,
+                 OM_uint32 flags,
                  struct negoex_auth_mech *mech)
 {
     OM_uint32 major, tmpMinor;
     gss_buffer_set_t buffers = GSS_C_NO_BUFFER_SET;

-    major = gss_inquire_sec_context_by_oid(&tmpMinor, mech->mech_context,
-                                           GSS_C_INQ_NEGOEX_KEY, &buffers);
-    if (major == GSS_S_COMPLETE) {
-        major = buffer_set_to_crypto(minor, context,
-                                     buffers, &mech->crypto);
-        _gss_secure_release_buffer_set(&tmpMinor, &buffers);
-        if (major != GSS_S_COMPLETE)
-            return major;
+    if (flags & NEGOEX_SIGN_KEY) {
+        major = gss_inquire_sec_context_by_oid(&tmpMinor, mech->mech_context,
+                                               GSS_C_INQ_NEGOEX_KEY, &buffers);
+        if (major == GSS_S_COMPLETE) {
+            major = buffer_set_to_crypto(minor, context,
+                                         buffers, &mech->crypto);
+            _gss_secure_release_buffer_set(&tmpMinor, &buffers);
+            if (major != GSS_S_COMPLETE)
+                return major;
+        }
     }

-    major = gss_inquire_sec_context_by_oid(&tmpMinor, mech->mech_context,
-                                           GSS_C_INQ_NEGOEX_VERIFY_KEY,
-                                           &buffers);
-    if (major == GSS_S_COMPLETE) {
-        major = buffer_set_to_crypto(minor, context,
-                                     buffers, &mech->verify_crypto);
-        _gss_secure_release_buffer_set(&tmpMinor, &buffers);
-        if (major != GSS_S_COMPLETE)
-            return major;
+    if (flags & NEGOEX_VERIFY_KEY) {
+        major = gss_inquire_sec_context_by_oid(&tmpMinor, mech->mech_context,
+                                               GSS_C_INQ_NEGOEX_VERIFY_KEY,
+                                               &buffers);
+        if (major == GSS_S_COMPLETE) {
+            major = buffer_set_to_crypto(minor, context,
+                                         buffers, &mech->verify_crypto);
+            _gss_secure_release_buffer_set(&tmpMinor, &buffers);
+            if (major != GSS_S_COMPLETE)
+                return major;
+        }
     }

     return GSS_S_COMPLETE;
@@ -411,7 +420,7 @@ mech_init(OM_uint32 *minor,
         }
     }

     if (!GSS_ERROR(major))
-        return get_session_keys(minor, context, mech);
+        return get_session_keys(minor, context, NEGOEX_BOTH_KEYS, mech);

     /* Remember the error we got from the first mech. */
     if (first_mech) {
@@ -509,7 +518,7 @@ mech_accept(OM_uint32 *minor,
             !gss_oid_equal(ctx->negotiated_mech_type, mech->oid))
             _gss_mg_log(1, "negoex client didn't send the mech they said they would");

-        major = get_session_keys(minor, context, mech);
+        major = get_session_keys(minor, context, NEGOEX_BOTH_KEYS, mech);
     } else if (ctx->negoex_step == 1) {
         gss_mg_collect_error(ctx->negotiated_mech_type, major, *minor);
         *mech_error = TRUE;
@@ -532,6 +541,13 @@ verify_keyusage(gssspnego_ctx ctx, int make_checksum)
         NEGOEX_KEYUSAGE_ACCEPTOR_CHECKSUM : NEGOEX_KEYUSAGE_INITIATOR_CHECKSUM;
 }

+static OM_uint32
+verify_key_flags(gssspnego_ctx ctx, int make_checksum)
+{
+    return (ctx->flags.local ^ make_checksum) ?
+        NEGOEX_SIGN_KEY : NEGOEX_VERIFY_KEY;
+}
+
 static OM_uint32
 verify_checksum(OM_uint32 *minor,
                 gssspnego_ctx ctx,
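(As with verify_keyusage() above, the XOR with ctx->flags.local selects the key
by direction: on the acceptor side, the only side that can import a partial
context, making its own checksum fetches the signing key into mech->crypto,
while verifying the peer's checksum fetches the verify key into
mech->verify_crypto.)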
@@ -558,6 +574,13 @@ verify_checksum(OM_uint32 *minor,
     if (msg == NULL || !GUID_EQ(msg->scheme, mech->scheme))
         return GSS_S_COMPLETE;

+    /*
+     * Last-chance attempt to obtain the session key for imported, previously
+     * exported partial contexts (which do not carry the session key at the
+     * NegoEx layer).
+     */
+    if (mech->verify_crypto == NULL)
+        get_session_keys(minor, context, verify_key_flags(ctx, FALSE), mech);
+
     /*
      * A recoverable error may cause us to be unable to verify a token from the
      * other party. In this case we should send an alert.
@@ -618,8 +641,15 @@ make_checksum(OM_uint32 *minor, gssspnego_ctx ctx)
     if (mech->crypto == NULL) {
         if (mech->complete) {
-            *minor = (OM_uint32)NEGOEX_NO_VERIFY_KEY;
-            return GSS_S_UNAVAILABLE;
+            /*
+             * Last-chance attempt to obtain the session key for imported,
+             * previously exported partial contexts (which do not carry the
+             * session key at the NegoEx layer).
+             */
+            get_session_keys(minor, context, verify_key_flags(ctx, TRUE), mech);
+            if (mech->crypto == NULL) {
+                *minor = (OM_uint32)NEGOEX_NO_VERIFY_KEY;
+                return GSS_S_UNAVAILABLE;
+            }
         } else {
             return GSS_S_COMPLETE;
         }
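(Note that the return value of the lazy get_session_keys() call is ignored at
both call sites: make_checksum() rechecks mech->crypto explicitly, and
verify_checksum() falls through to its existing alert path when the key is
still missing.)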

lib/gssapi/spnego/negoex_util.c

@@ -30,9 +30,6 @@
 #include "spnego_locl.h"

-static void
-release_auth_mech(krb5_context context, struct negoex_auth_mech *mech);
-
 /*
  * SPNEGO expects to find the active mech context in ctx->negotiated_ctx_id,
  * but the metadata exchange APIs force us to have one mech context per mech
@@ -92,7 +89,7 @@ release_all_mechs(gssspnego_ctx ctx, krb5_context context)
     struct negoex_auth_mech *mech, *next;

     HEIM_TAILQ_FOREACH_SAFE(mech, &ctx->negoex_mechs, links, next) {
-        release_auth_mech(context, mech);
+        _gss_negoex_release_auth_mech(context, mech);
     }

     HEIM_TAILQ_INIT(&ctx->negoex_mechs);
@@ -656,20 +653,6 @@ _gss_negoex_locate_alert_message(struct negoex_message *messages,
     return (msg == NULL) ? NULL : &msg->u.a;
 }

-#define CHECK(ret, x) do { (ret) = (x); if (ret) goto fail; } while (0)
-
-static krb5_error_code
-store_bytes(krb5_storage *sp, const void *bytes, size_t length)
-{
-    ssize_t ssize;
-
-    ssize = krb5_storage_write(sp, bytes, length);
-    if (ssize != length)
-        return ENOMEM;
-
-    return 0;
-}
-
 /*
  * Add the encoding of a MESSAGE_HEADER structure to buf, given the number of
  * bytes of the payload following the full header. Increment the sequence
@@ -707,7 +690,7 @@ put_message_header(OM_uint32 *minor, gssspnego_ctx ctx,
     /* cbMessageLength */
     CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, header_len + payload_len));
     /* ConversationId */
-    CHECK(ret, store_bytes(ctx->negoex_transcript, ctx->negoex_conv_id, GUID_LENGTH));
+    CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, ctx->negoex_conv_id, GUID_LENGTH));

     _gss_negoex_log_message(0, type,
                             ctx->negoex_conv_id, ctx->negoex_seqnum,
@@ -745,7 +728,7 @@ _gss_negoex_add_nego_message(OM_uint32 *minor,
     if (major != GSS_S_COMPLETE)
         return major;

-    CHECK(ret, store_bytes(ctx->negoex_transcript, random, 32));
+    CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, random, 32));

     /* ProtocolVersion */
     CHECK(ret, krb5_store_uint64(ctx->negoex_transcript, 0));
     /* AuthSchemes vector */
@@ -755,11 +738,11 @@ _gss_negoex_add_nego_message(OM_uint32 *minor,
     CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, payload_start));
     CHECK(ret, krb5_store_uint16(ctx->negoex_transcript, 0));
     /* Four bytes of padding to reach a multiple of 8 bytes. */
-    CHECK(ret, store_bytes(ctx->negoex_transcript, "\0\0\0\0", 4));
+    CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, "\0\0\0\0", 4));

     /* Payload (auth schemes) */
     HEIM_TAILQ_FOREACH(mech, &ctx->negoex_mechs, links) {
-        CHECK(ret, store_bytes(ctx->negoex_transcript, mech->scheme, GUID_LENGTH));
+        CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, mech->scheme, GUID_LENGTH));
     }

     return GSS_S_COMPLETE;
@@ -784,12 +767,12 @@ _gss_negoex_add_exchange_message(OM_uint32 *minor,
     if (major != GSS_S_COMPLETE)
         return major;

-    CHECK(ret, store_bytes(ctx->negoex_transcript, scheme, GUID_LENGTH));
+    CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, scheme, GUID_LENGTH));

     /* Exchange byte vector */
     CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, payload_start));
     CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, token->length));

     /* Payload (token) */
-    CHECK(ret, store_bytes(ctx->negoex_transcript, token->value, token->length));
+    CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, token->value, token->length));

     return GSS_S_COMPLETE;
@@ -814,7 +797,7 @@ _gss_negoex_add_verify_message(OM_uint32 *minor,
     if (major != GSS_S_COMPLETE)
         return major;

-    CHECK(ret, store_bytes(ctx->negoex_transcript, scheme, GUID_LENGTH));
+    CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, scheme, GUID_LENGTH));
     CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, CHECKSUM_HEADER_LENGTH));
     CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, CHECKSUM_SCHEME_RFC3961));
     CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, cksum_type));
@@ -822,9 +805,9 @@ _gss_negoex_add_verify_message(OM_uint32 *minor,
     CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, payload_start));
     CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, cksum_len));
     /* Four bytes of padding to reach a multiple of 8 bytes. */
-    CHECK(ret, store_bytes(ctx->negoex_transcript, "\0\0\0\0", 4));
+    CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, "\0\0\0\0", 4));
     /* Payload (checksum contents) */
-    CHECK(ret, store_bytes(ctx->negoex_transcript, cksum, cksum_len));
+    CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, cksum, cksum_len));

     return GSS_S_COMPLETE;
@@ -852,14 +835,14 @@ _gss_negoex_add_verify_no_key_alert(OM_uint32 *minor,
     if (major != GSS_S_COMPLETE)
         return major;

-    CHECK(ret, store_bytes(ctx->negoex_transcript, scheme, GUID_LENGTH));
+    CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, scheme, GUID_LENGTH));
     /* ErrorCode */
     CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, 0));
     /* Alerts vector */
     CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, payload_start));
     CHECK(ret, krb5_store_uint16(ctx->negoex_transcript, 1));
     /* Six bytes of padding to reach a multiple of 8 bytes. */
-    CHECK(ret, store_bytes(ctx->negoex_transcript, "\0\0\0\0\0\0", 6));
+    CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, "\0\0\0\0\0\0", 6));
     /* Payload part 1: a single ALERT element */
     CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, ALERT_TYPE_PULSE));
     CHECK(ret, krb5_store_uint32(ctx->negoex_transcript,
@@ -877,9 +860,9 @@ fail:
 }

-static void
-release_auth_mech(krb5_context context,
-                  struct negoex_auth_mech *mech)
+void
+_gss_negoex_release_auth_mech(krb5_context context,
+                              struct negoex_auth_mech *mech)
 {
     OM_uint32 tmpmin;

@@ -904,7 +887,7 @@ _gss_negoex_delete_auth_mech(gssspnego_ctx ctx,
     krb5_context context = _gss_mg_krb5_context();

     HEIM_TAILQ_REMOVE(&ctx->negoex_mechs, mech, links);
-    release_auth_mech(context, mech);
+    _gss_negoex_release_auth_mech(context, mech);
 }
/* Remove all auth mech entries except for mech from ctx->mechs. */

lib/gssapi/spnego/spnego_locl.h

@@ -78,6 +78,8 @@
 #define ALLOC(X, N) (X) = calloc((N), sizeof(*(X)))

+#define CHECK(ret, x) do { (ret) = (x); if (ret) goto fail; } while (0)
+
 struct gssspnego_ctx_desc;
 typedef struct gssspnego_ctx_desc *gssspnego_ctx;
@@ -120,7 +122,7 @@ struct gssspnego_ctx_desc {
     gss_name_t target_name;
     gssspnego_initiator_state initiator_state;
-    int negoex_step;
+    uint8_t negoex_step;
     krb5_storage *negoex_transcript;
     uint32_t negoex_seqnum;
     conversation_id negoex_conv_id;
@@ -147,4 +149,11 @@ struct gssspnego_optimistic_ctx {
 #include "spnego-private.h"

+static inline int
+gssspnego_ctx_complete_p(gssspnego_ctx ctx)
+{
+    return ctx->flags.open &&
+        (ctx->flags.safe_omit || (ctx->flags.sent_mic && ctx->flags.verified_mic));
+}
+
 #endif /* SPNEGO_LOCL_H */
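(gssspnego_ctx_complete_p() deems a context fully established once it is open
and the MIC exchange has either completed in both directions or may safely be
omitted; exporting anything short of this is the new partial-context path,
permitted for acceptors only.)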