Files
Ralph Boehme a2a765f88e lib/krb5: initialize kdc_offset in the memory ccache from the krb5_context
This ensures we inherit the clock skew adjustment from the AS-REQ/REP into the
memory ccache in a similar way done for the file ccache.

This means krb5_cc_get_kdc_offset() will return the correct value, and
_krb5_get_cred_kdc_any() will use the adjusted time in the authenticators of
subsequent TGS-REQs.

BUG: https://bugzilla.samba.org/show_bug.cgi?id=15676

Pair-Programmed-With: Stefan Metzmacher <metze@samba.org>
Signed-off-by: Ralph Boehme <slow@samba.org>
Signed-off-by: Stefan Metzmacher <metze@samba.org>
2024-07-06 16:07:17 -04:00

642 lines
15 KiB
C

/*
* Copyright (c) 1997-2004 Kungliga Tekniska Högskolan
* (Royal Institute of Technology, Stockholm, Sweden).
* All rights reserved.
*
* Portions Copyright (c) 2009 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Institute nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "krb5_locl.h"
/*
 * In-memory credential cache. Named caches are kept on a global singly
 * linked list (mcc_head, protected by mcc_mutex); "anonymous" caches are
 * never linked into that list and are reachable only via their handle.
 */
typedef struct krb5_mcache {
char *name;                       /* cache name; heap-allocated, owned here */
unsigned int refcnt;              /* reference count, protected by mutex */
unsigned int anonymous:1;         /* 1 if not on the global mcc_head list */
unsigned int dead:1;              /* 1 after destroy; see MISDEAD() */
krb5_principal primary_principal; /* client principal, NULL until initialize */
struct link {                     /* singly linked list of stored creds */
krb5_creds cred;
struct link *next;
} *creds;
struct krb5_mcache *next;         /* next cache on mcc_head list */
time_t mtime;                     /* last modification time */
krb5_deltat kdc_offset;           /* clock skew vs. KDC, see mcc_initialize */
HEIMDAL_MUTEX mutex;              /* protects all mutable fields above */
} krb5_mcache;
static HEIMDAL_MUTEX mcc_mutex = HEIMDAL_MUTEX_INITIALIZER;
static struct krb5_mcache *mcc_head;
/* Per-ccache private data is the krb5_mcache itself. */
#define MCACHE(X) ((krb5_mcache *)(X)->data.data)
#define MISDEAD(X) ((X)->dead)
/*
 * Report the cache's name components.  Memory caches have no collection,
 * so the collection name is always NULL and the subsidiary name equals
 * the cache name.  Any of the output pointers may be NULL.
 */
static krb5_error_code KRB5_CALLCONV
mcc_get_name_2(krb5_context context,
               krb5_ccache id,
               const char **name,
               const char **col,
               const char **sub)
{
    krb5_mcache *cache = MCACHE(id);

    if (name != NULL)
        *name = cache->name;
    if (col != NULL)
        *col = NULL;
    if (sub != NULL)
        *sub = cache->name;
    return 0;
}
/*
 * Allocate a new memory cache, or take a reference on an existing one
 * with the same name.  With name == NULL a unique name is generated
 * (retried up to 4 times on collision).  The special name "anonymous"
 * creates a cache that is never linked onto the global list.
 * On success *out holds a referenced cache; caller must release it.
 */
static krb5_error_code
mcc_alloc(krb5_context context, const char *name, krb5_mcache **out)
{
krb5_mcache *m, *m_c;
size_t counter = 0;
int ret = 0;
*out = NULL;
ALLOC(m, 1);
if(m == NULL)
return krb5_enomem(context);
again:
/* give up after a few generated-name collisions */
if (counter > 3) {
free(m->name);
free(m);
return EAGAIN; /* XXX */
}
if(name == NULL)
ret = asprintf(&m->name, "u%p-%llu", m, (unsigned long long)counter);
else
m->name = strdup(name);
if(ret < 0 || m->name == NULL) {
free(m);
return krb5_enomem(context);
}
/* anonymous caches skip the dup check and the global list entirely */
if (strcmp(m->name, "anonymous") == 0) {
HEIMDAL_MUTEX_init(&(m->mutex));
m->anonymous = 1;
m->dead = 0;
m->refcnt = 1;
m->primary_principal = NULL;
m->creds = NULL;
m->mtime = time(NULL);
m->kdc_offset = 0;
m->next = NULL;
*out = m;
return 0;
}
/* check for dups first */
HEIMDAL_MUTEX_lock(&mcc_mutex);
for (m_c = mcc_head; m_c != NULL; m_c = m_c->next)
if (strcmp(m->name, m_c->name) == 0)
break;
if (m_c) {
if (name) {
/* We raced with another thread to create this cache;
 * discard ours and take a reference on the winner. */
free(m->name);
free(m);
m = m_c;
HEIMDAL_MUTEX_lock(&(m->mutex));
m->refcnt++;
HEIMDAL_MUTEX_unlock(&(m->mutex));
} else {
/* How likely are we to conflict on new_unique anyways??
 * Generated name collided: retry with a new counter. */
counter++;
free(m->name);
m->name = NULL;
HEIMDAL_MUTEX_unlock(&mcc_mutex);
goto again;
}
HEIMDAL_MUTEX_unlock(&mcc_mutex);
*out = m;
return 0;
}
/* brand-new named cache: initialize and push onto the global list */
m->anonymous = 0;
m->dead = 0;
m->refcnt = 1;
m->primary_principal = NULL;
m->creds = NULL;
m->mtime = time(NULL);
m->kdc_offset = 0;
m->next = mcc_head;
HEIMDAL_MUTEX_init(&(m->mutex));
mcc_head = m;
HEIMDAL_MUTEX_unlock(&mcc_mutex);
*out = m;
return 0;
}
/*
 * Resolve a MEMORY cache by name.  A non-empty subsidiary name takes
 * precedence over the residual; either creates the cache on first use
 * or references an existing cache with that name.
 */
static krb5_error_code KRB5_CALLCONV
mcc_resolve_2(krb5_context context,
              krb5_ccache *id,
              const char *res,
              const char *sub)
{
    const char *which = (sub != NULL && *sub != '\0') ? sub : res;
    krb5_mcache *cache = NULL;
    krb5_error_code ret;

    ret = mcc_alloc(context, which, &cache);
    if (ret != 0)
        return ret;

    (*id)->data.data = cache;
    (*id)->data.length = sizeof(*cache);
    return 0;
}
/*
 * Create a new memory cache with a generated unique name.
 */
static krb5_error_code KRB5_CALLCONV
mcc_gen_new(krb5_context context, krb5_ccache *id)
{
    krb5_mcache *cache = NULL;
    krb5_error_code ret;

    ret = mcc_alloc(context, NULL, &cache);
    if (ret != 0)
        return ret;

    (*id)->data.data = cache;
    (*id)->data.length = sizeof(*cache);
    return 0;
}
/*
 * Free the cache's principal and all stored credentials and mark it
 * dead.  The struct itself, its name and its mutex are left intact;
 * the caller still owns them.  Caller must hold m->mutex (or have
 * exclusive access to m).
 */
static void KRB5_CALLCONV
mcc_destroy_internal(krb5_context context,
                     krb5_mcache *m)
{
    struct link *entry, *next;

    if (m->primary_principal != NULL) {
        krb5_free_principal(context, m->primary_principal);
        m->primary_principal = NULL;
    }
    m->dead = 1;

    for (entry = m->creds; entry != NULL; entry = next) {
        next = entry->next;
        krb5_free_cred_contents(context, &entry->cred);
        free(entry);
    }
    m->creds = NULL;
}
/*
 * (Re-)initialize the cache for primary_principal.  Any existing
 * contents are destroyed first, and the cache inherits the current
 * KDC clock-skew offset from the krb5_context so that
 * krb5_cc_get_kdc_offset() returns the adjustment learned during
 * the AS exchange (mirrors the file ccache behaviour).
 */
static krb5_error_code KRB5_CALLCONV
mcc_initialize(krb5_context context,
krb5_ccache id,
krb5_principal primary_principal)
{
krb5_mcache *m = MCACHE(id);
krb5_error_code ret = 0;
HEIMDAL_MUTEX_lock(&(m->mutex));
heim_assert(m->refcnt != 0, "resurection released mcache");
/*
 * It's important to destroy any existing
 * creds here, that matches the behaviour
 * of all other backends and also the
 * MEMORY: backend in MIT.
 */
mcc_destroy_internal(context, m);
/* mcc_destroy_internal() marked us dead; this is a fresh start */
m->dead = 0;
/* inherit clock skew learned from the KDC (AS-REQ/REP) */
m->kdc_offset = context->kdc_sec_offset;
m->mtime = time(NULL);
ret = krb5_copy_principal (context,
primary_principal,
&m->primary_principal);
HEIMDAL_MUTEX_unlock(&(m->mutex));
return ret;
}
/*
 * Drop one reference on m.  Returns 1 when this was the last reference
 * AND the cache is dead: m->name has then been freed and the caller is
 * responsible for destroying m->mutex and releasing the struct itself.
 * Returns 0 otherwise (references remain, or the cache is still live
 * and stays reachable, e.g. via the global list).
 */
static int
mcc_close_internal(krb5_mcache *m)
{
HEIMDAL_MUTEX_lock(&(m->mutex));
heim_assert(m->refcnt != 0, "closed dead cache mcache");
if (--m->refcnt != 0) {
HEIMDAL_MUTEX_unlock(&(m->mutex));
return 0;
}
if (MISDEAD(m)) {
/* last reference on a destroyed cache: release the name now,
 * caller tears down the mutex and the container */
free(m->name);
HEIMDAL_MUTEX_unlock(&(m->mutex));
return 1;
}
HEIMDAL_MUTEX_unlock(&(m->mutex));
return 0;
}
/*
 * Close a handle to the cache.  If this releases the final reference
 * to an already-destroyed cache, tear down the mutex and free the
 * cache struct (stored in id->data).
 */
static krb5_error_code KRB5_CALLCONV
mcc_close(krb5_context context,
          krb5_ccache id)
{
    krb5_mcache *cache = MCACHE(id);
    int last_ref = mcc_close_internal(cache);

    if (last_ref) {
        HEIMDAL_MUTEX_destroy(&(cache->mutex));
        krb5_data_free(&id->data);
    }
    return 0;
}
static krb5_error_code KRB5_CALLCONV
mcc_destroy(krb5_context context,
krb5_ccache id)
{
krb5_mcache **n, *m = MCACHE(id);
if (m->anonymous) {
HEIMDAL_MUTEX_lock(&(m->mutex));
if (m->refcnt == 0) {
HEIMDAL_MUTEX_unlock(&(m->mutex));
krb5_abortx(context, "mcc_destroy: refcnt already 0");
}
if (!MISDEAD(m))
mcc_destroy_internal(context, m);
HEIMDAL_MUTEX_unlock(&(m->mutex));
return 0;
}
HEIMDAL_MUTEX_lock(&mcc_mutex);
HEIMDAL_MUTEX_lock(&(m->mutex));
if (m->refcnt == 0)
{
HEIMDAL_MUTEX_unlock(&(m->mutex));
HEIMDAL_MUTEX_unlock(&mcc_mutex);
krb5_abortx(context, "mcc_destroy: refcnt already 0");
}
if (!MISDEAD(m)) {
/* if this is an active mcache, remove it from the linked
list, and free all data */
for(n = &mcc_head; n && *n; n = &(*n)->next) {
if(m == *n) {
*n = m->next;
break;
}
}
mcc_destroy_internal(context, m);
}
HEIMDAL_MUTEX_unlock(&(m->mutex));
HEIMDAL_MUTEX_unlock(&mcc_mutex);
return 0;
}
/*
 * Store a copy of *creds at the head of the cache's credential list.
 * Returns ENOENT if the cache has been destroyed, ENOMEM on allocation
 * failure, or an error from krb5_copy_creds_contents().
 */
static krb5_error_code KRB5_CALLCONV
mcc_store_cred(krb5_context context,
               krb5_ccache id,
               krb5_creds *creds)
{
    krb5_mcache *m = MCACHE(id);
    krb5_error_code ret;
    struct link *l;

    HEIMDAL_MUTEX_lock(&(m->mutex));
    if (MISDEAD(m))
    {
        HEIMDAL_MUTEX_unlock(&(m->mutex));
        return ENOENT;
    }
    l = malloc (sizeof(*l));
    if (l == NULL) {
        /* BUG FIX: previously returned with m->mutex still held,
         * leaving the cache permanently locked on OOM. */
        HEIMDAL_MUTEX_unlock(&(m->mutex));
        return krb5_enomem(context);
    }
    l->next = m->creds;
    m->creds = l;
    memset (&l->cred, 0, sizeof(l->cred));
    ret = krb5_copy_creds_contents (context, creds, &l->cred);
    if (ret) {
        /* copy failed: unlink the half-built entry again */
        m->creds = l->next;
        free (l);
        HEIMDAL_MUTEX_unlock(&(m->mutex));
        return ret;
    }
    m->mtime = time(NULL);
    HEIMDAL_MUTEX_unlock(&(m->mutex));
    return 0;
}
/*
 * Return a copy of the cache's primary (client) principal.
 * Fails with ENOENT if the cache is dead or was never initialized.
 * Caller frees *principal with krb5_free_principal().
 */
static krb5_error_code KRB5_CALLCONV
mcc_get_principal(krb5_context context,
                  krb5_ccache id,
                  krb5_principal *principal)
{
    krb5_mcache *cache = MCACHE(id);
    krb5_error_code ret;

    HEIMDAL_MUTEX_lock(&(cache->mutex));
    if (MISDEAD(cache) || cache->primary_principal == NULL) {
        HEIMDAL_MUTEX_unlock(&(cache->mutex));
        return ENOENT;
    }
    ret = krb5_copy_principal(context, cache->primary_principal, principal);
    HEIMDAL_MUTEX_unlock(&(cache->mutex));
    return ret;
}
/*
 * Begin iterating the cache's credentials: the cursor is simply a
 * pointer to the head of the credential list.  ENOENT if the cache
 * has been destroyed.
 */
static krb5_error_code KRB5_CALLCONV
mcc_get_first (krb5_context context,
               krb5_ccache id,
               krb5_cc_cursor *cursor)
{
    krb5_mcache *cache = MCACHE(id);
    krb5_error_code ret = 0;

    HEIMDAL_MUTEX_lock(&(cache->mutex));
    if (MISDEAD(cache))
        ret = ENOENT;
    else
        *cursor = cache->creds;
    HEIMDAL_MUTEX_unlock(&(cache->mutex));
    return ret;
}
/*
 * Fetch the credential under the cursor (a list node) and advance the
 * cursor.  Returns KRB5_CC_END when the list is exhausted, ENOENT if
 * the cache has been destroyed.  Caller frees *creds contents.
 */
static krb5_error_code KRB5_CALLCONV
mcc_get_next (krb5_context context,
              krb5_ccache id,
              krb5_cc_cursor *cursor,
              krb5_creds *creds)
{
    krb5_mcache *cache = MCACHE(id);
    struct link *entry;

    HEIMDAL_MUTEX_lock(&(cache->mutex));
    if (MISDEAD(cache)) {
        HEIMDAL_MUTEX_unlock(&(cache->mutex));
        return ENOENT;
    }
    HEIMDAL_MUTEX_unlock(&(cache->mutex));

    entry = *cursor;
    if (entry == NULL)
        return KRB5_CC_END;

    *cursor = entry->next;
    return krb5_copy_creds_contents (context, &entry->cred, creds);
}
/*
 * End credential iteration.  The cursor owns no resources, so there is
 * nothing to release.
 */
static krb5_error_code KRB5_CALLCONV
mcc_end_get (krb5_context context,
             krb5_ccache id,
             krb5_cc_cursor *cursor)
{
    return 0;
}
/*
 * Remove every stored credential matching mcreds under the comparison
 * flags in `which`.  Matching nothing is not an error.
 */
static krb5_error_code KRB5_CALLCONV
mcc_remove_cred(krb5_context context,
                krb5_ccache id,
                krb5_flags which,
                krb5_creds *mcreds)
{
    krb5_mcache *cache = MCACHE(id);
    struct link **prevp;

    HEIMDAL_MUTEX_lock(&(cache->mutex));
    prevp = &cache->creds;
    while (*prevp != NULL) {
        struct link *cur = *prevp;

        if (krb5_compare_creds(context, which, mcreds, &cur->cred)) {
            /* unlink and free the matching entry; prevp stays put */
            *prevp = cur->next;
            krb5_free_cred_contents(context, &cur->cred);
            free(cur);
            cache->mtime = time(NULL);
        } else {
            prevp = &cur->next;
        }
    }
    HEIMDAL_MUTEX_unlock(&(cache->mutex));
    return 0;
}
/*
 * Memory caches support no flags; accept and ignore them.
 */
static krb5_error_code KRB5_CALLCONV
mcc_set_flags(krb5_context context,
              krb5_ccache id,
              krb5_flags flags)
{
    return 0; /* XXX */
}
/* Cursor state for iterating over all memory caches: holds a counted
 * reference to the next cache to hand out (NULL at end). */
struct mcache_iter {
krb5_mcache *cache;
};
/*
 * Begin iteration over all memory caches.  Takes a reference on the
 * first cache (if any) so it cannot disappear while the cursor points
 * at it.
 */
static krb5_error_code KRB5_CALLCONV
mcc_get_cache_first(krb5_context context, krb5_cc_cursor *cursor)
{
    struct mcache_iter *it = calloc(1, sizeof(*it));

    if (it == NULL)
        return krb5_enomem(context);

    HEIMDAL_MUTEX_lock(&mcc_mutex);
    it->cache = mcc_head;
    if (it->cache != NULL) {
        /* pin the cache for the cursor */
        HEIMDAL_MUTEX_lock(&(it->cache->mutex));
        it->cache->refcnt++;
        HEIMDAL_MUTEX_unlock(&(it->cache->mutex));
    }
    HEIMDAL_MUTEX_unlock(&mcc_mutex);

    *cursor = it;
    return 0;
}
/*
 * Hand out the cache currently under the cursor as a new krb5_ccache
 * and advance the cursor (taking a reference on the following cache).
 * The reference the cursor held on the returned cache is transferred
 * to *id and released by mcc_close().
 */
static krb5_error_code KRB5_CALLCONV
mcc_get_cache_next(krb5_context context, krb5_cc_cursor cursor, krb5_ccache *id)
{
    struct mcache_iter *iter = cursor;
    krb5_error_code ret;
    krb5_mcache *m;
    if (iter->cache == NULL)
        return KRB5_CC_END;
    HEIMDAL_MUTEX_lock(&mcc_mutex);
    m = iter->cache;
    if (m->next)
    {
        /* pin the next cache for the cursor before advancing */
        HEIMDAL_MUTEX_lock(&(m->next->mutex));
        m->next->refcnt++;
        HEIMDAL_MUTEX_unlock(&(m->next->mutex));
    }
    iter->cache = m->next;
    HEIMDAL_MUTEX_unlock(&mcc_mutex);
    ret = _krb5_cc_allocate(context, &krb5_mcc_ops, id);
    if (ret) {
        /* BUG FIX: previously the reference held on m leaked here.
         * Drop it; if that was the last reference on a dead cache,
         * release the struct as mcc_close() would. */
        if (mcc_close_internal(m)) {
            HEIMDAL_MUTEX_destroy(&(m->mutex));
            free(m);
        }
        return ret;
    }
    (*id)->data.data = m;
    (*id)->data.length = sizeof(*m);
    return 0;
}
/*
 * End iteration over all memory caches, dropping the reference the
 * cursor still holds on the not-yet-visited cache (if any), then
 * free the cursor itself.
 */
static krb5_error_code KRB5_CALLCONV
mcc_end_cache_get(krb5_context context, krb5_cc_cursor cursor)
{
    struct mcache_iter *iter = cursor;
    if (iter->cache) {
        /* BUG FIX: the return value was previously ignored, leaking the
         * mutex and the mcache struct when this was the last reference
         * on a dead cache (compare mcc_close()). */
        if (mcc_close_internal(iter->cache)) {
            HEIMDAL_MUTEX_destroy(&(iter->cache->mutex));
            free(iter->cache);
        }
    }
    iter->cache = NULL;
    free(iter);
    return 0;
}
/*
 * Move the contents of `from` into `to` by swapping the credential
 * lists and primary principals, then destroying `from`.  `from` is
 * unlinked from the global list first so concurrent resolves cannot
 * find it mid-move.  Lock order: mcc_mutex, then from, then to.
 * NOTE(review): kdc_offset is not swapped/copied here, so `to` keeps
 * its own offset — confirm whether that is intended.
 */
static krb5_error_code KRB5_CALLCONV
mcc_move(krb5_context context, krb5_ccache from, krb5_ccache to)
{
krb5_mcache *mfrom = MCACHE(from), *mto = MCACHE(to);
struct link *creds;
krb5_principal principal;
krb5_mcache **n;
HEIMDAL_MUTEX_lock(&mcc_mutex);
/* drop the from cache from the linked list to avoid lookups */
for(n = &mcc_head; n && *n; n = &(*n)->next) {
if(mfrom == *n) {
*n = mfrom->next;
break;
}
}
HEIMDAL_MUTEX_lock(&(mfrom->mutex));
HEIMDAL_MUTEX_lock(&(mto->mutex));
/* swap creds */
creds = mto->creds;
mto->creds = mfrom->creds;
mfrom->creds = creds;
/* swap principal */
principal = mto->primary_principal;
mto->primary_principal = mfrom->primary_principal;
mfrom->primary_principal = principal;
mto->mtime = mfrom->mtime = time(NULL);
HEIMDAL_MUTEX_unlock(&(mfrom->mutex));
HEIMDAL_MUTEX_unlock(&(mto->mutex));
HEIMDAL_MUTEX_unlock(&mcc_mutex);
/* from now holds to's old contents; destroy releases them */
krb5_cc_destroy(context, from);
return 0;
}
/*
 * Return the default name for the MEMORY cache type.  Caller frees
 * *str; on allocation failure *str is NULL and ENOMEM is returned.
 */
static krb5_error_code KRB5_CALLCONV
mcc_default_name(krb5_context context, char **str)
{
    *str = strdup("MEMORY:");
    return (*str == NULL) ? krb5_enomem(context) : 0;
}
/*
 * Report when the cache was last modified (store/remove/initialize).
 */
static krb5_error_code KRB5_CALLCONV
mcc_lastchange(krb5_context context, krb5_ccache id, krb5_timestamp *mtime)
{
    krb5_mcache *cache = MCACHE(id);

    HEIMDAL_MUTEX_lock(&(cache->mutex));
    *mtime = cache->mtime;
    HEIMDAL_MUTEX_unlock(&(cache->mutex));
    return 0;
}
/*
 * Record the clock-skew offset between local time and the KDC.
 */
static krb5_error_code KRB5_CALLCONV
mcc_set_kdc_offset(krb5_context context, krb5_ccache id, krb5_deltat kdc_offset)
{
    krb5_mcache *cache = MCACHE(id);

    HEIMDAL_MUTEX_lock(&(cache->mutex));
    cache->kdc_offset = kdc_offset;
    HEIMDAL_MUTEX_unlock(&(cache->mutex));
    return 0;
}
/*
 * Fetch the stored clock-skew offset between local time and the KDC.
 */
static krb5_error_code KRB5_CALLCONV
mcc_get_kdc_offset(krb5_context context, krb5_ccache id, krb5_deltat *kdc_offset)
{
    krb5_mcache *cache = MCACHE(id);

    HEIMDAL_MUTEX_lock(&(cache->mutex));
    *kdc_offset = cache->kdc_offset;
    HEIMDAL_MUTEX_unlock(&(cache->mutex));
    return 0;
}
/**
 * Variable containing the MEMORY based credential cache implementation.
 *
 * @ingroup krb5_ccache
 */
KRB5_LIB_VARIABLE const krb5_cc_ops krb5_mcc_ops = {
KRB5_CC_OPS_VERSION_5,
"MEMORY",
NULL, /* get_name: superseded by get_name_2 below */
NULL, /* resolve: superseded by resolve_2 below */
mcc_gen_new,
mcc_initialize,
mcc_destroy,
mcc_close,
mcc_store_cred,
NULL, /* mcc_retrieve */
mcc_get_principal,
mcc_get_first,
mcc_get_next,
mcc_end_get,
mcc_remove_cred,
mcc_set_flags,
NULL, /* get_version */
mcc_get_cache_first,
mcc_get_cache_next,
mcc_end_cache_get,
mcc_move,
mcc_default_name,
NULL, /* set_default */
mcc_lastchange,
mcc_set_kdc_offset,
mcc_get_kdc_offset,
mcc_get_name_2,
mcc_resolve_2
};