
Status:

 - And it works!

 - We have an extensive test based on decoding a rich EK certificate.  This
   test exercises all of:

    - decoding
    - encoding with and without decoded open types
    - copying of decoded values with decoded open types
    - freeing of decoded values with decoded open types

   Valgrind finds no memory errors.

 - Added a manual page for the compiler.

 - rfc2459.asn1 now has the primary PKIX types that we care about defined as
   in RFC 5912, with IOS constraints and parameterization:

    - `Extension`       (embeds open type in an `OCTET STRING`)
    - `OtherName`       (embeds open type in an `ANY`-like type)
    - `SingleAttribute` (embeds open type in an `ANY`-like type)
    - `AttributeSet`    (embeds open type in a `SET OF ANY`-like type)

   All of these use OIDs as the open type type ID field, but integer open
   type type ID fields are also supported (and needed, for Kerberos).  (The
   `Extension` pattern is sketched at the end of these notes.)

   That will cover every typed hole pattern in all our ASN.1 modules.

   With this we'll be able to automatically and recursively decode through
   all subject DN attributes even when the subject DN is a directoryName
   SAN, and subjectDirectoryAttributes, and all extensions, and all SANs,
   and all authorization-data elements, and PA-data, and...

   We're not really using `SingleAttribute` and `AttributeSet` yet because
   various changes are needed in `lib/hx509` for that.

 - `asn1_compile` builds and recognizes the subset of X.681/X.682/X.683 that
   we need for, and now use in, rfc2459.asn1.  It builds the necessary AST,
   generates the correct C types, and generates templating for object sets
   and open types!

 - See the READMEs for details.

 - The codegen backend is not tested; I won't make it implement automatic
   open type handling, but it should at least not crash, by substituting
   `heim_any` for open types not embedded in `OCTET STRING`.

 - We're _really_ starting to have problems with the ITU-T ASN.1 grammar and
   our version of it...

   Type names have to start with an upper-case letter and value names with a
   lower-case letter, but that's not enough to disambiguate.  The fact that
   we've allowed value and type names to violate their respective
   start-with-case rules is causing us trouble now that we're adding grammar
   from X.681/X.682/X.683, and we're going to have to undo that.  In
   preparation for that I'm capitalizing the `heim_any` and `heim_any_set`
   types, and doing some additional cleanup, which requires changes to other
   parts of Heimdal (all in this same commit for now).

   Problems we have because of this:

    - We cannot IMPORT values into modules because we have no idea whether a
      symbol being imported refers to a value or a type -- the only clue we
      would have is the symbol's name -- so we assume IMPORTed symbols are
      types.  This means we can't import OIDs, for example, which is super
      annoying.

      One thing we might be able to do here is mark imported symbols as
      being of an undetermined-but-not-undefined type, then coerce the
      symbol's type the first time it's used in a context where its type is
      inferred as type, value, object, object set, or class.  (Though since
      we don't generate C symbols for objects or classes, we won't be able
      to import them, especially since we need to know them at compile time
      and cannot defer their handling to link- or run-time.)

    - The `NULL` type name and the `NULL` value name now cause two
      reduce/reduce conflicts via the `FieldSetting` production.

    - Various shift/reduce conflicts involving `NULL` values in
      non-top-level contexts (in constraints, for example).
    - Currently I have a bug: to disambiguate the grammar I have a
      CLASS_IDENTIFIER token that must be all caps, while TYPE_IDENTIFIER
      must start with a capital but must not be all caps -- but this breaks
      Kerberos, since all of its type names are all caps.  Oof!  To fix this
      I made it so class names have to be all caps and start with an
      underscore (ick).

TBD:

 - Check all the XXX comments and address them.

 - Apply this treatment to Kerberos!  Automatic handling of authz-data
   sounds useful :)

 - Apply this treatment to PKCS#10 (CSRs) and other ASN.1 modules too.

 - Replace various bits of code in `lib/hx509/` with uses of this feature.

 - Add JER.

 - Enhance `hxtool` and `asn1_print`.

Getting there!
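For reference, here is roughly what the `Extension` open type pattern looks
like in RFC 5912.  This is only a sketch to illustrate the IOS class, the
parameterized type, and the open type embedded in an `OCTET STRING`; the
exact spelling in rfc2459.asn1 may differ, and `CertExtensions` merely
stands in for whatever object set gets plugged in:

    -- The information object class: each object in an object set of this
    -- class binds an extension OID (&id) to the type carried in extnValue
    -- (&ExtnType).
    EXTENSION ::= CLASS {
        &id        OBJECT IDENTIFIER UNIQUE,
        &ExtnType,
        &Critical  BOOLEAN DEFAULT {TRUE | FALSE}
    } WITH SYNTAX {
        SYNTAX &ExtnType IDENTIFIED BY &id
        [CRITICALITY &Critical]
    }

    -- The parameterized type: extnID is constrained to the IDs in the
    -- object set, and extnValue is the open type, wrapped in an OCTET
    -- STRING.
    Extension{EXTENSION:ExtensionSet} ::= SEQUENCE {
        extnID     EXTENSION.&id({ExtensionSet}),
        critical   BOOLEAN DEFAULT FALSE,
        extnValue  OCTET STRING (CONTAINING
                       EXTENSION.&ExtnType({ExtensionSet}{@extnID}))
    }

    Extensions{EXTENSION:ExtensionSet} ::=
        SEQUENCE SIZE (1..MAX) OF Extension{{ExtensionSet}}

Given an object set such as `CertExtensions` to instantiate the parameter,
the compiler knows, from the value of `extnID`, which type the `extnValue`
octets encode -- which is what makes automatic, recursive decoding of
extensions (and SANs, attributes, authorization-data, etc.) possible.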
/*
 * Copyright (c) 2005 Kungliga Tekniska Högskolan
 * (Royal Institute of Technology, Stockholm, Sweden).
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This test checks reader/writer concurrency for the SQLite3 and LMDB HDB
 * backends.  We're hoping to find that one thread or process can dump the
 * HDB while another writes -- this way backups and ipropd-master need not
 * block write transactions when dumping a huge HDB.
 *
 * It has two modes: threaded, and forked.
 *
 * Apparently, neither LMDB nor SQLite3 gives us the desired level of
 * concurrency in threaded mode, with this test not making progress.  This
 * is surprising, at least for SQLite3, which is supposed to support N
 * readers, 1 writer, and be thread-safe.  LMDB is also supposed to support
 * N readers, 1 writer, but perhaps not all in one process?
 */

#include "hdb_locl.h"
#include <sys/types.h>
#include <sys/wait.h>
#include <pthread.h>
#include <getarg.h>

struct tsync {
    pthread_mutex_t lock;
    pthread_cond_t rcv;
    pthread_cond_t wcv;
    const char *hdb_name;
    const char *fname;
    volatile int writer_go;
    volatile int reader_go;
    int writer_go_pipe[2];
    int reader_go_pipe[2];
};

static void *
threaded_reader(void *d)
{
    krb5_error_code ret;
    krb5_context context;
    struct tsync *s = d;
    hdb_entry_ex entr;
    HDB *dbr = NULL;

    printf("Reader thread opening HDB\n");

    if ((krb5_init_context(&context)))
        errx(1, "krb5_init_context failed");

    printf("Reader thread waiting for writer to create the HDB\n");
    (void) pthread_mutex_lock(&s->lock);
    s->writer_go = 1;
    (void) pthread_cond_signal(&s->wcv);
    while (!s->reader_go)
        (void) pthread_cond_wait(&s->rcv, &s->lock);
    s->reader_go = 0;
    (void) pthread_mutex_unlock(&s->lock);

    /* Open a new HDB handle to read */
    if ((ret = hdb_create(context, &dbr, s->hdb_name))) {
        //(void) unlink(s->fname);
        krb5_err(context, 1, ret, "Could not get a handle for HDB %s (read)",
                 s->hdb_name);
    }
    if ((ret = dbr->hdb_open(context, dbr, O_RDONLY, 0))) {
        //(void) unlink(s->fname);
        krb5_err(context, 1, ret, "Could not open HDB %s", s->hdb_name);
    }
    if ((ret = dbr->hdb_firstkey(context, dbr, 0, &entr))) {
        //(void) unlink(s->fname);
        krb5_err(context, 1, ret, "Could not iterate HDB %s", s->hdb_name);
    }
    free_HDB_entry(&entr.entry);

    /* Tell the writer to go ahead and write */
    printf("Reader thread iterated one entry; telling writer to write more\n");
    (void) pthread_mutex_lock(&s->lock);
    s->writer_go = 1;
    (void) pthread_cond_signal(&s->wcv);

    /* Wait for the writer to have written one more entry to the HDB */
    printf("Reader thread waiting for writer\n");
    while (!s->reader_go)
        (void) pthread_cond_wait(&s->rcv, &s->lock);
    s->reader_go = 0;
    (void) pthread_mutex_unlock(&s->lock);

    /* Iterate the rest */
    printf("Reader thread iterating another entry\n");
    if ((ret = dbr->hdb_nextkey(context, dbr, 0, &entr))) {
        //(void) unlink(s->fname);
        krb5_err(context, 1, ret,
                 "Could not iterate while writing to HDB %s", s->hdb_name);
    }
    printf("Reader thread iterated another entry\n");
    free_HDB_entry(&entr.entry);
    if ((ret = dbr->hdb_nextkey(context, dbr, 0, &entr)) == 0) {
        //(void) unlink(s->fname);
        krb5_warn(context, ret,
                  "HDB %s sees writes committed since starting iteration",
                  s->hdb_name);
    } else if (ret != HDB_ERR_NOENTRY) {
        //(void) unlink(s->fname);
        krb5_err(context, 1, ret,
                 "Could not iterate while writing to HDB %s (2)", s->hdb_name);
    }

    /* Tell the writer we're done */
    printf("Reader thread telling writer to go\n");
    (void) pthread_mutex_lock(&s->lock);
    s->writer_go = 1;
    (void) pthread_cond_signal(&s->wcv);
    (void) pthread_mutex_unlock(&s->lock);

    dbr->hdb_close(context, dbr);
    dbr->hdb_destroy(context, dbr);
    krb5_free_context(context);
    printf("Reader thread exiting\n");
    return 0;
}

static void
forked_reader(struct tsync *s)
{
    krb5_error_code ret;
    krb5_context context;
    hdb_entry_ex entr;
    ssize_t bytes;
    char b[1];
    HDB *dbr = NULL;

    printf("Reader process opening HDB\n");

    (void) close(s->writer_go_pipe[0]);
    (void) close(s->reader_go_pipe[1]);
    s->writer_go_pipe[0] = -1;
    s->reader_go_pipe[1] = -1;
    if ((krb5_init_context(&context)))
        errx(1, "krb5_init_context failed");

    printf("Reader process waiting for writer\n");
    while ((bytes = read(s->reader_go_pipe[0], b, sizeof(b))) == -1 &&
           errno == EINTR)
        ;

    /* Open a new HDB handle to read */
    if ((ret = hdb_create(context, &dbr, s->hdb_name))) {
        //(void) unlink(s->fname);
        krb5_err(context, 1, ret, "Could not get a handle for HDB %s (read)",
                 s->hdb_name);
    }
    if ((ret = dbr->hdb_open(context, dbr, O_RDONLY, 0))) {
        //(void) unlink(s->fname);
        krb5_err(context, 1, ret, "Could not open HDB %s", s->hdb_name);
    }
    if ((ret = dbr->hdb_firstkey(context, dbr, 0, &entr))) {
        //(void) unlink(s->fname);
        krb5_err(context, 1, ret, "Could not iterate HDB %s", s->hdb_name);
    }
    printf("Reader process iterated one entry\n");
    free_HDB_entry(&entr.entry);

    /* Tell the writer to go ahead and write */
    printf("Reader process iterated one entry; telling writer to write more\n");
    while ((bytes = write(s->writer_go_pipe[1], "", sizeof(""))) == -1 &&
           errno == EINTR)
        ;

    /* Wait for the writer to have written one more entry to the HDB */
    printf("Reader process waiting for writer\n");
    while ((bytes = read(s->reader_go_pipe[0], b, sizeof(b))) == -1 &&
           errno == EINTR)
        ;
    if (bytes == -1)
        err(1, "Could not read from reader-go pipe (error)");
    if (bytes == 0)
        errx(1, "Could not read from reader-go pipe (EOF)");

    /* Iterate the rest */
    if ((ret = dbr->hdb_nextkey(context, dbr, 0, &entr))) {
        //(void) unlink(s->fname);
        krb5_err(context, 1, ret,
                 "Could not iterate while writing to HDB %s", s->hdb_name);
    }
    free_HDB_entry(&entr.entry);
    printf("Reader process iterated another entry\n");
    if ((ret = dbr->hdb_nextkey(context, dbr, 0, &entr)) == 0) {
        //(void) unlink(s->fname);
        krb5_warn(context, ret,
                  "HDB %s sees writes committed since starting iteration (%s)",
                  s->hdb_name, entr.entry.principal->name.name_string.val[0]);
    } else if (ret != HDB_ERR_NOENTRY) {
        //(void) unlink(s->fname);
        krb5_err(context, 1, ret,
                 "Could not iterate while writing to HDB %s (2)", s->hdb_name);
    }

    /* Tell the writer we're done */
    printf("Reader process done; telling writer to go\n");
    while ((bytes = write(s->writer_go_pipe[1], "", sizeof(""))) == -1 &&
           errno == EINTR)
        ;

    dbr->hdb_close(context, dbr);
    dbr->hdb_destroy(context, dbr);
    krb5_free_context(context);
    (void) close(s->writer_go_pipe[1]);
    (void) close(s->reader_go_pipe[0]);
    printf("Reader process exiting\n");
    _exit(0);
}

static krb5_error_code
make_entry(krb5_context context, hdb_entry_ex *entry, const char *name)
{
    krb5_error_code ret;

    memset(entry, 0, sizeof(*entry));
    entry->entry.kvno = 2;
    entry->entry.keys.len = 0;
    entry->entry.keys.val = NULL;
    entry->entry.created_by.time = time(NULL);
    entry->entry.modified_by = NULL;
    entry->entry.valid_start = NULL;
    entry->entry.valid_end = NULL;
    entry->entry.max_life = NULL;
    entry->entry.max_renew = NULL;
    entry->entry.etypes = NULL;
    entry->entry.generation = NULL;
    entry->entry.extensions = NULL;
    if ((ret = krb5_make_principal(context, &entry->entry.principal,
                                   "TEST.H5L.SE", name, NULL)))
        return ret;
    if ((ret = krb5_make_principal(context, &entry->entry.created_by.principal,
                                   "TEST.H5L.SE", "tester", NULL)))
        return ret;
    return 0;
}

static void
readers_turn(struct tsync *s, pid_t child, int threaded)
{
    if (threaded) {
        (void) pthread_mutex_lock(&s->lock);
        s->reader_go = 1;
        (void) pthread_cond_signal(&s->rcv);

        while (!s->writer_go)
            (void) pthread_cond_wait(&s->wcv, &s->lock);
        s->writer_go = 0;
        (void) pthread_mutex_unlock(&s->lock);
    } else {
        ssize_t bytes;
        char b[1];

        while ((bytes = write(s->reader_go_pipe[1], "", sizeof(""))) == -1 &&
               errno == EINTR)
            ;
        if (bytes == -1) {
            kill(child, SIGKILL);
            err(1, "Could not write to reader-go pipe (error)");
        }
        if (bytes == 0) {
            kill(child, SIGKILL);
            err(1, "Could not write to reader-go pipe (EOF?)");
        }

        while ((bytes = read(s->writer_go_pipe[0], b, sizeof(b))) == -1 &&
               errno == EINTR)
            ;
        if (bytes == -1) {
            kill(child, SIGKILL);
            err(1, "Could not read from writer-go pipe");
        }
        if (bytes == 0) {
            kill(child, SIGKILL);
            errx(1, "Child errored");
        }
        s->writer_go = 0;
    }
}

static void
test_hdb_concurrency(char *name, const char *ext, int threaded)
{
    krb5_error_code ret;
    krb5_context context;
    char *fname = strchr(name, ':') + 1;
    char *fname_ext = NULL;
    pthread_t reader_thread;
    struct tsync ts;
    hdb_entry_ex entw;
    pid_t child = getpid();
    HDB *dbw = NULL;
    int status;
    int fd;

    memset(&ts, 0, sizeof(ts));
    (void) pthread_cond_init(&ts.rcv, NULL);
    (void) pthread_cond_init(&ts.wcv, NULL);
    (void) pthread_mutex_init(&ts.lock, NULL);

    if ((krb5_init_context(&context)))
        errx(1, "krb5_init_context failed");

    /* Use mkstemp() then unlink() to avoid warnings about mktemp(); ugh */
    if ((fd = mkstemp(fname)) == -1)
        err(1, "mkstemp(%s)", fname);
    (void) close(fd);
    (void) unlink(fname);
    if (asprintf(&fname_ext, "%s%s", fname, ext ? ext : "") == -1 ||
        fname_ext == NULL)
        err(1, "Out of memory");
    ts.hdb_name = name;
    ts.fname = fname_ext;

    if (threaded) {
        printf("Starting reader thread\n");
        (void) pthread_mutex_lock(&ts.lock);
        if ((errno = pthread_create(&reader_thread, NULL, threaded_reader, &ts))) {
            (void) unlink(fname_ext);
            krb5_err(context, 1, errno, "Could not create a thread to read HDB");
        }

        /* Wait for reader */
        while (!ts.writer_go)
            (void) pthread_cond_wait(&ts.wcv, &ts.lock);
        (void) pthread_mutex_unlock(&ts.lock);
    } else {
        printf("Starting reader process\n");
        if (pipe(ts.writer_go_pipe) == -1)
            err(1, "Could not create a pipe");
        if (pipe(ts.reader_go_pipe) == -1)
            err(1, "Could not create a pipe");
        switch ((child = fork())) {
        case -1: err(1, "Could not fork a child");
        case 0: forked_reader(&ts); _exit(0);
        default: break;
        }
        (void) close(ts.writer_go_pipe[1]);
        ts.writer_go_pipe[1] = -1;
    }

    printf("Writing two entries into HDB\n");
    if ((ret = hdb_create(context, &dbw, name)))
        krb5_err(context, 1, ret, "Could not get a handle for HDB %s (write)",
                 name);
    if ((ret = dbw->hdb_open(context, dbw, O_RDWR | O_CREAT, 0600)))
        krb5_err(context, 1, ret, "Could not create HDB %s", name);

    /* Add two entries */
    memset(&entw, 0, sizeof(entw));
    if ((ret = make_entry(context, &entw, "foo")) ||
        (ret = dbw->hdb_store(context, dbw, 0, &entw))) {
        (void) unlink(fname_ext);
        krb5_err(context, 1, ret,
                 "Could not store entry for \"foo\" in HDB %s", name);
    }
    free_HDB_entry(&entw.entry);
    if ((ret = make_entry(context, &entw, "bar")) ||
        (ret = dbw->hdb_store(context, dbw, 0, &entw))) {
        (void) unlink(fname_ext);
        krb5_err(context, 1, ret,
                 "Could not store entry for \"bar\" in HDB %s", name);
    }
    free_HDB_entry(&entw.entry);

    /* Tell the reader to start reading */
    readers_turn(&ts, child, threaded);

    /* Store one more entry */
    if ((ret = make_entry(context, &entw, "foobar")) ||
        (ret = dbw->hdb_store(context, dbw, 0, &entw))) {
        (void) unlink(fname_ext);
        krb5_err(context, 1, ret,
                 "Could not store entry for \"foobar\" in HDB %s "
                 "while iterating it", name);
    }
    free_HDB_entry(&entw.entry);

    /* Tell the reader to go again */
    readers_turn(&ts, child, threaded);

    dbw->hdb_close(context, dbw);
    dbw->hdb_destroy(context, dbw);
    if (threaded) {
        (void) pthread_join(reader_thread, NULL);
    } else {
        (void) close(ts.writer_go_pipe[1]);
        (void) close(ts.reader_go_pipe[0]);
        (void) close(ts.reader_go_pipe[1]);
        while (wait(&status) == -1 && errno == EINTR)
            ;
        (void) close(ts.writer_go_pipe[0]);
        if (!WIFEXITED(status))
            errx(1, "Child reader died");
        if (WEXITSTATUS(status) != 0)
            errx(1, "Child reader errored");
    }
    (void) unlink(fname_ext);
    krb5_free_context(context);
}

static int use_fork;
static int use_threads;
static int help_flag;
static int version_flag;

struct getargs args[] = {
    { "use-fork", 'f', arg_flag, &use_fork, NULL, NULL },
    { "use-threads", 't', arg_flag, &use_threads, NULL, NULL },
    { "help", 'h', arg_flag, &help_flag, NULL, NULL },
    { "version", 0, arg_flag, &version_flag, NULL, NULL }
};

static int num_args = sizeof(args) / sizeof(args[0]);

int
main(int argc, char **argv)
{
    char stemplate[sizeof("sqlite:testhdb-XXXXXX")];
#ifdef HAVE_LMDB
    char ltemplate[sizeof("lmdb:testhdb-XXXXXX")];
#endif
    int o = 0;

    setprogname(argv[0]);

    if (getarg(args, num_args, argc, argv, &o))
        krb5_std_usage(1, args, num_args);

    if (help_flag)
        krb5_std_usage(0, args, num_args);

    if (version_flag) {
        print_version(NULL);
        return 0;
    }

    if (!use_fork && !use_threads)
        use_threads = use_fork = 1;

#ifdef HAVE_FORK
    if (use_fork) {
        printf("Testing SQLite3 HDB backend (multi-process)\n");
        memcpy(stemplate, "sqlite:testhdb-XXXXXX", sizeof("sqlite:testhdb-XXXXXX"));
        test_hdb_concurrency(stemplate, "", 0);

#ifdef HAVE_LMDB
        printf("Testing LMDB HDB backend (multi-process)\n");
        memcpy(ltemplate, "lmdb:testhdb-XXXXXX", sizeof("lmdb:testhdb-XXXXXX"));
        test_hdb_concurrency(ltemplate, ".lmdb", 0);
#endif
    }
#endif

    if (use_threads) {
        printf("Testing SQLite3 HDB backend (multi-thread)\n");
        memcpy(stemplate, "sqlite:testhdb-XXXXXX", sizeof("sqlite:testhdb-XXXXXX"));
        test_hdb_concurrency(stemplate, "", 1);

#ifdef HAVE_LMDB
        printf("Testing LMDB HDB backend (multi-thread)\n");
        memcpy(ltemplate, "lmdb:testhdb-XXXXXX", sizeof("lmdb:testhdb-XXXXXX"));
        test_hdb_concurrency(ltemplate, ".lmdb", 1);
#endif
    }
    return 0;
}