Restructured trunk.
The Lisp code now lives in trunk/lisp. Added trunk/python, which contains interface.py, the humble beginnings of a user interface for BIBSYS/the database. Added a modified PyZ3950 library. (\ /) (O.o) (> <) Bunny approves these changes.
parent a771b49ca7
commit b9804b39ee
987 python/PyZ3950/CQLParser.py Normal file
@@ -0,0 +1,987 @@
#!/usr/bin/python

# Author: Rob Sanderson (azaroth@liv.ac.uk)
# Distributed and Usable under the GPL
# Version: 1.7
# Most Recent Changes: contexts, new modifier style for 1.1
#
# With thanks to Adam from IndexData and Mike Taylor for their valuable input

from shlex import shlex
from xml.sax.saxutils import escape
from xml.dom.minidom import Node, parseString
from PyZ3950.SRWDiagnostics import *
# Don't use cStringIO as it borks Unicode (apparently)
from StringIO import StringIO
import types

# Parsing strictness flags
errorOnEmptyTerm = 0          # index = "" (often meaningless)
errorOnQuotedIdentifier = 0   # "/foo/bar" = "" (unnecessary BNF restriction)
errorOnDuplicatePrefix = 0    # >a=b >a=c "" (impossible due to BNF)
fullResultSetNameCheck = 1    # srw.rsn=foo and srw.rsn=foo (mutant!!)

# Base values for CQL
serverChoiceRelation = "scr"
serverChoiceIndex = "cql.serverchoice"

order = ['=', '>', '>=', '<', '<=', '<>']
modifierSeparator = "/"
booleans = ['and', 'or', 'not', 'prox']

reservedPrefixes = {"srw" : "http://www.loc.gov/zing/cql/srw-indexes/v1.0/",
                    "cql" : "info:srw/cql-context-set/1/cql-v1.1"}

XCQLNamespace = "http://www.loc.gov/zing/cql/xcql/"

# End of 'configurable' stuff

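# For example, with these defaults the query
#     dc.title any/relevant "fish chips"
# parses to a single searchClause: index "dc.title", relation "any" carrying
# one relationModifier ("relevant"), and term 'fish chips'; boolean operators
# combine such clauses into triples.
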
class PrefixableObject:
    "Root object for triple and searchClause"
    prefixes = {}
    parent = None
    config = None

    def __init__(self):
        self.prefixes = {}
        self.parent = None
        self.config = None

    def toXCQL(self, depth=0):
        # Just generate our prefixes
        space = "  " * depth
        xml = ['%s<prefixes>\n' % (space)]
        for p in self.prefixes.keys():
            xml.append("%s  <prefix>\n%s    <name>%s</name>\n%s    <identifier>%s</identifier>\n%s  </prefix>\n" % (space, space, escape(p), space, escape(self.prefixes[p]), space))
        xml.append("%s</prefixes>\n" % (space))
        return ''.join(xml)


    def addPrefix(self, name, identifier):
        if (errorOnDuplicatePrefix and (self.prefixes.has_key(name) or reservedPrefixes.has_key(name))):
            # Maybe error
            diag = Diagnostic45()
            diag.details = name
            raise diag
        self.prefixes[name] = identifier

    def resolvePrefix(self, name):
        # Climb tree
        if (reservedPrefixes.has_key(name)):
            return reservedPrefixes[name]
        elif (self.prefixes.has_key(name)):
            return self.prefixes[name]
        elif (self.parent <> None):
            return self.parent.resolvePrefix(name)
        elif (self.config <> None):
            # Config is some sort of server config which specifies defaults
            return self.config.resolvePrefix(name)
        else:
            # Top of tree, no config, no resolution -> unknown index set
            # For client we need to allow no prefix?

            #diag = Diagnostic15()
            #diag.details = name
            #raise diag
            return None

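# Prefix maps thus shadow outward: resolvePrefix first consults the reserved
# prefixes ("srw", "cql"), then this node's own map, then walks up through
# parent and config; an unresolvable name yields None rather than a
# diagnostic, so clients may omit prefixes entirely.
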
class PrefixedObject:
    "Root object for relation, relationModifier and index"
    prefix = ""
    prefixURI = ""
    value = ""
    parent = None

    def __init__(self, val):
        # All prefixed things are case insensitive
        val = val.lower()
        if val and val[0] == '"' and val[-1] == '"':
            if errorOnQuotedIdentifier:
                diag = Diagnostic14()
                diag.details = val
                raise diag
            else:
                val = val[1:-1]
        self.value = val
        self.splitValue()

    def __str__(self):
        if (self.prefix):
            return "%s.%s" % (self.prefix, self.value)
        else:
            return self.value

    def splitValue(self):
        f = self.value.find(".")
        if (self.value.count('.') > 1):
            diag = Diagnostic15()
            diag.details = "Multiple '.' characters: %s" % (self.value)
            raise(diag)
        elif (f == 0):
            diag = Diagnostic15()
            diag.details = "Null indexset: %s" % (self.value)
            raise(diag)
        elif f >= 0:
            self.prefix = self.value[:f].lower()
            self.value = self.value[f+1:].lower()

    def resolvePrefix(self):
        if (not self.prefixURI):
            self.prefixURI = self.parent.resolvePrefix(self.prefix)
        return self.prefixURI

class ModifiableObject:
    # Treat modifiers as keys on boolean/relation?
    modifiers = []

    def __getitem__(self, k):
        if (type(k) == types.IntType):
            try:
                return self.modifiers[k]
            except:
                return None
        for m in self.modifiers:
            if (str(m.type) == k or m.type.value == k):
                return m
        return None

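# __getitem__ accepts either a position or a modifier name: rel[0] returns the
# first ModifierClause, while rel['stem'] scans for a modifier whose type
# matches "stem" (with or without its context-set prefix), returning None if
# absent.
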
class Triple (PrefixableObject):
    "Object to represent a CQL triple"
    leftOperand = None
    boolean = None
    rightOperand = None

    def toXCQL(self, depth=0):
        "Create the XCQL representation of the object"
        space = "  " * depth
        if (depth == 0):
            xml = ['<triple xmlns="%s">\n' % (XCQLNamespace)]
        else:
            xml = ['%s<triple>\n' % (space)]

        if self.prefixes:
            xml.append(PrefixableObject.toXCQL(self, depth+1))

        xml.append(self.boolean.toXCQL(depth+1))
        xml.append("%s  <leftOperand>\n" % (space))
        xml.append(self.leftOperand.toXCQL(depth+2))
        xml.append("%s  </leftOperand>\n" % (space))
        xml.append("%s  <rightOperand>\n" % (space))
        xml.append(self.rightOperand.toXCQL(depth+2))
        xml.append("%s  </rightOperand>\n" % (space))
        xml.append("%s</triple>\n" % (space))
        return ''.join(xml)

    def toCQL(self):
        txt = []
        if (self.prefixes):
            for p in self.prefixes.keys():
                if (p <> ''):
                    txt.append('>%s="%s"' % (p, self.prefixes[p]))
                else:
                    txt.append('>"%s"' % (self.prefixes[p]))
            prefs = ' '.join(txt)
            return "(%s %s %s %s)" % (prefs, self.leftOperand.toCQL(), self.boolean.toCQL(), self.rightOperand.toCQL())
        else:
            return "(%s %s %s)" % (self.leftOperand.toCQL(), self.boolean.toCQL(), self.rightOperand.toCQL())


    def getResultSetId(self, top=None):

        if fullResultSetNameCheck == 0 or self.boolean.value in ['not', 'prox']:
            return ""

        if top == None:
            topLevel = 1
            top = self
        else:
            topLevel = 0

        # Iterate over operands and build a list
        rsList = []
        if isinstance(self.leftOperand, Triple):
            rsList.extend(self.leftOperand.getResultSetId(top))
        else:
            rsList.append(self.leftOperand.getResultSetId(top))
        if isinstance(self.rightOperand, Triple):
            rsList.extend(self.rightOperand.getResultSetId(top))
        else:
            rsList.append(self.rightOperand.getResultSetId(top))

        if topLevel == 1:
            # Check all elements are the same, if so we're a fubar form of present
            if (len(rsList) == rsList.count(rsList[0])):
                return rsList[0]
            else:
                return ""
        else:
            return rsList

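# getResultSetId flattens the operand tree into a list of per-clause result
# set ids; only when every leaf names the same set (e.g. cql.resultsetid=foo
# and cql.resultsetid=foo) does the whole query collapse to that id,
# otherwise "" signals a normal search.
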
class SearchClause (PrefixableObject):
    "Object to represent a CQL searchClause"
    index = None
    relation = None
    term = None

    def __init__(self, ind=None, rel=None, t=None):
        PrefixableObject.__init__(self)
        self.index = ind
        self.relation = rel
        self.term = t
        # Operands may be None when the clause is built up incrementally
        # (as XCQLParser below does), so only parent the ones supplied
        for o in (ind, rel, t):
            if o is not None:
                o.parent = self

    def toXCQL(self, depth=0):
        "Produce XCQL version of the object"
        space = "  " * depth
        if (depth == 0):
            xml = ['<searchClause xmlns="%s">\n' % (XCQLNamespace)]
        else:
            xml = ['%s<searchClause>\n' % (space)]

        if self.prefixes:
            xml.append(PrefixableObject.toXCQL(self, depth+1))

        xml.append(self.index.toXCQL(depth+1))
        xml.append(self.relation.toXCQL(depth+1))
        xml.append(self.term.toXCQL(depth+1))
        xml.append("%s</searchClause>\n" % (space))
        return ''.join(xml)

    def toCQL(self):
        text = []
        for p in self.prefixes.keys():
            if (p <> ''):
                text.append('>%s="%s"' % (p, self.prefixes[p]))
            else:
                text.append('>"%s"' % (self.prefixes[p]))
        text.append('%s %s "%s"' % (self.index, self.relation.toCQL(), self.term))
        return ' '.join(text)

    def getResultSetId(self, top=None):
        idx = self.index
        idx.resolvePrefix()
        if (idx.prefixURI == reservedPrefixes['cql'] and idx.value.lower() == 'resultsetid'):
            return self.term.value
        else:
            return ""

class Index(PrefixedObject):
    "Object to represent a CQL index"

    def toXCQL(self, depth=0):
        if (depth == 0):
            ns = ' xmlns="%s"' % (XCQLNamespace)
        else:
            ns = ""
        return "%s<index%s>%s</index>\n" % ("  "*depth, ns, escape(str(self)))

    def toCQL(self):
        return str(self)

class Relation(PrefixedObject, ModifiableObject):
    "Object to represent a CQL relation"
    def __init__(self, rel="", mods=None):
        # NB: a mutable default ([]) would be shared across instances and is
        # appended to elsewhere, so build a fresh list per instance
        if mods is None:
            mods = []
        self.prefix = "cql"
        PrefixedObject.__init__(self, rel)
        self.modifiers = mods
        for m in mods:
            m.parent = self

    def toXCQL(self, depth=0):
        "Create XCQL representation of object"
        if (depth == 0):
            ns = ' xmlns="%s"' % (XCQLNamespace)
        else:
            ns = ""

        space = "  " * depth

        xml = ["%s<relation%s>\n" % (space, ns)]
        xml.append("%s  <value>%s</value>\n" % (space, escape(self.value)))
        if self.modifiers:
            xml.append("%s  <modifiers>\n" % (space))
            for m in self.modifiers:
                xml.append(m.toXCQL(depth+2))
            xml.append("%s  </modifiers>\n" % (space))
        xml.append("%s</relation>\n" % (space))
        return ''.join(xml)

    def toCQL(self):
        txt = [self.value]
        txt.extend(map(str, self.modifiers))
        return '/'.join(txt)

class Term:
    value = ""
    def __init__(self, v):
        if (v <> ""):
            # Unquoted literal
            if v in ['>=', '<=', '>', '<', '<>', "/", '=']:
                diag = Diagnostic25()
                diag.details = v
                raise diag

            # Check existence of meaningful term
            nonanchor = 0
            for c in v:
                if c != "^":
                    nonanchor = 1
                    break
            if not nonanchor:
                diag = Diagnostic32()
                diag.details = "Only anchoring character(s) in term: " + v
                raise diag

            # Unescape quotes
            if (v[0] == '"' and v[-1] == '"'):
                v = v[1:-1]
                v = v.replace('\\"', '"')

            if (not v and errorOnEmptyTerm):
                diag = Diagnostic27()
                raise diag

            # Check for badly placed \s
            startidx = 0
            idx = v.find("\\", startidx)
            while (idx > -1):
                startidx = idx + 2
                if not v[idx+1:idx+2] in ['?', '\\', '*', '^']:
                    diag = Diagnostic26()
                    diag.details = v
                    raise diag
                idx = v.find("\\", startidx)

        elif (errorOnEmptyTerm):
            diag = Diagnostic27()
            raise diag

        self.value = v

    def __str__(self):
        return self.value

    def toXCQL(self, depth=0):
        if (depth == 0):
            ns = ' xmlns="%s"' % (XCQLNamespace)
        else:
            ns = ""
        return "%s<term%s>%s</term>\n" % ("  "*depth, ns, escape(self.value))

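# Terms therefore accept backslash escapes only before ?, \, * or ^: 'fish\*'
# (a literal asterisk) is fine, while 'fi\sh' or a trailing lone backslash
# raises Diagnostic26, and a term consisting only of ^ anchors raises
# Diagnostic32.
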
class Boolean(ModifiableObject):
    "Object to represent a CQL boolean"
    value = ""
    parent = None
    def __init__(self, bool="", mods=None):
        if mods is None:
            mods = []
        self.value = bool
        self.modifiers = mods
        self.parent = None

    def toXCQL(self, depth=0):
        "Create XCQL representation of object"
        space = "  " * depth
        xml = ["%s<boolean>\n" % (space)]
        xml.append("%s  <value>%s</value>\n" % (space, escape(self.value)))
        if self.modifiers:
            xml.append("%s  <modifiers>\n" % (space))
            for m in self.modifiers:
                xml.append(m.toXCQL(depth+2))
            xml.append("%s  </modifiers>\n" % (space))
        xml.append("%s</boolean>\n" % (space))
        return ''.join(xml)

    def toCQL(self):
        txt = [self.value]
        for m in self.modifiers:
            txt.append(m.toCQL())
        return '/'.join(txt)

    def resolvePrefix(self, name):
        return self.parent.resolvePrefix(name)

class ModifierType(PrefixedObject):
    # Same as index, but we'll XCQLify in ModifierClause
    parent = None
    prefix = "cql"

class ModifierClause:
    "Object to represent a relation modifier"
    parent = None
    type = None
    comparison = ""
    value = ""

    def __init__(self, type, comp="", val=""):
        self.type = ModifierType(type)
        self.type.parent = self
        self.comparison = comp
        self.value = val

    def __str__(self):
        if (self.value):
            return "%s%s%s" % (str(self.type), self.comparison, self.value)
        else:
            return "%s" % (str(self.type))

    def toXCQL(self, depth=0):
        if (self.value):
            return "%s<modifier>\n%s<type>%s</type>\n%s<comparison>%s</comparison>\n%s<value>%s</value>\n%s</modifier>\n" % ("  " * depth, "  " * (depth+1), escape(str(self.type)), "  " * (depth+1), escape(self.comparison), "  " * (depth+1), escape(self.value), "  " * depth)
        else:
            return "%s<modifier><type>%s</type></modifier>\n" % ("  " * depth, escape(str(self.type)))

    def toCQL(self):
        return str(self)

    def resolvePrefix(self, name):
        # Need to skip parent, which has its own resolvePrefix
        # eg boolean or relation, neither of which is prefixable
        return self.parent.parent.resolvePrefix(name)


# Requires changes for: <= >= <>, and escaped \" in "
# From shlex.py (std library for 2.2+)
class CQLshlex(shlex):
    "shlex with additions for CQL parsing"
    quotes = '"'
    commenters = ""
    nextToken = ""

    def __init__(self, thing):
        shlex.__init__(self, thing)
        self.wordchars += "!@#$%^&*-+{}[];,.?|~`:\\"
        self.wordchars += ''.join(map(chr, range(128, 254)))

    def read_token(self):
        "Read a token from the input stream (no pushback or inclusions)"

        while 1:
            if (self.nextToken != ""):
                self.token = self.nextToken
                self.nextToken = ""
                # Bah. SUPER ugly non portable
                if self.token == "/":
                    self.state = ' '
                    break

            nextchar = self.instream.read(1)
            if nextchar == '\n':
                self.lineno = self.lineno + 1
            if self.debug >= 3:
                print "shlex: in state ", repr(self.state), " I see character:", repr(nextchar)

            if self.state is None:
                self.token = ''  # past end of file
                break
            elif self.state == ' ':
                if not nextchar:
                    self.state = None  # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        print "shlex: I see whitespace in whitespace state"
                    if self.token:
                        break  # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    self.instream.readline()
                    self.lineno = self.lineno + 1
                elif nextchar in self.wordchars:
                    self.token = nextchar
                    self.state = 'a'
                elif nextchar in self.quotes:
                    self.token = nextchar
                    self.state = nextchar
                elif nextchar in ['<', '>']:
                    self.token = nextchar
                    self.state = '<'
                else:
                    self.token = nextchar
                    if self.token:
                        break  # emit current token
                    else:
                        continue
            elif self.state == '<':
                # Only accumulate <=, >= or <>

                if self.token == ">" and nextchar == "=":
                    self.token = self.token + nextchar
                    self.state = ' '
                    break
                elif self.token == "<" and nextchar in ['>', '=']:
                    self.token = self.token + nextchar
                    self.state = ' '
                    break
                elif not nextchar:
                    self.state = None
                    break
                elif nextchar == "/":
                    self.state = "/"
                    self.nextToken = "/"
                    break
                elif nextchar in self.wordchars:
                    self.state = 'a'
                    self.nextToken = nextchar
                    break
                elif nextchar in self.quotes:
                    self.state = nextchar
                    self.nextToken = nextchar
                    break
                else:
                    self.state = ' '
                    break

            elif self.state in self.quotes:
                self.token = self.token + nextchar
                # Allow escaped quotes
                if nextchar == self.state and self.token[-2] != '\\':
                    self.state = ' '
                    break
                elif not nextchar:  # end of file
                    if self.debug >= 2:
                        print "shlex: I see EOF in quotes state"
                    # Override SHLEX's ValueError to throw diagnostic
                    diag = Diagnostic14()
                    diag.details = self.token[:-1]
                    raise diag
            elif self.state == 'a':
                if not nextchar:
                    self.state = None  # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        print "shlex: I see whitespace in word state"
                    self.state = ' '
                    if self.token:
                        break  # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    self.instream.readline()
                    self.lineno = self.lineno + 1
                elif nextchar in self.wordchars or nextchar in self.quotes:
                    self.token = self.token + nextchar
                elif nextchar in ['>', '<']:
                    self.nextToken = nextchar
                    self.state = '<'
                    break
                else:
                    self.pushback = [nextchar] + self.pushback
                    if self.debug >= 2:
                        print "shlex: I see punctuation in word state"
                    self.state = ' '
                    if self.token:
                        break  # emit current token
                    else:
                        continue
        result = self.token
        self.token = ''
        if self.debug > 1:
            if result:
                print "shlex: raw token=" + `result`
            else:
                print "shlex: raw token=EOF"
        return result

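# The lexer is a small state machine: ' ' (between tokens), 'a' (inside a
# word), '<' (accumulating a two-character relation such as <=, >= or <>),
# a quote character (inside a quoted string, honouring \" escapes), and None
# (end of input). nextToken buffers one lookahead character so that / and
# relation characters split words without consuming the following token.
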
class CQLParser:
    "Token parser to create object structure for CQL"
    parser = ""
    currentToken = ""
    nextToken = ""

    def __init__(self, p):
        """ Initialise with shlex parser """
        self.parser = p
        self.fetch_token()  # Fetches to next
        self.fetch_token()  # Fetches to curr

    def is_boolean(self, token):
        "Is the token a boolean"
        token = token.lower()
        return token in booleans

    def fetch_token(self):
        """ Read ahead one token """
        tok = self.parser.get_token()
        self.currentToken = self.nextToken
        self.nextToken = tok

    def prefixes(self):
        "Create prefixes dictionary"
        prefs = {}
        while (self.currentToken == ">"):
            # Strip off maps
            self.fetch_token()
            if self.nextToken == "=":
                # Named map
                name = self.currentToken
                self.fetch_token()  # = is current
                self.fetch_token()  # id is current
                identifier = self.currentToken
                self.fetch_token()
            else:
                name = ""
                identifier = self.currentToken
                self.fetch_token()
            if (errorOnDuplicatePrefix and prefs.has_key(name)):
                # Error condition
                diag = Diagnostic45()
                diag.details = name
                raise diag
            if len(identifier) > 1 and identifier[0] == '"' and identifier[-1] == '"':
                identifier = identifier[1:-1]
            prefs[name.lower()] = identifier

        return prefs

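    # prefixes() consumes leading prefix maps of the form >name="identifier"
    # (or an anonymous >"identifier", stored under the empty name) and leaves
    # currentToken at the first token of the query proper.
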
    def query(self):
        """ Parse query """
        prefs = self.prefixes()
        left = self.subQuery()
        while 1:
            if not self.currentToken:
                break
            bool = self.is_boolean(self.currentToken)
            if bool:
                boolobject = self.boolean()
                right = self.subQuery()
                # Setup Left Object
                trip = tripleType()
                trip.leftOperand = left
                trip.boolean = boolobject
                trip.rightOperand = right
                left.parent = trip
                right.parent = trip
                boolobject.parent = trip
                left = trip
            else:
                break

        for p in prefs.keys():
            left.addPrefix(p, prefs[p])
        return left

    def subQuery(self):
        """ Find either query or clause """
        if self.currentToken == "(":
            self.fetch_token()  # Skip (
            object = self.query()
            if self.currentToken == ")":
                self.fetch_token()  # Skip )
            else:
                diag = Diagnostic13()
                diag.details = self.currentToken
                raise diag
        else:
            prefs = self.prefixes()
            if (prefs):
                object = self.query()
                for p in prefs.keys():
                    object.addPrefix(p, prefs[p])
            else:
                object = self.clause()
        return object

    def clause(self):
        """ Find searchClause """
        bool = self.is_boolean(self.nextToken)
        if not bool and not (self.nextToken in [')', '(', '']):

            index = indexType(self.currentToken)
            self.fetch_token()  # Skip Index
            rel = self.relation()
            if (self.currentToken == ''):
                diag = Diagnostic10()
                diag.details = "Expected Term, got end of query."
                raise(diag)
            term = termType(self.currentToken)
            self.fetch_token()  # Skip Term

            irt = searchClauseType(index, rel, term)

        elif self.currentToken and (bool or self.nextToken in [')', '']):

            irt = searchClauseType(indexType(serverChoiceIndex), relationType(serverChoiceRelation), termType(self.currentToken))
            self.fetch_token()

        elif self.currentToken == ">":
            prefs = self.prefixes()
            # iterate to get object
            object = self.clause()
            for p in prefs.keys():
                object.addPrefix(p, prefs[p])
            return object

        else:
            diag = Diagnostic10()
            diag.details = "Expected Boolean or Relation but got: " + self.currentToken
            raise diag

        return irt

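    # The grammar is parsed by recursive descent with one token of lookahead:
    # query() folds "clause bool clause bool ..." left-associatively, so
    # a and b or c groups as ((a and b) or c); subQuery() handles parentheses
    # and scoped prefix maps; clause() resolves the serverChoice shorthand
    # for bare terms.
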
    def modifiers(self):
        mods = []
        while (self.currentToken == modifierSeparator):
            self.fetch_token()
            mod = self.currentToken
            mod = mod.lower()
            if (mod == modifierSeparator):
                diag = Diagnostic20()
                diag.details = "Null modifier"
                raise diag
            self.fetch_token()
            comp = self.currentToken
            if (comp in order):
                self.fetch_token()
                value = self.currentToken
                self.fetch_token()
            else:
                comp = ""
                value = ""
            mods.append(ModifierClause(mod, comp, value))
        return mods


    def boolean(self):
        """ Find boolean """
        self.currentToken = self.currentToken.lower()
        if self.currentToken in booleans:
            bool = booleanType(self.currentToken)
            self.fetch_token()
            bool.modifiers = self.modifiers()
            for b in bool.modifiers:
                b.parent = bool

        else:
            diag = Diagnostic37()
            diag.details = self.currentToken
            raise diag

        return bool

    def relation(self):
        """ Find relation """
        self.currentToken = self.currentToken.lower()
        rel = relationType(self.currentToken)
        self.fetch_token()
        rel.modifiers = self.modifiers()
        for r in rel.modifiers:
            r.parent = rel

        return rel


class XCQLParser:
    """ Parser for XCQL using some very simple DOM """

    def firstChildElement(self, elem):
        """ Find first child which is an Element """
        for c in elem.childNodes:
            if c.nodeType == Node.ELEMENT_NODE:
                return c
        return None

    def firstChildData(self, elem):
        """ Find first child which is Data """
        for c in elem.childNodes:
            if c.nodeType == Node.TEXT_NODE:
                return c
        return None

    def searchClause(self, elem):
        """ Process a <searchClause> """
        sc = searchClauseType()
        for c in elem.childNodes:
            if c.nodeType == Node.ELEMENT_NODE:
                if c.localName == "index":
                    sc.index = indexType(self.firstChildData(c).data.lower())
                elif c.localName == "term":
                    sc.term = termType(self.firstChildData(c).data)
                elif c.localName == "relation":
                    sc.relation = self.relation(c)
                elif c.localName == "prefixes":
                    sc.prefixes = self.prefixes(c)
                else:
                    raise ValueError(c.localName)
        return sc

    def triple(self, elem):
        """ Process a <triple> """
        trip = tripleType()
        for c in elem.childNodes:
            if c.nodeType == Node.ELEMENT_NODE:
                if c.localName == "boolean":
                    trip.boolean = self.boolean(c)
                elif c.localName == "prefixes":
                    trip.prefixes = self.prefixes(c)
                elif c.localName == "leftOperand":
                    c2 = self.firstChildElement(c)
                    if c2.localName == "searchClause":
                        trip.leftOperand = self.searchClause(c2)
                    else:
                        trip.leftOperand = self.triple(c2)
                else:
                    c2 = self.firstChildElement(c)
                    if c2.localName == "searchClause":
                        trip.rightOperand = self.searchClause(c2)
                    else:
                        trip.rightOperand = self.triple(c2)
        return trip

    def relation(self, elem):
        """ Process a <relation> """
        rel = relationType()
        for c in elem.childNodes:
            if c.nodeType == Node.ELEMENT_NODE:
                if c.localName == "value":
                    rel.value = c.firstChild.data.lower()
                elif c.localName == "modifiers":
                    mods = []
                    for c2 in c.childNodes:
                        if c2.nodeType == Node.ELEMENT_NODE:
                            if c2.localName == "modifier":
                                for c3 in c2.childNodes:
                                    if c3.localName == "value":
                                        val = self.firstChildData(c3).data.lower()
                                        mods.append(val)
                    rel.modifiers = mods
        return rel

    def boolean(self, elem):
        "Process a <boolean>"
        bool = booleanType()
        for c in elem.childNodes:
            if c.nodeType == Node.ELEMENT_NODE:
                if c.localName == "value":
                    bool.value = self.firstChildData(c).data.lower()
                else:
                    # Can be in any order, so we need to extract, then order
                    mods = {}
                    for c2 in c.childNodes:
                        if c2.nodeType == Node.ELEMENT_NODE:
                            if c2.localName == "modifier":
                                type = ""
                                value = ""
                                for c3 in c2.childNodes:
                                    if c3.nodeType == Node.ELEMENT_NODE:
                                        if c3.localName == "value":
                                            value = self.firstChildData(c3).data.lower()
                                        elif c3.localName == "type":
                                            type = self.firstChildData(c3).data
                                mods[type] = value

                    modlist = []
                    for t in booleanModifierTypes[1:]:
                        if mods.has_key(t):
                            modlist.append(mods[t])
                        else:
                            modlist.append('')
                    bool.modifiers = modlist
        return bool

    def prefixes(self, elem):
        "Process <prefixes>"
        prefs = {}
        for c in elem.childNodes:
            if c.nodeType == Node.ELEMENT_NODE:
                # prefix
                name = ""
                identifier = ""
                for c2 in c.childNodes:
                    if c2.nodeType == Node.ELEMENT_NODE:
                        if c2.localName == "name":
                            name = self.firstChildData(c2).data.lower()
                        elif c2.localName == "identifier":
                            identifier = self.firstChildData(c2).data
                prefs[name] = identifier
        return prefs


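# NB: XCQLParser.boolean references booleanModifierTypes, which is not
# defined anywhere in this module, so XCQL boolean modifiers only work if
# the embedding application supplies that list.
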
def xmlparse(s):
    """ API. Return a searchClause/triple object from XML string """
    doc = parseString(s)
    q = xcqlparse(doc.firstChild)
    return q

def xcqlparse(query):
    """ API. Return a searchClause/triple object from XML DOM objects"""
    # Requires only properties of objects so we don't care how they're generated

    p = XCQLParser()
    if query.localName == "searchClause":
        return p.searchClause(query)
    else:
        return p.triple(query)


def parse(query):
    """ API. Return a searchClause/triple object from CQL string"""

    try:
        query = query.encode("utf-8")
    except:
        diag = Diagnostic10()
        diag.details = "Cannot parse non utf-8 characters"
        raise diag

    q = StringIO(query)
    lexer = CQLshlex(q)
    parser = CQLParser(lexer)
    object = parser.query()
    if parser.currentToken != '':
        diag = Diagnostic10()
        diag.details = "Unprocessed tokens remain: " + repr(parser.currentToken)
        raise diag
    else:
        del lexer
        del parser
        del q
        return object

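# Typical use, mirroring the __main__ block below:
#     q = parse('dc.title any "fish chips" and dc.date > 2000')
#     print q.toCQL()     # round-trip back to CQL
#     print q.toXCQL()    # XCQL serialisation
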
# Assign our objects to generate
tripleType = Triple
booleanType = Boolean
relationType = Relation
searchClauseType = SearchClause
modifierClauseType = ModifierClause
modifierTypeType = ModifierType
indexType = Index
termType = Term

try:
    from CQLUtils import *
    tripleType = CTriple
    booleanType = CBoolean
    relationType = CRelation
    searchClauseType = CSearchClause
    modifierClauseType = CModifierClause
    modifierTypeType = CModifierType
    indexType = CIndex
    termType = CTerm
except:
    # Nested scopes. Utils needs our classes to parent
    # We need its classes to build (maybe)
    pass


if (__name__ == "__main__"):
    import sys
    s = sys.stdin.readline()
    try:
        q = parse(s)
    except SRWDiagnostic, diag:
        # Print a full version, not just str()
        print "Diagnostic Generated."
        print "  Code:    " + str(diag.code)
        print "  Details: " + str(diag.details)
        print "  Message: " + str(diag.message)
    else:
        print q.toXCQL()[:-1]

544 python/PyZ3950/CQLUtils.py Normal file
@@ -0,0 +1,544 @@
"""CQL utility functions and subclasses"""

from CQLParser import *
from types import ListType, IntType
from SRWDiagnostics import *

from PyZ3950 import z3950, asn1, oids
from PyZ3950.zdefs import make_attr

asn1.register_oid (oids.Z3950_QUERY_CQL, asn1.GeneralString)

class ZCQLConfig:

    contextSets = {'dc' : 'info:srw/cql-context-set/1/dc-v1.1',
                   'cql' : 'info:srw/cql-context-set/1/cql-v1.1',
                   'bath' : 'http://zing.z3950.org/cql/bath/2.0/',
                   'zthes' : 'http://zthes.z3950.org/cql/1.0/',
                   'ccg' : 'http://srw.cheshire3.org/contextSets/ccg/1.1/',
                   'rec' : 'info:srw/cql-context-set/2/rec-1.0',
                   'net' : 'info:srw/cql-context-set/2/net-1.0'}

    dc = {'title' : 4,
          'subject' : 21,
          'creator' : 1003,
          'author' : 1003,
          'editor' : 1020,
          'contributor' : 1018,
          'publisher' : 1018,
          'description' : 62,
          'date' : 30,
          'resourceType' : 1031,
          'type' : 1031,
          'format' : 1034,
          'identifier' : 12,
          'source' : 1019,
          'language' : 54,
          'relation' : 1016,
          'coverage' : 1016,
          'rights' : 1016
          }

    cql = {'anywhere' : 1016,
           'serverChoice' : 1016}

    # The common bib1 points
    bib1 = {"personal_name" : 1,
            "corporate_name" : 2,
            "conference_name" : 3,
            "title" : 4,
            "title_series" : 5,
            "title_uniform" : 6,
            "isbn" : 7,
            "issn" : 8,
            "lccn" : 9,
            "local_number" : 12,
            "dewey_number" : 13,
            "lccn" : 16,
            "local_classification" : 20,
            "subject" : 21,
            "subject_lc" : 27,
            "subject_local" : 29,
            "date" : 30,
            "date_publication" : 31,
            "date_acquisition" : 32,
            "local_call_number" : 53,
            "abstract" : 62,
            "note" : 63,
            "record_type" : 1001,
            "name" : 1002,
            "author" : 1003,
            "author_personal" : 1004,
            "identifier" : 1007,
            "text_body" : 1010,
            "date_modified" : 1012,
            "date_added" : 1011,
            "concept_text" : 1014,
            "any" : 1016,
            "default" : 1017,
            "publisher" : 1018,
            "record_source" : 1019,
            "editor" : 1020,
            "docid" : 1032,
            "anywhere" : 1035,
            "sici" : 1037
            }

    exp1 = {"explainCategory" : 1,
            "humanStringLanguage" : 2,
            "databaseName" : 3,
            "serverName" : 4,
            "attributeSetOID" : 5,
            "recordSyntaxOID" : 6,
            "tagSetOID" : 7,
            "extendedServiceOID" : 8,
            "dateAdded" : 9,
            "dateChanged" : 10,
            "dateExpires" : 11,
            "elementSetName" : 12,
            "processingContext" : 13,
            "processingName" : 14,
            "termListName" : 15,
            "schemaOID" : 16,
            "producer" : 17,
            "supplier" : 18,
            "availability" : 19,
            "proprietary" : 20,
            "userFee" : 21,
            "variantSetOID" : 22,
            "unitSystem" : 23,
            "keyword" : 24,
            "explainDatabase" : 25,
            "processingOID" : 26
            }

    xd1 = {"title" : 1,
           "subject" : 2,
           "name" : 3,
           "description" : 4,
           "date" : 5,
           "type" : 6,
           "format" : 7,
           "identifier" : 8,
           "source" : 9,
           "language" : 10,
           "relation" : 11,
           "coverage" : 12,
           "rights" : 13}

    util = {"record_date" : 1,
            "record_agent" : 2,
            "record_language" : 3,
            "control_number" : 4,
            "cost" : 5,
            "record_syntax" : 6,
            "database_schema" : 7,
            "score" : 8,
            "rank" : 9,
            "result_set_position" : 10,
            "all" : 11,
            "anywhere" : 12,
            "server_choice" : 13,
            "wildcard" : 14,
            "wildpath" : 15}

    defaultAttrSet = z3950.Z3950_ATTRS_BIB1_ov

    def __init__(self):
        self.util1 = self.util
        self.xd = self.xd1

    def attrsToCql(self, attrs):
        hash = {}
        for c in attrs:
            if (not c[0]):
                c[0] = self.defaultAttrSet
            hash[(c[0], c[1])] = c[2]
        bib1 = z3950.Z3950_ATTRS_BIB1_ov
        use = hash.get((bib1, 1), 4)
        rel = hash.get((bib1, 2), 3)
        posn = hash.get((bib1, 3), None)
        struct = hash.get((bib1, 4), None)
        trunc = hash.get((bib1, 5), None)
        comp = hash.get((bib1, 6), None)

        index = None
        if (not isinstance(use, int)):
            index = indexType(use)
        else:
            for v in self.dc.items():
                if use == v[1]:
                    index = indexType("dc.%s" % (v[0]))
                    break
            if not index:
                for v in self.bib1.items():
                    if (use == v[1]):
                        index = indexType("bib1.%s" % (v[0]))
                        break
            if not index:
                index = indexType("bib1.%i" % (use))

        relations = ['', '<', '<=', '=', '>=', '>', '<>']
        if (comp == 3):
            relation = relationType("exact")
        elif (rel > 6):
            if struct in [2, 6]:
                relation = relationType('any')
            else:
                relation = relationType('=')
        else:
            relation = relationType(relations[rel])

        if (rel == 100):
            relation.modifiers.append(modifierClauseType('phonetic'))
        elif (rel == 101):
            relation.modifiers.append(modifierClauseType('stem'))
        elif (rel == 102):
            relation.modifiers.append(modifierClauseType('relevant'))

        if (struct in [2, 6]):
            relation.modifiers.append(modifierClauseType('word'))
        elif (struct in [4, 5, 100]):
            relation.modifiers.append(modifierClauseType('date'))
        elif (struct == 109):
            relation.modifiers.append(modifierClauseType('number'))
        elif (struct in [1, 108]):
            relation.modifiers.append(modifierClauseType('string'))
        elif (struct == 104):
            relation.modifiers.append(modifierClauseType('uri'))

        return (index, relation)

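# For example, bib1 attributes (1, 4) use=title and (2, 3) relation=equal map
# back to the clause  dc.title =  (dc is preferred over bib1 when the use
# value appears in both tables); unknown use values fall back to a numeric
# bib1.<n> index.
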
zConfig = ZCQLConfig()

def rpn2cql(rpn, config=zConfig, attrSet=None):
    if rpn[0] == 'op':
        # single search clause
        op = rpn[1]
        type = op[0]
        if type == 'attrTerm':
            attrs = op[1].attributes
            term = op[1].term
            combs = []
            for comb in attrs:
                if hasattr(comb, 'attributeSet'):
                    attrSet = comb.attributeSet
                if hasattr(comb, 'attributeType'):
                    aType = comb.attributeType
                else:
                    # Broken!
                    aType = 1
                vstruct = comb.attributeValue
                if (vstruct[0] == 'numeric'):
                    aValue = vstruct[1]
                else:
                    # Complex attr value
                    vstruct = vstruct[1]
                    if (hasattr(vstruct, 'list')):
                        aValue = vstruct.list[0][1]
                    else:
                        # semanticAction?
                        aValue = vstruct.semanticAction[0][1]
                combs.append([attrSet, aType, aValue])
            # Now let config do its thing
            (index, relation) = config.attrsToCql(combs)
            return searchClauseType(index, relation, termType(term[1]))

        elif type == 'resultSet':
            return searchClauseType(indexType('cql.resultSetId'), relationType('='), termType(op[1]))

    elif rpn[0] == 'rpnRpnOp':
        triple = rpn[1]
        bool = triple.op
        lhs = triple.rpn1
        rhs = triple.rpn2
        ctrip = tripleType()
        ctrip.leftOperand = rpn2cql(lhs, config)
        ctrip.rightOperand = rpn2cql(rhs, config)
        ctrip.boolean = booleanType(bool[0])
        if bool[0] == 'prox':
            distance = bool[1].distance
            order = bool[1].ordered
            if order:
                order = "ordered"
            else:
                order = "unordered"
            relation = bool[1].relationType
            rels = ["", "<", "<=", "=", ">=", ">", "<>"]
            relation = rels[relation]
            unit = bool[1].proximityUnitCode
            units = ["", "character", "word", "sentence", "paragraph", "section", "chapter", "document", "element", "subelement", "elementType", "byte"]
            if unit[0] == "known":
                unit = units[unit[1]]
            mods = [modifierClauseType('distance', relation, str(distance)), modifierClauseType('unit', '=', unit), modifierClauseType(order)]
            ctrip.boolean.modifiers = mods
        return ctrip

    elif rpn[0] == 'type_1':
        q = rpn[1]
        return rpn2cql(q.rpn, config, q.attributeSet)


class CSearchClause(SearchClause):

    def convertMetachars(self, t):
        "Convert SRW meta characters in to Cheshire's meta characters"
        # Fail on ?, ^ or * not at the end.
        if (t.count("?") != t.count("\\?")):
            diag = Diagnostic28()
            diag.details = "? Unsupported"
            raise diag
        elif (t.count("^") != t.count("\\^")):
            diag = Diagnostic31()
            diag.details = "^ Unsupported"
            raise diag
        elif (t.count("*") != t.count("\\*")):
            if t[-1] != "*" or t[-2] == "\\":
                diag = Diagnostic28()
                diag.details = "Non trailing * unsupported"
                raise diag
            else:
                t = t[:-1] + "#"
        t = t.replace("\\^", "^")
        t = t.replace("\\?", "?")
        t = t.replace("\\*", "*")
        return t

    def toRPN(self, top=None):
        if not top:
            top = self

        if (self.relation.value in ['any', 'all']):
            # Need to split this into and/or tree
            if (self.relation.value == 'any'):
                bool = " or "
            else:
                bool = " and "
            words = self.term.value.split()
            self.relation.value = '='
            # Add 'word' relationModifier
            self.relation.modifiers.append(CModifierClause('cql.word'))

            # Create CQL, parse it, walk new tree
            idxrel = "%s %s" % (self.index.toCQL(), self.relation.toCQL())
            text = []
            for w in words:
                text.append('%s "%s"' % (idxrel, w))
            cql = bool.join(text)
            tree = parse(cql)
            tree.prefixes = self.prefixes
            tree.parent = self.parent
            tree.config = self.config
            return tree.toRPN(top)
        else:
            # attributes, term
            # AttributeElement: attributeType, attributeValue
            # attributeValue ('numeric', n) or ('complex', struct)
            if (self.index.value == 'resultsetid'):
                return ('op', ('resultSet', self.term.value))

            clause = z3950.AttributesPlusTerm()
            attrs = self.index.toRPN(top)
            if (self.term.value.isdigit()):
                self.relation.modifiers.append(CModifierClause('cql.number'))
            relattrs = self.relation.toRPN(top)
            attrs.update(relattrs)
            butes = []
            for e in attrs.iteritems():
                butes.append((e[0][0], e[0][1], e[1]))

            clause.attributes = [make_attr(*e) for e in butes]
            clause.term = self.term.toRPN(top)

            return ('op', ('attrTerm', clause))

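# any/all are rewritten rather than mapped directly: e.g.
#     dc.title any "fish chips"
# becomes  dc.title =/cql.word "fish" or dc.title =/cql.word "chips"
# which is then re-parsed and converted, so only '=' relations reach the
# attribute mapping below.
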
class CBoolean(Boolean):

    def toRPN(self, top):
        op = self.value
        if (self.value == 'not'):
            op = 'and-not'
        elif (self.value == 'prox'):
            # Create ProximityOperator
            prox = z3950.ProximityOperator()
            # distance, ordered, proximityUnitCode, relationType
            u = self['unit']
            try:
                units = ["", "character", "word", "sentence", "paragraph", "section", "chapter", "document", "element", "subelement", "elementType", "byte"]
                if (u.value in units):
                    prox.proximityUnitCode = ('known', units.index(u.value))
                else:
                    # Uhhhh.....
                    prox.proximityUnitCode = ('private', int(u.value))
            except:
                prox.proximityUnitCode = ('known', 2)

            d = self['distance']
            try:
                prox.distance = int(d.value)
            except:
                if (prox.proximityUnitCode == ('known', 2)):
                    prox.distance = 1
                else:
                    prox.distance = 0
            try:
                rels = ["", "<", "<=", "=", ">=", ">", "<>"]
                prox.relationType = rels.index(d.comparison)
            except:
                prox.relationType = 2

            prox.ordered = bool(self['ordered'])
            return ('prox', prox)

        return (op, None)

class CTriple(Triple):

    def toRPN(self, top=None):
        """rpnRpnOp"""
        if not top:
            top = self

        op = z3950.RpnRpnOp()
        op.rpn1 = self.leftOperand.toRPN(top)
        op.rpn2 = self.rightOperand.toRPN(top)
        op.op = self.boolean.toRPN(top)
        return ('rpnRpnOp', op)


class CIndex(Index):
    def toRPN(self, top):
        self.resolvePrefix()
        pf = self.prefix
        if (not pf and self.prefixURI):
            # We have a default
            for k in zConfig.contextSets:
                if zConfig.contextSets[k] == self.prefixURI:
                    pf = k
                    break

        # Default BIB1
        set = oids.oids['Z3950']['ATTRS']['BIB1']['oid']

        if (hasattr(top, 'config') and top.config):
            config = top.config
            # Check SRW Configuration
            cql = config.contextSetNamespaces['cql']
            index = self.value
            if self.prefixURI == cql and self.value == "serverchoice":
                # Have to resolve our prefixes etc, so create an index object to do it
                index = config.defaultIndex
                cidx = CIndex(index)
                cidx.config = config
                cidx.parent = config
                cidx.resolvePrefix()
                pf = cidx.prefix
                index = cidx.value

            if config.indexHash.has_key(pf):
                if config.indexHash[pf].has_key(index):
                    idx = config.indexHash[pf][index]
                    # Need to map from this list to RPN list
                    attrs = {}
                    for i in idx:
                        set = asn1.OidVal(map(int, i[0].split('.')))
                        type = int(i[1])
                        if (i[2].isdigit()):
                            val = int(i[2])
                        else:
                            val = i[2]
                        attrs[(set, type)] = val
                    return attrs
                else:
                    diag = Diagnostic16()
                    diag.details = index
                    diag.message = "Unknown index"
                    raise diag
            else:
                diag = Diagnostic15()
                diag.details = pf
                diag.message = "Unknown context set"
                raise diag
        elif (hasattr(zConfig, pf)):
            mp = getattr(zConfig, pf)
            if (mp.has_key(self.value)):
                val = mp[self.value]
            else:
                val = self.value
        elif (oids.oids['Z3950']['ATTRS'].has_key(pf.upper())):
            set = oids.oids['Z3950']['ATTRS'][pf.upper()]['oid']
            if (self.value.isdigit()):
                # bib1.1018
                val = int(self.value)
            else:
                # complex attribute for bib1
                val = self.value
        else:
            print "Can't resolve %s" % pf
            raise ValueError

        return {(set, 1) : val}


class CRelation(Relation):
    def toRPN(self, top):
        rels = ['', '<', '<=', '=', '>=', '>', '<>']
        set = z3950.Z3950_ATTRS_BIB1_ov
        vals = [None, None, None, None, None, None, None]

        if self.value in rels:
            vals[2] = rels.index(self.value)
        elif self.value in ['exact', 'scr']:
            vals[2] = 3
        elif (self.value == 'within'):
            vals[2] = 104

        if self['relevant']:
            vals[2] = 102
        elif self['stem']:
            vals[2] = 101
        elif self['phonetic']:
            vals[2] = 100

        if self['number']:
            vals[4] = 109
            vals[5] = 100
        elif self['date']:
            vals[4] = 5
        elif self['word']:
            vals[4] = 2

        if self.value == 'exact':
            vals[3] = 1
            vals[5] = 100
            # vals[6] = 3
        else:
            vals[3] = 3
            # vals[6] = 1

        attrs = {}
        for x in range(1, 7):
            if vals[x]:
                attrs[(z3950.Z3950_ATTRS_BIB1_ov, x)] = vals[x]

        return attrs

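# The vals list is indexed by bib1 attribute type: [2] relation, [3] position,
# [4] structure, [5] truncation; e.g. a plain '=' yields {(bib1, 2): 3,
# (bib1, 3): 3}, while '=' with the cql.number modifier adds structure 109
# and truncation 100.
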
class CTerm(Term):
    def toRPN(self, top):
        return ('general', self.value)

class CModifierClause(ModifierClause):
    pass

class CModifierType(ModifierType):
    pass

40 python/PyZ3950/PyZ3950_parsetab.py Normal file
@@ -0,0 +1,40 @@
# PyZ3950_parsetab.py
# This file is automatically generated. Do not edit.

_lr_method = 'SLR'

_lr_signature = '\xfc\xb2\xa8\xb7\xd9\xe7\xad\xba"\xb2Ss\'\xcd\x08\x16'

_lr_action_items = {'QUOTEDVALUE':([5,26,0,19,16,],[1,1,1,1,1,]),'LOGOP':([3,25,4,14,9,6,27,23,13,20,22,1,],[-5,-9,-14,-13,16,-8,16,-7,16,-6,-4,-12,]),'SET':([0,16,5,26,],[11,11,11,11,]),'RPAREN':([27,23,3,22,1,25,13,4,20,6,14,],[28,-7,-5,-4,-12,-9,20,-14,-6,-8,-13,]),'$':([8,14,2,23,3,20,28,25,9,1,4,6,22,],[0,-13,-1,-7,-5,-6,-3,-9,-2,-12,-14,-8,-4,]),'SLASH':([21,],[26,]),'ATTRSET':([0,],[7,]),'QUAL':([0,26,16,18,5,],[10,10,10,24,10,]),'COMMA':([10,12,24,],[-10,18,-11,]),'LPAREN':([26,0,16,7,5,],[5,5,5,15,5,]),'WORD':([19,17,14,0,5,26,6,16,15,1,4,25,],[4,23,-13,4,4,4,14,4,21,-12,-14,14,]),'RELOP':([11,24,10,12,],[17,-11,-10,19,]),}

_lr_action = { }
for _k, _v in _lr_action_items.items():
    for _x,_y in zip(_v[0],_v[1]):
        _lr_action[(_x,_k)] = _y
del _lr_action_items

_lr_goto_items = {'cclfind_or_attrset':([0,],[2,]),'elements':([5,26,16,0,],[3,3,22,3,]),'quallist':([5,26,0,16,],[12,12,12,12,]),'val':([5,16,26,19,0,],[6,6,6,25,6,]),'top':([0,],[8,]),'cclfind':([5,0,26,],[13,9,27,]),}

_lr_goto = { }
for _k, _v in _lr_goto_items.items():
    for _x,_y in zip(_v[0],_v[1]):
        _lr_goto[(_x,_k)] = _y
del _lr_goto_items
_lr_productions = [
  ("S'",1,None,None,None),
  ('top',1,'p_top','./ccl.py',154),
  ('cclfind_or_attrset',1,'p_cclfind_or_attrset_1','./ccl.py',158),
  ('cclfind_or_attrset',6,'p_cclfind_or_attrset_2','./ccl.py',162),
  ('cclfind',3,'p_ccl_find_1','./ccl.py',166),
  ('cclfind',1,'p_ccl_find_2','./ccl.py',170),
  ('elements',3,'p_elements_1','./ccl.py',174),
  ('elements',3,'p_elements_2','./ccl.py',196),
  ('elements',1,'p_elements_3','./ccl.py',202),
  ('elements',3,'p_elements_4','./ccl.py',206),
  ('quallist',1,'p_quallist_1','./ccl.py',213),
  ('quallist',3,'p_quallist_2','./ccl.py',217),
  ('val',1,'p_val_1','./ccl.py',221),
  ('val',2,'p_val_2','./ccl.py',225),
  ('val',1,'p_val_3','./ccl.py',229),
]
451 python/PyZ3950/SRWDiagnostics.py Normal file
@@ -0,0 +1,451 @@
# Base Class

class SRWDiagnostic (Exception):
    """ Base Diagnostic Class"""
    code = 0
    uri = "info:srw/diagnostic/1/"
    details = ""
    message = ""

    surrogate = 0
    fatal = 1

    def __str__(self):
        return "%s [%s]: %s" % (self.uri, self.message, self.details)

    # NB 'Need' name for serialization in SRW
    def __init__(self, name=None):
        if (self.code):
            self.uri = "%s%d" % (self.uri, self.code)
        Exception.__init__(self)

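# Instantiating a subclass bakes its code into the uri: e.g. Diagnostic10()
# carries uri "info:srw/diagnostic/1/10", message "Malformed query", and
# whatever details the raiser attaches before raising it.
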
# Diagnostic Types

class GeneralDiagnostic (SRWDiagnostic):
    pass

class CQLDiagnostic (SRWDiagnostic):
    pass

class RecordDiagnostic (SRWDiagnostic):
    pass

class ResultSetDiagnostic (SRWDiagnostic):
    pass

class SortDiagnostic (SRWDiagnostic):
    pass

class StyleDiagnostic (SRWDiagnostic):
    pass

class ScanDiagnostic (SRWDiagnostic):
    pass

class DeprecatedDiagnostic(SRWDiagnostic):
    def __init__(self, name=None):
        print "WARNING: Use of deprecated diagnostic %s" % (self)
        SRWDiagnostic.__init__(self)

class ExplainDiagnostic (DeprecatedDiagnostic):
    pass


# Rob's (empty) diagnostic set
class RobDiagnostic (SRWDiagnostic):
    uri = "info:srw/diagnostic/2/"


# Individual Diagnostics

class Diagnostic1 (GeneralDiagnostic):
    code = 1
    message = "General system error"

class Diagnostic2 (GeneralDiagnostic):
    code = 2
    message = "System temporarily unavailable"

class Diagnostic3 (GeneralDiagnostic):
    code = 3
    message = "Authentication error"

class Diagnostic4 (GeneralDiagnostic):
    code = 4
    message = "Unsupported operation"

class Diagnostic5 (GeneralDiagnostic):
    code = 5
    message = "Unsupported version"

class Diagnostic6 (GeneralDiagnostic):
    code = 6
    message = "Unsupported parameter value"

class Diagnostic7 (GeneralDiagnostic):
    code = 7
    message = "Mandatory parameter not supplied"

class Diagnostic8 (GeneralDiagnostic):
    code = 8
    message = "Unknown parameter"


class Diagnostic10 (CQLDiagnostic):
    code = 10
    message = "Malformed query"

class Diagnostic13 (CQLDiagnostic):
    code = 13
    message = "Unsupported use of parentheses"

class Diagnostic14 (CQLDiagnostic):
    code = 14
    message = "Unsupported use of quotes"

class Diagnostic15 (CQLDiagnostic):
    code = 15
    message = "Unsupported context set"

class Diagnostic16 (CQLDiagnostic):
    code = 16
    message = "Unsupported index"

class Diagnostic18 (CQLDiagnostic):
    code = 18
    message = "Unsupported combination of indexes"

class Diagnostic19 (CQLDiagnostic):
    code = 19
    message = "Unsupported relation"

class Diagnostic20 (CQLDiagnostic):
    code = 20
    message = "Unsupported relation modifier"

class Diagnostic21 (CQLDiagnostic):
    code = 21
    message = "Unsupported combination of relation modifiers"

class Diagnostic22 (CQLDiagnostic):
    code = 22
    message = "Unsupported combination of relation and index"

class Diagnostic23 (CQLDiagnostic):
    code = 23
    message = "Too many characters in term"

class Diagnostic24 (CQLDiagnostic):
    code = 24
    message = "Unsupported combination of relation and term"

class Diagnostic26 (CQLDiagnostic):
    code = 26
    message = "Non special character escaped in term"

class Diagnostic27 (CQLDiagnostic):
    code = 27
    message = "Empty term unsupported"

class Diagnostic28 (CQLDiagnostic):
    code = 28
    message = "Masking character not supported"

class Diagnostic29 (CQLDiagnostic):
    code = 29
    message = "Masked words too short"

class Diagnostic30 (CQLDiagnostic):
    code = 30
    message = "Too many masking characters in term"

class Diagnostic31 (CQLDiagnostic):
    code = 31
    message = "Anchoring character not supported"

class Diagnostic32 (CQLDiagnostic):
    code = 32
    message = "Anchoring character in unsupported position."

class Diagnostic33 (CQLDiagnostic):
    code = 33
    message = "Combination of proximity/adjacency and masking characters not supported"

class Diagnostic34 (CQLDiagnostic):
    code = 34
    message = "Combination of proximity/adjacency and anchoring characters not supported"

class Diagnostic35 (CQLDiagnostic):
    code = 35
    message = "Term only stopwords"

class Diagnostic36 (CQLDiagnostic):
    code = 36
    message = "Term in invalid format for index or relation"

class Diagnostic37 (CQLDiagnostic):
    code = 37
    message = "Unsupported boolean operator"

class Diagnostic38 (CQLDiagnostic):
    code = 38
    message = "Too many boolean operators"

class Diagnostic39 (CQLDiagnostic):
    code = 39
    message = "Proximity not supported"

class Diagnostic40 (CQLDiagnostic):
    code = 40
    message = "Unsupported proximity relation"

class Diagnostic41 (CQLDiagnostic):
    code = 41
    message = "Unsupported proximity distance"

class Diagnostic42 (CQLDiagnostic):
    code = 42
    message = "Unsupported proximity unit"

class Diagnostic43 (CQLDiagnostic):
    code = 43
    message = "Unsupported proximity ordering"

class Diagnostic44 (CQLDiagnostic):
    code = 44
    message = "Unsupported combination of proximity modifiers"


class Diagnostic50 (ResultSetDiagnostic):
    code = 50
    message = "Result sets not supported"

class Diagnostic51 (ResultSetDiagnostic):
    code = 51
    message = "Result set does not exist"

class Diagnostic52 (ResultSetDiagnostic):
    code = 52
    message = "Result set temporarily unavailable"

class Diagnostic53 (ResultSetDiagnostic):
    code = 53
    message = "Result sets only supported for retrieval"

class Diagnostic55 (ResultSetDiagnostic):
    code = 55
    message = "Combination of result sets with search terms not supported"

class Diagnostic58 (ResultSetDiagnostic):
    code = 58
    message = "Result set created with unpredictable partial results available"

class Diagnostic59 (ResultSetDiagnostic):
    code = 59
    message = "Result set created with valid partial results available"


class Diagnostic60 (RecordDiagnostic):
    code = 60
    message = "Too many records retrieved"

class Diagnostic61 (RecordDiagnostic):
    code = 61
    message = "First record position out of range"

class Diagnostic64 (RecordDiagnostic):
    code = 64
    message = "Record temporarily unavailable"
    surrogate = 1

class Diagnostic65 (RecordDiagnostic):
    code = 65
    message = "Record does not exist"
    surrogate = 1

class Diagnostic66 (RecordDiagnostic):
    code = 66
    message = "Unknown schema for retrieval"

class Diagnostic67 (RecordDiagnostic):
    code = 67
    message = "Record not available in this schema"
    surrogate = 1

class Diagnostic68 (RecordDiagnostic):
    code = 68
    message = "Not authorised to send record"
    surrogate = 1

class Diagnostic69 (RecordDiagnostic):
    code = 69
    message = "Not authorised to send record in this schema"
    surrogate = 1

class Diagnostic70 (RecordDiagnostic):
    code = 70
    message = "Record too large to send"
    surrogate = 1

class Diagnostic71 (RecordDiagnostic):
    code = 71
    message = "Unsupported record packing"

class Diagnostic72 (RecordDiagnostic):
    code = 72
    message = "XPath retrieval unsupported"

class Diagnostic73 (RecordDiagnostic):
    code = 73
    message = "XPath expression contains unsupported feature"

class Diagnostic74 (RecordDiagnostic):
    code = 74
    message = "Unable to evaluate XPath expression"


class Diagnostic80 (SortDiagnostic):
|
||||
code = 80
|
||||
message = "Sort not supported"
|
||||
|
||||
class Diagnostic82 (SortDiagnostic):
|
||||
code = 82
|
||||
message = "Unsupported sort sequence"
|
||||
|
||||
class Diagnostic83 (SortDiagnostic):
|
||||
code = 83
|
||||
message = "Too many records to sort"
|
||||
|
||||
class Diagnostic84 (SortDiagnostic):
|
||||
code = 84
|
||||
message = "Too many sort keys"
|
||||
|
||||
class Diagnostic86 (SortDiagnostic):
|
||||
code = 86
|
||||
message = "Incompatible record formats"
|
||||
|
||||
class Diagnostic87 (SortDiagnostic):
|
||||
code = 87
|
||||
message = "Unsupported schema for sort"
|
||||
|
||||
class Diagnostic88 (SortDiagnostic):
|
||||
code = 88
|
||||
message = "Unsupported tag path for sort"
|
||||
|
||||
class Diagnostic89 (SortDiagnostic):
|
||||
code = 89
|
||||
message = "Tag path unsupported for schema"
|
||||
|
||||
class Diagnostic90 (SortDiagnostic):
|
||||
code = 90
|
||||
message = "Unsupported direction value"
|
||||
|
||||
class Diagnostic91 (SortDiagnostic):
|
||||
code = 91
|
||||
message = "Unsupported case value"
|
||||
|
||||
class Diagnostic92 (SortDiagnostic):
|
||||
code = 92
|
||||
message = "Unsupported missing value action"
|
||||
|
||||
|
||||
class Diagnostic110 (StyleDiagnostic):
|
||||
code = 110
|
||||
message = "Stylesheets not supported"
|
||||
|
||||
class Diagnostic111 (StyleDiagnostic):
|
||||
code = 111
|
||||
message = "Unsupported stylesheet"
|
||||
|
||||
class Diagnostic120 (ScanDiagnostic):
|
||||
code = 120
|
||||
message = "Response position out of range"
|
||||
|
||||
class Diagnostic121 (ScanDiagnostic):
|
||||
code = 121
|
||||
message = "Too many terms requested"
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# Deprecated diagnostics
|
||||
|
||||
class Diagnostic11 (DeprecatedDiagnostic):
|
||||
code = 11
|
||||
message = "Unsupported query type"
|
||||
|
||||
class Diagnostic12 (DeprecatedDiagnostic):
|
||||
code = 12
|
||||
message = "Too many characters in query"
|
||||
|
||||
class Diagnostic17 (DeprecatedDiagnostic):
|
||||
code = 17
|
||||
message = "Illegal or unsupported combination of index and index set."
|
||||
|
||||
class Diagnostic25 (DeprecatedDiagnostic):
|
||||
code = 25
|
||||
message = "Special characters not quoted in term"
|
||||
|
||||
class Diagnostic45 (DeprecatedDiagnostic):
|
||||
code = 45
|
||||
message = "Index set name (prefix) assigned to multiple identifiers"
|
||||
|
||||
class Diagnostic54 (DeprecatedDiagnostic):
|
||||
code = 54
|
||||
message = "Retrieval may only occur from an existing result set"
|
||||
|
||||
class Diagnostic56 (DeprecatedDiagnostic):
|
||||
code = 56
|
||||
message = "Only combination of single result set with search terms supported"
|
||||
|
||||
class Diagnostic57 (DeprecatedDiagnostic):
|
||||
code = 57
|
||||
message = "Result set created but no records available"
|
||||
|
||||
class Diagnostic62 (DeprecatedDiagnostic):
|
||||
code = 62
|
||||
message = "Negative number of records requested"
|
||||
|
||||
class Diagnostic63 (DeprecatedDiagnostic):
|
||||
code = 63
|
||||
message = "System error in retrieving records"
|
||||
|
||||
class Diagnostic81 (DeprecatedDiagnostic):
|
||||
code = 81
|
||||
message = "Unsupported sort type"
|
||||
|
||||
class Diagnostic85 (DeprecatedDiagnostic):
|
||||
code = 85
|
||||
message = "Duplicate sort keys"
|
||||
|
||||
class Diagnostic100 (ExplainDiagnostic):
|
||||
code = 100
|
||||
message = "Explain not supported"
|
||||
|
||||
class Diagnostic101 (ExplainDiagnostic):
|
||||
code = 101
|
||||
message = "Explain request type not supported"
|
||||
|
||||
class Diagnostic102 (ExplainDiagnostic):
|
||||
code = 102
|
||||
message = "Explain record temporarily unavailable"
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
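All of these are small exception classes keyed by SRW diagnostic code. A minimal sketch of raising and handling one, assuming the diagnostic base classes defined earlier in this file derive from Exception (their raise-style use elsewhere in the package suggests they do):

try:
    diag = Diagnostic51 ()
    diag.details = "mySet"   # hypothetical result-set name
    raise diag
except ResultSetDiagnostic, d:
    print "SRW diagnostic %d: %s" % (d.code, d.message)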
5
python/PyZ3950/__init__.py
Normal file
@ -0,0 +1,5 @@
"""Python Z3950/MARC/ASN.1 package, supporting ZOOM API.
"""

__all__ = ['zoom', 'zmarc']
# only pieces most users need: if you need asn1, import it explicitly
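The docstring points at the ZOOM API as the intended entry point. A minimal sketch of a client session (host, port and database are the Library of Congress test target commonly used in PyZ3950 examples; substitute your own):

from PyZ3950 import zoom
conn = zoom.Connection ('z3950.loc.gov', 7090)
conn.databaseName = 'VOYAGER'
conn.preferredRecordSyntax = 'USMARC'
q = zoom.Query ('CCL', 'ti="1066 and all that"')
res = conn.search (q)
for r in res:
    print str(r)
conn.close ()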
2036
python/PyZ3950/asn1.py
Normal file
File diff suppressed because it is too large
191
python/PyZ3950/bib1msg.py
Normal file
@ -0,0 +1,191 @@
"""Translate bib-1 error numbers to messages."""

from PyZ3950 import asn1
from PyZ3950 import z3950
from PyZ3950 import oids

msg_dict = {
    1: 'permanent system error', # (unspecified),
    2: 'temporary system error', # (unspecified),
    3: 'unsupported search', # (unspecified),
    4: 'Terms only exclusion (stop) words', # (unspecified),
    5: 'Too many argument words', # (unspecified),
    6: 'Too many boolean operators', # (unspecified),
    7: 'Too many truncated words', # (unspecified),
    8: 'Too many incomplete subfields', # (unspecified),
    9: 'Truncated words too short', # (unspecified),
    10: 'Invalid format for record number (search term)', # (unspecified),
    11: 'Too many characters in search statement', # (unspecified),
    12: 'Too many records retrieved', # (unspecified),
    13: 'Present request out-of-range', # (unspecified),
    14: 'System error in presenting records', # (unspecified),
    15: 'Record not authorized to be sent intersystem', # (unspecified),
    16: 'Record exceeds Preferred-message-size', # (unspecified),
    17: 'Record exceeds Exceptional-record-size', # (unspecified),
    18: 'Result set not supported as a search term', # (unspecified),
    19: 'Only single result set as search term supported', # (unspecified),
    20: 'Only ANDing of a single result set as search term', # (unspecified),
    21: 'Result set exists and replace indicator off', # (unspecified),
    22: 'Result set naming not supported', # (unspecified),
    23: 'Specified combination of databases not supported', # (unspecified),
    24: 'Element set names not supported', # (unspecified),
    25: 'Specified element set name not valid for specified database', # (unspecified),
    26: 'Only generic form of element set name supported', # (unspecified),
    27: 'Result set no longer exists - unilaterally deleted by target', # (unspecified),
    28: 'Result set is in use', # (unspecified),
    29: 'One of the specified databases is locked', # (unspecified),
    30: 'Specified result set does not exist', # (unspecified),
    31: 'Resources exhausted - no results available', # (unspecified),
    32: 'Resources exhausted - unpredictable partial results available', # (unspecified),
    33: 'Resources exhausted - valid subset of results available', # (unspecified),
    100: '(unspecified) error', # (unspecified),
    101: 'Access-control failure', # (unspecified),
    102: 'Challenge required, could not be issued - operation terminated', # (unspecified),
    103: 'Challenge required, could not be issued - record not included', # (unspecified),
    104: 'Challenge failed - record not included', # (unspecified),
    105: 'Terminated at origin request', # (unspecified),
    106: 'No abstract syntaxes agreed to for this record', # (unspecified),
    107: 'Query type not supported', # (unspecified),
    108: 'Malformed query', # (unspecified),
    109: 'Database unavailable', # database name,
    110: 'Operator unsupported', # operator,
    111: 'Too many databases specified', # maximum,
    112: 'Too many result sets created', # maximum,
    113: 'Unsupported attribute type', # type,
    114: 'Unsupported Use attribute', # value,
    115: 'Unsupported term value for Use attribute', # term,
    116: 'Use attribute required but not supplied', # (unspecified),
    117: 'Unsupported Relation attribute', # value,
    118: 'Unsupported Structure attribute', # value,
    119: 'Unsupported Position attribute', # value,
    120: 'Unsupported Truncation attribute', # value,
    121: 'Unsupported Attribute Set', # oid,
    122: 'Unsupported Completeness attribute', # value,
    123: 'Unsupported attribute combination', # (unspecified),
    124: 'Unsupported coded value for term', # value,
    125: 'Malformed search term', # (unspecified),
    126: 'Illegal term value for attribute', # term,
    127: 'Unparsable format for un-normalized value', # value,
    128: 'Illegal result set name', # name,
    129: 'Proximity search of sets not supported', # (unspecified),
    130: 'Illegal result set in proximity search', # result set name,
    131: 'Unsupported proximity relation', # value,
    132: 'Unsupported proximity unit code', # value,
    201: 'Proximity not supported with this attribute combination attribute', # list,
    202: 'Unsupported distance for proximity', # distance,
    203: 'Ordered flag not supported for proximity', # (unspecified),
    205: 'Only zero step size supported for Scan', # (unspecified),
    206: 'Specified step size not supported for Scan step', # size,
    207: 'Cannot sort according to sequence', # sequence,
    208: 'No result set name supplied on Sort', # (unspecified),
    209: 'Generic sort not supported (database-specific sort only supported)', # (unspecified),
    210: 'Database specific sort not supported', # (unspecified),
    211: 'Too many sort keys', # number,
    212: 'Duplicate sort keys', # key,
    213: 'Unsupported missing data action', # value,
    214: 'Illegal sort relation', # relation,
    215: 'Illegal case value', # value,
    216: 'Illegal missing data action', # value,
    217: 'Segmentation: Cannot guarantee records will fit in specified segments', # (unspecified),
    218: 'ES: Package name already in use', # name,
    219: 'ES: no such package, on modify/delete', # name,
    220: 'ES: quota exceeded', # (unspecified),
    221: 'ES: extended service type not supported', # type,
    222: 'ES: permission denied on ES - id not authorized', # (unspecified),
    223: 'ES: permission denied on ES - cannot modify or delete', # (unspecified),
    224: 'ES: immediate execution failed', # (unspecified),
    225: 'ES: immediate execution not supported for this service', # (unspecified),
    226: 'ES: immediate execution not supported for these parameters', # (unspecified),
    227: 'No data available in requested record syntax', # (unspecified),
    228: 'Scan: malformed scan', # (unspecified),
    229: 'Term type not supported', # type,
    230: 'Sort: too many input results', # max,
    231: 'Sort: incompatible record formats', # (unspecified),
    232: 'Scan: term list not supported', # alternative term list,
    233: 'Scan: unsupported value of position-in-response', # value,
    234: 'Too many index terms processed', # number of terms,
    235: 'Database does not exist', # database name,
    236: 'Access to specified database denied', # database name,
    237: 'Sort: illegal sort', # (unspecified),
    238: 'Record not available in requested syntax', # alternative suggested syntax(es),
    239: 'Record syntax not supported', # syntax,
    240: 'Scan: Resources exhausted looking for satisfying terms', # (unspecified),
    241: 'Scan: Beginning or end of term list', # (unspecified),
    242: 'Segmentation: max-segment-size too small to segment record', # smallest acceptable size,
    243: 'Present: additional-ranges parameter not supported', # (unspecified),
    244: 'Present: comp-spec parameter not supported', # (unspecified),
    245: "Type-1 query: restriction ('resultAttr') operand not supported:", # (unspecified),
    246: "Type-1 query: 'complex' attributeValue not supported", # (unspecified),
    247: "Type-1 query: 'attributeSet' as part of AttributeElement not supported", # (unspecified),
    1001: 'Malformed APDU',
    1002: 'ES: EXTERNAL form of Item Order request not supported.', # ,
    1003: 'ES: Result set item form of Item Order request not supported.', # ,
    1004: 'ES: Extended services not supported unless access control is in effect.', # ,
    1005: 'Response records in Search response not supported.', # ,
    1006: 'Response records in Search response not possible for specified database (or database combination). See note 1.', # ,
    1007: 'No Explain server. See note 2.', # pointers to servers that have a surrogate Explain database for this server.,
    1008: 'ES: missing mandatory parameter for specified function', # parameter,
    1009: 'ES: Item Order, unsupported OID in itemRequest.', # OID,
    1010: 'Init/AC: Bad Userid', # ,
    1011: 'Init/AC: Bad Userid and/or Password', # ,
    1012: 'Init/AC: No searches remaining (pre-purchased searches exhausted)', # ,
    1013: 'Init/AC: Incorrect interface type (specified id valid only when used with a particular access method or client)', # ,
    1014: 'Init/AC: Authentication System error', # ,
    1015: 'Init/AC: Maximum number of simultaneous sessions for Userid', # ,
    1016: 'Init/AC: Blocked network address', # ,
    1017: 'Init/AC: No databases available for specified userId', # ,
    1018: 'Init/AC: System temporarily out of resources', # ,
    1019: 'Init/AC: System not available due to maintenance', # when it's expected back up,
    1020: 'Init/AC: System temporarily unavailable', # when it's expected back up,
    1021: 'Init/AC: Account has expired', # ,
    1022: 'Init/AC: Password has expired so a new one must be supplied', # ,
    1023: 'Init/AC: Password has been changed by an administrator so a new one must be supplied', # ,
    1024: 'Unsupported Attribute. See note 3.', # an unstructured string indicating the object identifier of the attribute set id, the numeric value of the attribute type, and the numeric value of the attribute.,
    1025: 'Service not supported for this database', # ,
    1026: 'Record cannot be opened because it is locked', # ,
    1027: 'SQL error', # ,
    1028: 'Record deleted', # ,
    1029: 'Scan: too many terms requested.', # Addinfo: max terms supported,
    1040: 'ES: Invalid function', # function,
    1041: 'ES: Error in retention time', # (unspecified),
    1042: 'ES: Permissions data not understood', # permissions,
    1043: 'ES: Invalid OID for task specific parameters', # oid,
    1044: 'ES: Invalid action', # action,
    1045: 'ES: Unknown schema', # schema,
    1046: 'ES: Too many records in package', # maximum number allowed,
    1047: 'ES: Invalid wait action', # wait action,
    1048: 'ES: Cannot create task package -- exceeds maximum permissable size (see note 4)', # maximum task package size,
    1049: 'ES: Cannot return task package -- exceeds maximum permissable size for ES response (see note 5)', # maximum task package size for ES response,
    1050: 'ES: Extended services request too large (see note 6)', # maximum size of extended services request,
    1051: 'Scan: Attribute set id required -- not supplied', # ,
    1052: 'ES: Cannot process task package record -- exceeds maximum permissible record size for ES (see note 7)', # maximum record size for ES,
    1053: 'ES: Cannot return task package record -- exceeds maximum permissible record size for ES response (see note 8)', # maximum record size for ES response,
    1054: 'Init: Required negotiation record not included', # oid(s) of required negotiation record(s),
    1055: 'Init: negotiation option required', # ,
    1056: 'Attribute not supported for database', # attribute (oid, type, and value), and database name,
    1057: 'ES: Unsupported value of task package parameter (See Note 9)', # parameter and value,
    1058: 'Duplicate Detection: Cannot dedup on requested record portion', # ,
    1059: 'Duplicate Detection: Requested detection criterion not supported', # detection criterion,
    1060: 'Duplicate Detection: Requested level of match not supported', # ,
    1061: 'Duplicate Detection: Requested regular expression not supported', # ,
    1062: 'Duplicate Detection: Cannot do clustering', # ,
    1063: 'Duplicate Detection: Retention criterion not supported', # retention criterion,
    1064: 'Duplicate Detection: Requested number (or percentage) of entries for retention too large', # ,
    1065: 'Duplicate Detection: Requested sort criterion not supported', # sort criterion,
    1066: 'CompSpec: Unknown schema, or schema not supported.', # ,
    1067: 'Encapsulation: Encapsulated sequence of PDUs not supported.', # specific unsupported sequence,
    1068: 'Encapsulation: Base operation (and encapsulated PDUs) not executed based on pre-screening analysis.', # ,
    1069: 'No syntaxes available for this request. See note 10.', # ,
    1070: 'user not authorized to receive record(s) in requested syntax', # ,
    1071: 'preferredRecordSyntax not supplied', # ,
    1072: 'Query term includes characters that do not translate into the target character set.', # Characters that do not translate
    }


def lookup_errmsg (condition, oid):
    if oid <> oids.Z3950_DIAG_BIB1_ov:
        return "Unknown oid: %s condition %d" % (str (oid), condition)
    if msg_dict.has_key (condition):
        return msg_dict[condition]
    else:
        return "Unknown BIB-1 error condition %d" % (condition,)
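A quick sketch of the lookup (the Z3950_DIAG_BIB1_ov constant comes from oids.py, generated by compile_oids.py further down in this commit):

from PyZ3950 import oids
from PyZ3950.bib1msg import lookup_errmsg
print lookup_errmsg (109, oids.Z3950_DIAG_BIB1_ov)    # -> 'Database unavailable'
print lookup_errmsg (9999, oids.Z3950_DIAG_BIB1_ov)   # -> 'Unknown BIB-1 error condition 9999'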
406
python/PyZ3950/c2query.py
Normal file
@ -0,0 +1,406 @@
#!/usr/local/bin/python2.3

try:
    from cStringIO import StringIO
except:
    from StringIO import StringIO
from PyZ3950 import z3950, oids
from PyZ3950 import asn1    # needed for OidVal in the attribute-list branch of clause()
from types import IntType, StringType, ListType
# We need "\"\"" to be one token
from PyZ3950.CQLParser import CQLshlex
from PyZ3950.CQLUtils import ZCQLConfig
from PyZ3950.zdefs import make_attr
zconfig = ZCQLConfig()

"""
http://cheshire.berkeley.edu/cheshire2.html#zfind

top ::= query ['resultsetid' name]
query ::= query boolean clause | clause
clause ::= '(' query ')'
           | attributes [relation] term
           | resultset
attributes ::= '[' { [set] type '=' value } ']' | name
boolean ::= 'and' | 'or' | 'not' | (synonyms)
prox ::= ('!PROX' | (synonyms)) {'/' name}
relation ::= '>' | '<' | ...

[bib1 1=5, bib1 3=6] > term and title @ fish
"""

booleans = {'AND' : 'and',
            '.AND.' : 'and',
            '&&' : 'and',
            'OR' : 'or',
            '.OR.' : 'or',
            '||' : 'or',
            'NOT' : 'and-not',
            '.NOT.' : 'and-not',
            'ANDNOT' : 'and-not',
            '.ANDNOT.' : 'and-not',
            '!!' : 'and-not'
            }

relations = {'<' : 1,
             'LT' : 1,
             '.LT.' : 1,
             '<=' : 2,
             'LE' : 2,
             '.LE.' : 2,
             '=' : 3,
             '>=' : 4,
             'GE' : 4,
             '.GE.' : 4,
             '>' : 5,
             'GT' : 5,
             '.GT.' : 5,
             '<>' : 6,
             '!=' : 6,
             'NE' : 6,
             '.NE.' : 6,
             '?' : 100,
             'PHON' : 100,
             '.PHON.' : 100,
             '%' : 101,
             'STEM' : 101,
             '.STEM.' : 101,
             '@' : 102,
             'REL' : 102,
             '.REL.' : 102,
             '<=>' : 104,
             'WITHIN' : 104,
             '.WITHIN.' : 104}

geoRelations = {'>=<' : 7,
                '.OVERLAPS.' : 7,
                '>#<' : 8,
                '.FULLY_ENCLOSED_WITHIN.' : 8,
                '<#>' : 9,
                '.ENCLOSES.' : 9,
                '<>#' : 10,
                '.OUTSIDE_OF.' : 10,
                '+-+' : 11,
                '.NEAR.' : 11,
                '.#.' : 12,
                '.MEMBERS_CONTAIN.' : 12,
                '!.#.' : 13,
                '.MEMBERS_NOT_CONTAIN.' : 13,
                ':<:' : 14,
                '.BEFORE.' : 14,
                ':<=:' : 15,
                '.BEFORE_OR_DURING.' : 15,
                ':=:' : 16,
                '.DURING.' : 16,
                ':>=:' : 17,
                '.DURING_OR_AFTER.' : 17,
                ':>:' : 18,
                '.AFTER.' : 18}

proxBooleans = {'!PROX' : (2, 0, 2),
                '!ADJ' : (2, 0, 2),
                '!NEAR' : (20, 0, 2),
                '!FAR' : (20, 0, 4),
                '!OPROX' : (2, 1, 2),
                '!OADJ' : (2, 1, 2),
                '!ONEAR' : (20, 1, 2),
                '!OFAR' : (20, 1, 4)}

proxUnits = {'C' : 1,
             'CHAR' : 1,
             'W' : 2,
             'WORD' : 2,
             'S' : 3,
             'SENT' : 3,
             'SENTENCE' : 3,
             'P' : 4,
             'PARA' : 4,
             'PARAGRAPH' : 4,
             'SECTION' : 5,
             'CHAPTER' : 6,
             'DOCUMENT' : 7,
             'ELEMENT' : 8,
             'SUBELEMENT' : 9,
             'ELEMENTTYPE' : 10,
             'BYTE' : 11}

privateBooleans = {'!FUZZY_AND' : 1,
                   '!FUZZY_OR' : 2,
                   '!FUZZY_NOT' : 3,
                   '!RESTRICT_FROM' : 4,
                   '!RESTRICT_TO' : 5,
                   '!MERGE_SUM' : 6,
                   '!MERGE_MEAN' : 7,
                   '!MERGE_NORM' : 8}

xzconfig = ZCQLConfig()

class C2Parser:
    lexer = None
    currentToken = None
    nextToken = None

    def __init__(self, l):
        self.lexer = l
        self.fetch_token()

    def fetch_token(self):
        tok = self.lexer.get_token()
        self.currentToken = self.nextToken
        self.nextToken = tok

    def is_boolean(self, tok=None):
        if (tok == None):
            tok = self.currentToken
        if (privateBooleans.has_key(tok.upper())):
            return 1
        elif (booleans.has_key(tok.upper())):
            return 2
        elif (proxBooleans.has_key(tok.upper())):
            return 3
        else:
            return 0

    def top(self):

        rpn = self.query()
        # Check for resultsetid
        if (self.currentToken.lower() == 'resultsetid'):
            self.fetch_token()
            resultset = self.currentToken
        else:
            resultset = None

        rpnq = z3950.RPNQuery()
        rpnq.attributeSet = oids.Z3950_ATTRS_BIB1_ov
        rpnq.rpn = rpn
        q = ('type_1', rpnq)
        return (q, resultset)

    def query(self):
        self.fetch_token()
        left = self.subquery()
        while 1:
            if not self.currentToken:
                break
            bool = self.is_boolean()
            if bool:
                bool = self.boolean()
                right = self.subquery()
                # Put left into triple, make triple new left
                op = z3950.RpnRpnOp()
                op.rpn1 = left
                op.rpn2 = right
                op.op = bool
                wrap = ('rpnRpnOp', op)
                left = wrap
            else:
                break
        return left

    def subquery(self):
        if self.currentToken == "(":
            object = self.query()
            if (self.currentToken <> ")"):
                raise ValueError
            else:
                self.fetch_token()
        else:
            object = self.clause()
        return object

    def boolean(self):
        tok = self.currentToken.upper()
        self.fetch_token()
        if (booleans.has_key(tok)):
            return (booleans[tok], None)
        elif (privateBooleans.has_key(tok)):
            # Generate cutesie prox trick
            type = privateBooleans[tok]
            prox = z3950.ProximityOperator()
            prox.proximityUnitCode = ('private', type)
            prox.distance = 0
            prox.ordered = 0
            prox.relationType = 3
            return ('op', ('prox', prox))

        elif (proxBooleans.has_key(tok)):
            # Generate prox
            prox = z3950.ProximityOperator()
            stuff = proxBooleans[tok]
            prox.distance = stuff[0]
            prox.ordered = stuff[1]
            prox.relationType = stuff[2]
            prox.proximityUnitCode = ('known', 2)

            # Now look for /
            while (self.currentToken == "/"):
                self.fetch_token()
                if (self.currentToken.isdigit()):
                    prox.distance = int(self.currentToken)
                elif (proxUnits.has_key(self.currentToken.upper())):
                    prox.proximityUnitCode = ('known', proxUnits[self.currentToken.upper()])
                else:
                    raise ValueError
                self.fetch_token()
            return ('op', ('prox', prox))
        else:
            # Argh!
            raise ValueError
    def clause(self):

        if (self.is_boolean(self.nextToken) or not self.nextToken or self.nextToken.lower() == 'resultsetid' or self.nextToken == ")"):
            # Must be a resultset
            tok = self.currentToken
            self.fetch_token()
            return ('op', ('resultSet', tok))

        elif (self.currentToken == '['):
            # List of attributes
            attrs = []
            oidHash = oids.oids['Z3950']['ATTRS']
            while (1):
                self.fetch_token()

                if (self.currentToken == ']'):
                    break

                if (oidHash.has_key(self.currentToken)):
                    attrSet = oidHash[self.currentToken]['ov']
                    self.fetch_token()
                elif (self.currentToken[:8] == '1.2.840.'):
                    attrSet = asn1.OidVal(map(int, self.currentToken.split('.')))
                    self.fetch_token()
                else:
                    attrSet = None

                if (self.currentToken[-1] == ','):
                    tok = self.currentToken[:-1]
                else:
                    tok = self.currentToken

                if (tok.isdigit()):
                    # 1 = foo
                    atype = int(tok)
                    self.fetch_token()
                    if (self.currentToken == '='):
                        # = foo
                        self.fetch_token()

                    if (self.currentToken[0] == '='):
                        # =foo
                        tok = self.currentToken[1:]
                    else:
                        tok = self.currentToken

                    if (tok[-1] == ','):
                        tok = tok[:-1]

                    if (tok.isdigit()):
                        val = int(tok)
                    else:
                        val = tok
                        if (val[0] == "'" and val[-1] == "'"):
                            val = val[1:-1]
                elif (tok[-1] == '='):
                    # 1= foo
                    tok = tok[:-1]
                    if (tok.isdigit()):
                        atype = int(tok)
                    self.fetch_token()
                    if (self.currentToken[-1] == ","):
                        tok = self.currentToken[:-1]
                    else:
                        tok = self.currentToken
                    if (tok.isdigit()):
                        val = int(self.currentToken)
                    else:
                        val = tok
                        if (val[0] == "'" and val[-1] == "'"):
                            val = val[1:-1]

                elif (tok.find('=') > -1):
                    # 1=foo
                    (atype, val) = self.currentToken.split('=')
                    atype = int(atype)
                    if (val[-1] == ","):
                        val = val[:-1]
                    if (val.isdigit()):
                        val = int(val)
                    elif (val[0] == "'" and val[-1] == "'"):
                        val = val[1:-1]
                else:
                    # ???
                    raise ValueError
                attrs.append([attrSet, atype, val])

        else:
            # Check for named index
            if (zconfig.BIB1.has_key(self.currentToken.lower())):
                attrs = [[oids.Z3950_ATTRS_BIB1_ov, 1, zconfig.BIB1[self.currentToken.lower()]]]
            else:
                # Just pass through the name
                attrs = [[oids.Z3950_ATTRS_BIB1_ov, 1, self.currentToken]]

        self.fetch_token()
        # Check for relation
        tok = self.currentToken.upper()
        if (relations.has_key(tok)):
            val = relations[tok]
            found = 0
            for a in attrs:
                if (a[0] in [oids.Z3950_ATTRS_BIB1, None] and a[1] == 2):
                    found = 1
                    a[2] = val
                    break
            if (not found):
                attrs.append([None, 2, val])
            self.fetch_token()
        elif (geoRelations.has_key(tok)):
            val = geoRelations[tok]
            found = 0
            for a in attrs:
                if (a[0] in [oids.Z3950_ATTRS_BIB1, oids.Z3950_ATTRS_GEO, None] and a[1] == 2):
                    found = 1
                    a[2] = val
                    break
            if (not found):
                attrs.append([oids.Z3950_ATTRS_GEO, 2, val])
            self.fetch_token()

        if (self.currentToken.find(' ') > -1):   # find() returns -1 on a miss
            # Already quoted
            term = self.currentToken
            self.fetch_token()
        else:
            # Accumulate words until a boolean or end-of-query token
            term = []
            while (self.currentToken and not self.is_boolean(self.currentToken) and self.currentToken.lower() != 'resultsetid'):
                term.append(self.currentToken)
                self.fetch_token()   # advance past the word just consumed
            term = ' '.join(term)

        # Phew. Now build AttributesPlusTerm
        clause = z3950.AttributesPlusTerm()
        clause.attributes = [make_attr(*e) for e in attrs]
        clause.term = ('general', term)
        return ('op', ('attrTerm', clause))


def parse(q):

    query = StringIO(q)
    lexer = CQLshlex(query)
    # Override CQL's wordchars list to include /=><
    lexer.wordchars += "!@#$%^&*-+;,.?|~`:\\><='"
    lexer.wordchars = lexer.wordchars.replace('[', '')
    lexer.wordchars = lexer.wordchars.replace(']', '')

    parser = C2Parser(lexer)
    return parser.top()
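A minimal sketch of driving the parser, reusing the example query from the grammar notes above (illustrative rather than tested against a live server):

from PyZ3950.c2query import parse
(q, resultset) = parse ('[bib1 1=5, bib1 3=6] > term and title @ fish')
# q is a ('type_1', RPNQuery) tuple; resultset is None unless 'resultsetid' was given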
365
python/PyZ3950/ccl.py
Normal file
@ -0,0 +1,365 @@
#!/usr/bin/env python

"""Implements part of CCL, the Common Command Language, ISO 8777. I'm
working from the description in the YAZ toolkit
(http://www.indexdata.dk/yaz/doc/tools.php), rather than the ISO
spec. Two extensions:
- qualifiers can be literal "(attrtyp, attrval)" pairs, so, e.g., the
  following is legitimate for ISBN: "(1,7)=0312033095"
- the optional ATTRSET (attrset/query) which must appear at the beginning
  of the string.
  Allowed values are:
  BIB1 (default)
  XD1
  UTIL
  ZTHES1
  EXP1
  or an oid expressed as a dotted string. (A leading dot implies a
  prefix of 1.2.840.10003.3, so, e.g., .1 is the same as BIB1.)

Eventually I will support v3-style mixing attribute sets within
a single query, but for now I don't.
"""

from __future__ import nested_scopes
import string

in_setup = 0

try:
    from PyZ3950 import z3950
    from PyZ3950 import oids
    from PyZ3950 import asn1

    _attrdict = {
        'bib1' : oids.Z3950_ATTRS_BIB1_ov,
        'zthes1': oids.Z3950_ATTRS_ZTHES_ov,
        'xd1': oids.Z3950_ATTRS_XD1_ov,
        'utility': oids.Z3950_ATTRS_UTIL_ov,
        'exp1': oids.Z3950_ATTRS_EXP1_ov
        }

except ImportError, err:
    print "Error importing (OK during setup)", err
    in_setup = 1

class QuerySyntaxError(Exception): pass
class ParseError(QuerySyntaxError): pass
class LexError(QuerySyntaxError): pass
class UnimplError(QuerySyntaxError): pass
tokens = ('LPAREN', 'RPAREN', 'COMMA',
          'SET', 'ATTRSET','QUAL', 'QUOTEDVALUE', 'RELOP', 'WORD',
          'LOGOP', 'SLASH')

t_LPAREN= r'\('
t_RPAREN= r'\)'
t_COMMA = r','
t_SLASH = r'/'
def t_ATTRSET(t):
    r'(?i)ATTRSET'
    return t

def t_SET (t): # need to def as function to override parsing as WORD, gr XXX
    r'(SET)'
    return t

relop_to_attrib = {
    '<': 1,
    '<=': 2,
    '=': 3,
    '>=': 4,
    '>': 5,
    '<>': 6}

t_RELOP = "|".join (["(%s)" % r for r in relop_to_attrib.keys()])
# XXX Index Data docs say 'doesn't follow ... ISO8777'?

# XXX expand to rd. addt'l defns from file?

qual_dict = { # These are bib-1 attribute values, see
              # http://www.loc.gov/z3950/agency/defns/bib1.html and ftp://ftp.loc.gov/pub/z3950/defs/bib1.txt
    'TI': (1,4),
    'AU': (1,1003), # use 1003 to work w/ both NLC-BNC and LC
    'ISBN': (1,7),
    'LCCN': (1,9),
    'ANY': (1,1016),
    'FIF': (3, 1), # first-in-field
    'AIF': (3,3), # any-in-field (default)
    'RTRUNC': (5,1),
    'NOTRUNC': (5,100) # (default)
    }
default_quals = ['ANY'] # XXX should be per-attr-set
default_relop = '='

def t_QUAL(t):
    return t

def mk_quals ():
    quals = ("|".join (map (lambda x: '(' + x + ')', qual_dict.keys())))
    t_QUAL.__doc__ = "(?i)" + quals + r"|(\([0-9]+,[0-9]+\))"

def t_QUOTEDVALUE(t):
    r"(\".*?\")"
    if t.value[0] == '"':
        t.value = t.value[1:-1]
    return t

word_init = "[a-z]|[A-Z]|[0-9]|&|:"
word_non_init = ",|\.|\'"

t_WORD = "(%s)(%s|%s)*" % (word_init, word_init, word_non_init)

def t_LOGOP(t):
    r'(?i)(AND)|(OR)|(NOT)'
    return t

t_ignore = " \t"

def t_error(t):
    raise LexError ('t_error: ' + str (t))

from ply import lex

def relex ():
    global lexer
    mk_quals ()
    lexer = lex.lex()

relex ()

def add_qual (qual_name, val):
    """Add a qualifier definition, and regenerate the lexer."""
    qual_dict[qual_name] = val
    relex ()
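For example, registering a hypothetical Subject qualifier (bib-1 use attribute 21 is Subject heading) so that SU=python lexes as QUAL RELOP WORD:

add_qual ('SU', (1, 21))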

from ply import yacc

#if in_setup:
#    import yacc
#else:
#    from PyZ3950 import yacc

class Node:
    def __init__(self,type,children=None,leaf=None):
        self.type = type
        if children:
            self.children = children
        else:
            self.children = [ ]
        self.leaf = leaf
    def str_child (self, child, depth):
        if isinstance (child, Node): # ugh
            return child.str_depth (depth)
        indent = " " * (4 * depth)
        return indent + str (child) + "\n"
    def str_depth (self, depth): # ugh
        indent = " " * (4 * depth)
        l = ["%s%s %s" % (indent, self.type, self.leaf)]
        l.append ("".join (map (lambda s: self.str_child (s, depth + 1),
                                self.children)))
        return "\n".join (l)
    def __str__(self):
        return "\n" + self.str_depth (0)

def p_top (t):
    'top : cclfind_or_attrset'
    t[0] = t[1]

def p_cclfind_or_attrset_1 (t):
    'cclfind_or_attrset : cclfind'
    t[0] = t[1]

def p_cclfind_or_attrset_2 (t):
    'cclfind_or_attrset : ATTRSET LPAREN WORD SLASH cclfind RPAREN'
    t[0] = Node ('attrset', [t[5]], t[3])

def p_ccl_find_1(t):
    'cclfind : cclfind LOGOP elements'
    t[0] = Node ('op', [t[1],t[3]], t[2])

def p_ccl_find_2(t):
    'cclfind : elements'
    t[0] = t[1]

def p_elements_1(t):
    'elements : LPAREN cclfind RPAREN'
    t[0] = t[2]

class QuallistVal:
    def __init__ (self, quallist, val):
        self.quallist = quallist
        self.val = val
    def __str__ (self):
        return "QV: %s %s" % (str(self.quallist),str (self.val))
    def __getitem__ (self, i):
        if i == 0: return self.quallist
        if i == 1: return self.val
        raise IndexError ('QuallistVal err ' + str (i))

def xlate_qualifier (x):
    if x[0] == '(' and x[-1] == ')':
        t = x[1:-1].split (',') # t must be of len 2 b/c of lexer
        return (string.atoi (t[0]), string.atoi (t[1]))
    return qual_dict[(x.upper ())]
def p_elements_2 (t):
    'elements : SET RELOP WORD'
    if t[2] <> '=':
        # report the offending set clause
        raise QuerySyntaxError (str (t[1]) + str (t[2]) + str (t[3]))
    t[0] = Node ('set', leaf = t[3])
def p_elements_3(t):
    'elements : val'
    t[0] = Node ('relop', QuallistVal (map (xlate_qualifier, default_quals), t[1]), default_relop)

def p_elements_4(t):
    'elements : quallist RELOP val'
    t[0] = Node ('relop', QuallistVal(map (xlate_qualifier, t[1]),t[3]), t[2])

# XXX p_elements_5 would be quals followed by recursive def'n, not yet implemented
# XXX p_elements_6 would be quals followed by range, not yet implemented.

def p_quallist_1 (t):
    'quallist : QUAL'
    t[0] = [t[1]]

def p_quallist_2 (t):
    'quallist : quallist COMMA QUAL'
    t[0] = t[1] + [t[3]]

def p_val_1(t):
    'val : QUOTEDVALUE'
    t[0] = t[1]

def p_val_2(t):
    'val : val WORD'
    t[0] = t[1] + " " + t[2]

def p_val_3(t):
    'val : WORD'
    t[0] = t[1]

# XXX also don't yet handle proximity operator

def p_error(t):
    raise ParseError ('Parse p_error ' + str (t))

precedence = (
    ('left', 'LOGOP'),
    )

yacc.yacc (debug=0, tabmodule = 'PyZ3950_parsetab')
#yacc.yacc (debug=0, tabpackage = 'PyZ3950', tabmodule='PyZ3950_parsetab')

def attrset_to_oid (attrset):
    l = attrset.lower ()
    if _attrdict.has_key (l):
        return _attrdict [l]
    split_l = l.split ('.')
    if split_l[0] == '':
        split_l = oids.Z3950_ATTRS + split_l[1:]
    try:
        intlist = map (string.atoi, split_l)
    except ValueError:
        raise ParseError ('Bad OID: ' + l)
    return asn1.OidVal (intlist)

def tree_to_q (ast):
    if ast.type == 'op':
        myrpnRpnOp = z3950.RpnRpnOp ()
        myrpnRpnOp.rpn1 = tree_to_q(ast.children[0])
        myrpnRpnOp.rpn2 = tree_to_q(ast.children[1])
        op = ast.leaf.lower ()
        if op == 'not': op = 'and-not' # CCL spec of 'not' vs. Z39.50 spec of 'and-not'
        myrpnRpnOp.op = (op, None)
        return ('rpnRpnOp', myrpnRpnOp)
    elif ast.type == 'relop':
        # XXX but e.g. LC (http://lcweb.loc.gov/z3950/lcserver.html)
        # doesn't support other relation attributes, either.
        try:
            relattr = relop_to_attrib [ast.leaf]
        except KeyError: # should never happen, how could we have lexed it?
            raise UnimplError (ast.leaf)
        def make_aelt (qual):
            val = ('numeric', qual [1])
            return z3950.AttributeElement (attributeType = qual[0],
                                           attributeValue = val)
        apt = z3950.AttributesPlusTerm ()
        quallist = ast.children.quallist
        if ast.leaf <> '=':
            quallist.append ((2,relattr)) # 2 is relation attribute
            # see http://www.loc.gov/z3950/agency/markup/13.html ATR.1.1
        apt.attributes = map (make_aelt, quallist)
        apt.term = ('general', ast.children.val) # XXX update for V3?
        return ('op', ('attrTerm', apt))
    elif ast.type == 'set':
        return ('op', ('resultSet', ast.leaf))

    raise UnimplError("Bad ast type " + str(ast.type))

def mk_rpn_query (query):
    """Transform a CCL query into an RPN query."""
    # need to copy or create a new lexer because it contains globals
    # PLY 1.0 lacks __copy__
    # PLY 1.3.1-1.5 have __copy__, but it's broken and returns None
    # I sent David Beazley a patch, so future PLY releases will
    # presumably work correctly.
    # Recreating the lexer each time is noticeably slower, so this solution
    # is suboptimal for PLY <= 1.5, but better than being thread-unsafe.
    # Perhaps I should have per-thread lexer instead XXX
    # with example/twisted/test.py set to parse_only, I get 277 parses/sec
    # with fixed PLY, vs. 63 parses/sec with broken PLY, on my 500 MHz PIII
    # laptop.

    copiedlexer = None
    if hasattr (lexer, '__copy__'):
        copiedlexer = lexer.__copy__ ()
    if copiedlexer == None:
        copiedlexer = lex.lex ()
    ast = yacc.parse (query, copiedlexer)
    return ast_to_rpn (ast)

def ast_to_rpn (ast):
    if ast.type == 'attrset':
        attrset = attrset_to_oid (ast.leaf)
        ast = ast.children [0]
    else:
        attrset = oids.Z3950_ATTRS_BIB1_ov
    rpnq = z3950.RPNQuery (attributeSet = attrset)
    rpnq.rpn = tree_to_q (ast)
    return ('type_1', rpnq)

def testlex (s):
    lexer.input (s)
    while 1:
        token = lexer.token ()
        if not token:
            break
        print token

def testyacc (s):
    copylex = lexer.__copy__ ()
    ast = yacc.parse (s, lexer = copylex)
    print "AST:", ast
    print "RPN Query:", ast_to_rpn (ast)

if __name__ == '__main__':
    testfn = testyacc
    # testfn = testlex
    testfn ('attrset (BIB1/ au="Gaiman, Neil" or ti=Sandman)')
    while 1:
        s = raw_input ('Query: ')
        if len (s) == 0:
            break
        testfn (s)
    # testyacc ()
    # testlex ()
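A minimal usage sketch (the ZOOM layer normally calls this for you; the query string is just an example):

from PyZ3950 import ccl
q = ccl.mk_rpn_query ('au="Gaiman, Neil" and ti=Sandman')
# q is a ('type_1', RPNQuery) tuple, ready to drop into a z3950 searchRequest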
53
python/PyZ3950/charneg.py
Normal file
@ -0,0 +1,53 @@
#!/usr/bin/env python

assert (0)
# XXX shouldn't use, absorbed into z3950_2001.py

#from PyZ3950 import asn1
import asn1

InitialSet=asn1.SEQUENCE ([('g0',None,asn1.TYPE(asn1.IMPLICIT(0,cls=asn1.CONTEXT_FLAG),asn1.INTEGER),1),
                           ('g1',None,asn1.TYPE(asn1.IMPLICIT(1,cls=asn1.CONTEXT_FLAG),asn1.INTEGER),1),
                           ('g2',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG),asn1.INTEGER),1),
                           ('g3',None,asn1.TYPE(asn1.IMPLICIT(3,cls=asn1.CONTEXT_FLAG),asn1.INTEGER),1),
                           ('c0',None,asn1.TYPE(asn1.IMPLICIT(4,cls=asn1.CONTEXT_FLAG),asn1.INTEGER),0),
                           ('c1',None,asn1.TYPE(asn1.IMPLICIT(5,cls=asn1.CONTEXT_FLAG),asn1.INTEGER),1)])

PrivateCharacterSet=asn1.CHOICE ([('viaOid',None,asn1.TYPE(asn1.IMPLICIT(1,cls=asn1.CONTEXT_FLAG), asn1.SEQUENCE_OF (asn1.OBJECT_IDENTIFIER))),
                                  ('externallySpecified',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG),asn1.EXTERNAL)),
                                  ('previouslyAgreedUpon',None,asn1.TYPE(asn1.IMPLICIT(3,cls=asn1.CONTEXT_FLAG),asn1.NULL))])

LeftAndRight=asn1.SEQUENCE ([('gLeft',None,asn1.TYPE(asn1.IMPLICIT(3,cls=asn1.CONTEXT_FLAG),asn1.INTEGER),0),
                             ('gRight',None,asn1.TYPE(asn1.IMPLICIT(4,cls=asn1.CONTEXT_FLAG),asn1.INTEGER),1)])

Iso10646=asn1.SEQUENCE ([('collections',None,asn1.TYPE(asn1.IMPLICIT(1,cls=asn1.CONTEXT_FLAG),asn1.OBJECT_IDENTIFIER),1),
                         ('encodingLevel',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG),asn1.OID),0)])

LanguageCode=asn1.GeneralString

Environment=asn1.CHOICE ([('sevenBit',None,asn1.TYPE(asn1.IMPLICIT(1,cls=asn1.CONTEXT_FLAG),asn1.NULL)),
                          ('eightBit',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG),asn1.NULL))])

Iso2022=asn1.CHOICE ([('originProposal',None,asn1.TYPE(asn1.IMPLICIT(1,cls=asn1.CONTEXT_FLAG), asn1.SEQUENCE ([('proposedEnvironment',None,asn1.TYPE(asn1.EXPLICIT(0,cls=asn1.CONTEXT_FLAG),Environment),1),
                                                                                                               ('proposedSets',None,asn1.TYPE(asn1.IMPLICIT(1,cls=asn1.CONTEXT_FLAG), asn1.SEQUENCE_OF (asn1.INTEGER)),0),
                                                                                                               ('proposedInitialSets',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG), asn1.SEQUENCE_OF (InitialSet)),0),
                                                                                                               ('proposedLeftAndRight',None,asn1.TYPE(asn1.IMPLICIT(3,cls=asn1.CONTEXT_FLAG),LeftAndRight),0)]))),
                      ('targetResponse',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG), asn1.SEQUENCE ([('selectedEnvironment',None,asn1.TYPE(asn1.EXPLICIT(0,cls=asn1.CONTEXT_FLAG),Environment),0),
                                                                                                              ('selectedSets',None,asn1.TYPE(asn1.IMPLICIT(1,cls=asn1.CONTEXT_FLAG), asn1.SEQUENCE_OF (asn1.INTEGER)),0),
                                                                                                              ('selectedinitialSet',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG),InitialSet),0),
                                                                                                              ('selectedLeftAndRight',None,asn1.TYPE(asn1.IMPLICIT(3,cls=asn1.CONTEXT_FLAG),LeftAndRight),0)])))])

TargetResponse=asn1.SEQUENCE ([('selectedCharSets',None,asn1.TYPE(asn1.EXPLICIT(1,cls=asn1.CONTEXT_FLAG), asn1.CHOICE ([('iso2022',None,asn1.TYPE(asn1.EXPLICIT(1,cls=asn1.CONTEXT_FLAG),Iso2022)),
                                                                                                                        ('iso10646',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG),Iso10646)),
                                                                                                                        ('private',None,asn1.TYPE(asn1.EXPLICIT(3,cls=asn1.CONTEXT_FLAG),PrivateCharacterSet)),
                                                                                                                        ('none',None,asn1.TYPE(asn1.IMPLICIT(4,cls=asn1.CONTEXT_FLAG),asn1.NULL))])),1),
                               ('selectedLanguage',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG),LanguageCode),1),
                               ('recordsInSelectedCharSets',None,asn1.TYPE(asn1.IMPLICIT(3,cls=asn1.CONTEXT_FLAG),asn1.BOOLEAN),1)])

OriginProposal=asn1.SEQUENCE ([('proposedCharSets',None,asn1.TYPE(asn1.IMPLICIT(1,cls=asn1.CONTEXT_FLAG), asn1.SEQUENCE_OF ( asn1.CHOICE ([('iso2022',None,asn1.TYPE(asn1.EXPLICIT(1,cls=asn1.CONTEXT_FLAG),Iso2022)),
                                                                                                                                            ('iso10646',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG),Iso10646)),
                                                                                                                                            ('private',None,asn1.TYPE(asn1.EXPLICIT(3,cls=asn1.CONTEXT_FLAG),PrivateCharacterSet))]))),1),
                               ('proposedlanguages',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG), asn1.SEQUENCE_OF (LanguageCode)),1),
                               ('recordsInSelectedCharSets',None,asn1.TYPE(asn1.IMPLICIT(3,cls=asn1.CONTEXT_FLAG),asn1.BOOLEAN),1)])
CharSetandLanguageNegotiation=asn1.CHOICE ([('proposal',None,asn1.TYPE(asn1.IMPLICIT(1,cls=asn1.CONTEXT_FLAG),OriginProposal)),
                                            ('response',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG),TargetResponse))])
65
python/PyZ3950/compile_oids.py
Normal file
@ -0,0 +1,65 @@
#!/usr/bin/env python

# Original by Robert Sanderson, modifications by Aaron Lav

import sys
from PyZ3950 import asn1

inh = file("oids.txt")
outh = file("oids.py", "w")
outh.write('from PyZ3950 import asn1\n')
# from ... to get same globals as others importing asn1
outh.write('oids = {}\n')

oids = {}
vars = {}

for line in inh:
    if (not line.isspace()):
        flds = line.split(None)
        name = flds[0]
        number = flds[1]
        if (len(flds) > 2):
            aliasList = flds[2:]
        else:
            aliasList = []

        if (number[0] == "."):

            # add to previous
            splitname = name.split("_")
            cur = oids
            for n in splitname[:-1]:
                cur = cur[n]

            val = cur['val'] + [int(number[1:])]
            oid = asn1.OidVal(val)

            cur [splitname[-1]] = {'oid': oid, 'val' : val}

            vars[name] = val
            tree = "oids['%s']" % "']['".join (splitname)
            outh.write(tree + " = " + "{'oid': asn1.OidVal(" + str(val) + "), 'val': " + str(val) + "}\n")

        else:
            # base
            splitnums = number.split('.')
            numlist = map(int, splitnums)

            oids[name] = {}
            oids[name]['oid'] = asn1.OidVal(numlist)
            oids[name]['val'] = numlist
            vars[name] = numlist

            outh.write("oids['" + name + "'] = {'oid': asn1.OidVal(" + str(numlist) + "), 'val': " + str(numlist) + "}\n")


inh.close()

items = vars.items()
items.sort()
for k,v in items:
    outh.write(k + " = " + str(v) + "\n")
    outh.write(k + "_ov = asn1.OidVal(" + str (v) + ")\n")

outh.close()
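The oids.txt format is inferable from the loop above: one name and dotted number per line, where a leading dot extends the previous base entry. Hypothetical input lines and the output they would produce:

#   Z3950        1.2.840.10003
#   Z3950_ATTRS  .3
# would emit, into oids.py:
#   oids['Z3950'] = {'oid': asn1.OidVal([1, 2, 840, 10003]), 'val': [1, 2, 840, 10003]}
#   oids['Z3950']['ATTRS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3]), 'val': [1, 2, 840, 10003, 3]}
# plus the flat variables Z3950, Z3950_ov, Z3950_ATTRS and Z3950_ATTRS_ov.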
71
python/PyZ3950/grs1.py
Normal file
@ -0,0 +1,71 @@
#!/usr/bin/env python

"""Utility functions for GRS-1 data"""

from __future__ import nested_scopes
# XXX still need to tag non-leaf nodes w/ (tagType, tagValue)
# XXX tagType can be omitted. If so, default either supplied
# dynamically by tagSet-M or statically spec'd by schema

# from TAG (Z39.50-1995 App 12): tagType 1 is tagSet-M, 2 tagSet-G,
# 3 locally defined.

class Node:
    """Defined members are:
    tag - tag (always present, except for top node)
    metadata - metadata (opt, seriesOrder only for nonleaf - v. RET.3.2.3 )
    children - list of Node
    leaf - leaf data (children and leaf are mutually exclusive)
    """
    def __init__ (self, **kw):
        self.__dict__.update (kw)
        self.tab_size = 3 # controls str() indentation width
    def str_depth (self, depth):
        l = []
        children = getattr (self, 'children', [])
        leaf = getattr (self, 'leaf', None)
        tag = getattr (self, 'tag', None)
        indent = " " * (self.tab_size * depth)
        if leaf <> None:
            l.append ("%s%s %s" % (
                indent, str (tag), leaf.content))
        else:
            if tag <> None:
                l.append (indent + str (tag))
            meta = getattr (self, 'metadata', None)
            if meta <> None:
                l.append (indent + 'metadata: ' + str (meta))
        l.append ("".join (map (
            lambda n: n.str_depth (depth + 1), children)))
        return "\n".join (l)
    def __str__ (self):
        return "\n" + self.str_depth (-1)


def preproc (raw):
    """Transform the raw output of the asn.1 decoder into something
    a bit more programmer-friendly. (This is automatically called
    by the ZOOM API, so you don't need to worry about it unless you're
    using the raw z3950 API.)
    """
    if isinstance (raw, type ([])):
        return Node (children = map (preproc, raw))
    else: # TaggedElement
        kw = {}
        tag = (raw.tagType, raw.tagValue [1])
        # Value [0] is str vs. num indicator
        kw ['tag'] = tag
        meta = getattr (raw, 'metaData', None)
        if meta <> None:
            kw ['metadata'] = meta
        if raw.content[0] == 'subtree':
            return Node (children = map (preproc, raw.content [1]), **kw)
        else:
            # tag and metadata are here redundantly encoded as
            # both attributes of leaf and of Node. Use the Node
            # attribs, I'll try to clean this up sometime.
            return Node (leaf = raw, **kw)
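A tiny hand-built tree, to show the shape preproc() produces (the tag here is made up; real tags come from the ASN.1 decoder):

from PyZ3950.grs1 import Node
root = Node (children = [Node (tag = (2, 1), children = [])])
print root   # prints the (2, 1) tag, indented per tab_size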
16434
python/PyZ3950/marc_to_unicode.py
Normal file
File diff suppressed because it is too large
479
python/PyZ3950/oids.py
Normal file
@ -0,0 +1,479 @@
|
||||
from PyZ3950 import asn1
|
||||
oids = {}
|
||||
oids['Z3950'] = {'oid': asn1.OidVal([1, 2, 840, 10003]), 'val': [1, 2, 840, 10003]}
|
||||
oids['Z3950']['ATTRS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3]), 'val': [1, 2, 840, 10003, 3]}
|
||||
oids['Z3950']['DIAG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4]), 'val': [1, 2, 840, 10003, 4]}
|
||||
oids['Z3950']['RECSYN'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5]), 'val': [1, 2, 840, 10003, 5]}
|
||||
oids['Z3950']['TRANSFER'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 6]), 'val': [1, 2, 840, 10003, 6]}
|
||||
oids['Z3950']['RRF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 7]), 'val': [1, 2, 840, 10003, 7]}
|
||||
oids['Z3950']['ACCESS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 8]), 'val': [1, 2, 840, 10003, 8]}
|
||||
oids['Z3950']['ES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9]), 'val': [1, 2, 840, 10003, 9]}
|
||||
oids['Z3950']['USR'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10]), 'val': [1, 2, 840, 10003, 10]}
|
||||
oids['Z3950']['SPEC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 11]), 'val': [1, 2, 840, 10003, 11]}
|
||||
oids['Z3950']['VAR'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 12]), 'val': [1, 2, 840, 10003, 12]}
|
||||
oids['Z3950']['SCHEMA'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13]), 'val': [1, 2, 840, 10003, 13]}
|
||||
oids['Z3950']['TAGSET'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14]), 'val': [1, 2, 840, 10003, 14]}
|
||||
oids['Z3950']['NEG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15]), 'val': [1, 2, 840, 10003, 15]}
|
||||
oids['Z3950']['QUERY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 16]), 'val': [1, 2, 840, 10003, 16]}
|
||||
oids['Z3950']['ATTRS']['BIB1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 1]), 'val': [1, 2, 840, 10003, 3, 1]}
|
||||
oids['Z3950']['ATTRS']['EXP1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 2]), 'val': [1, 2, 840, 10003, 3, 2]}
|
||||
oids['Z3950']['ATTRS']['EXT1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 3]), 'val': [1, 2, 840, 10003, 3, 3]}
|
||||
oids['Z3950']['ATTRS']['CCL1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 4]), 'val': [1, 2, 840, 10003, 3, 4]}
|
||||
oids['Z3950']['ATTRS']['GILS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 5]), 'val': [1, 2, 840, 10003, 3, 5]}
|
||||
oids['Z3950']['ATTRS']['STAS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 6]), 'val': [1, 2, 840, 10003, 3, 6]}
|
||||
oids['Z3950']['ATTRS']['COLLECTIONS1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 7]), 'val': [1, 2, 840, 10003, 3, 7]}
|
||||
oids['Z3950']['ATTRS']['CIMI1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 8]), 'val': [1, 2, 840, 10003, 3, 8]}
|
||||
oids['Z3950']['ATTRS']['GEO'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 9]), 'val': [1, 2, 840, 10003, 3, 9]}
|
||||
oids['Z3950']['ATTRS']['ZBIG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 10]), 'val': [1, 2, 840, 10003, 3, 10]}
|
||||
oids['Z3950']['ATTRS']['UTIL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 11]), 'val': [1, 2, 840, 10003, 3, 11]}
|
||||
oids['Z3950']['ATTRS']['XD1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 12]), 'val': [1, 2, 840, 10003, 3, 12]}
|
||||
oids['Z3950']['ATTRS']['ZTHES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 13]), 'val': [1, 2, 840, 10003, 3, 13]}
oids['Z3950']['ATTRS']['FIN1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 14]), 'val': [1, 2, 840, 10003, 3, 14]}
oids['Z3950']['ATTRS']['DAN1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 15]), 'val': [1, 2, 840, 10003, 3, 15]}
oids['Z3950']['ATTRS']['HOLD'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 16]), 'val': [1, 2, 840, 10003, 3, 16]}
oids['Z3950']['ATTRS']['MARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 17]), 'val': [1, 2, 840, 10003, 3, 17]}
oids['Z3950']['ATTRS']['BIB2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 18]), 'val': [1, 2, 840, 10003, 3, 18]}
oids['Z3950']['ATTRS']['ZEEREX'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 19]), 'val': [1, 2, 840, 10003, 3, 19]}
oids['Z3950']['DIAG']['BIB1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4, 1]), 'val': [1, 2, 840, 10003, 4, 1]}
oids['Z3950']['DIAG']['DIAG1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4, 2]), 'val': [1, 2, 840, 10003, 4, 2]}
oids['Z3950']['DIAG']['ES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4, 3]), 'val': [1, 2, 840, 10003, 4, 3]}
oids['Z3950']['DIAG']['GENERAL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4, 4]), 'val': [1, 2, 840, 10003, 4, 4]}
oids['Z3950']['RECSYN']['UNIMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 1]), 'val': [1, 2, 840, 10003, 5, 1]}
oids['Z3950']['RECSYN']['INTERMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 2]), 'val': [1, 2, 840, 10003, 5, 2]}
oids['Z3950']['RECSYN']['CCF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 3]), 'val': [1, 2, 840, 10003, 5, 3]}
oids['Z3950']['RECSYN']['USMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10]), 'val': [1, 2, 840, 10003, 5, 10]}
oids['Z3950']['RECSYN']['USMARC']['BIBLIO'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 1]), 'val': [1, 2, 840, 10003, 5, 10, 1]}
oids['Z3950']['RECSYN']['USMARC']['AUTH'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 2]), 'val': [1, 2, 840, 10003, 5, 10, 2]}
oids['Z3950']['RECSYN']['USMARC']['HOLD'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 3]), 'val': [1, 2, 840, 10003, 5, 10, 3]}
oids['Z3950']['RECSYN']['USMARC']['COMMUNITY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 4]), 'val': [1, 2, 840, 10003, 5, 10, 4]}
oids['Z3950']['RECSYN']['USMARC']['CLASS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 5]), 'val': [1, 2, 840, 10003, 5, 10, 5]}
oids['Z3950']['RECSYN']['UKMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 11]), 'val': [1, 2, 840, 10003, 5, 11]}
oids['Z3950']['RECSYN']['NORMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 12]), 'val': [1, 2, 840, 10003, 5, 12]}
oids['Z3950']['RECSYN']['LIBRISMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 13]), 'val': [1, 2, 840, 10003, 5, 13]}
oids['Z3950']['RECSYN']['DANMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 14]), 'val': [1, 2, 840, 10003, 5, 14]}
oids['Z3950']['RECSYN']['FINMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 15]), 'val': [1, 2, 840, 10003, 5, 15]}
oids['Z3950']['RECSYN']['MAB'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 16]), 'val': [1, 2, 840, 10003, 5, 16]}
oids['Z3950']['RECSYN']['CANMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 17]), 'val': [1, 2, 840, 10003, 5, 17]}
oids['Z3950']['RECSYN']['SBNMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 18]), 'val': [1, 2, 840, 10003, 5, 18]}
oids['Z3950']['RECSYN']['PICAMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 19]), 'val': [1, 2, 840, 10003, 5, 19]}
oids['Z3950']['RECSYN']['AUSMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 20]), 'val': [1, 2, 840, 10003, 5, 20]}
oids['Z3950']['RECSYN']['IBERMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 21]), 'val': [1, 2, 840, 10003, 5, 21]}
oids['Z3950']['RECSYN']['CATMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 22]), 'val': [1, 2, 840, 10003, 5, 22]}
oids['Z3950']['RECSYN']['MALMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 23]), 'val': [1, 2, 840, 10003, 5, 23]}
oids['Z3950']['RECSYN']['JPMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 24]), 'val': [1, 2, 840, 10003, 5, 24]}
oids['Z3950']['RECSYN']['SWEMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 25]), 'val': [1, 2, 840, 10003, 5, 25]}
oids['Z3950']['RECSYN']['SIGLEMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 26]), 'val': [1, 2, 840, 10003, 5, 26]}
oids['Z3950']['RECSYN']['ISDSMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 27]), 'val': [1, 2, 840, 10003, 5, 27]}
oids['Z3950']['RECSYN']['RUSMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 28]), 'val': [1, 2, 840, 10003, 5, 28]}
oids['Z3950']['RECSYN']['HUNMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 29]), 'val': [1, 2, 840, 10003, 5, 29]}
oids['Z3950']['RECSYN']['NACSISCATP'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 30]), 'val': [1, 2, 840, 10003, 5, 30]}
oids['Z3950']['RECSYN']['FINMARC2000'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 31]), 'val': [1, 2, 840, 10003, 5, 31]}
oids['Z3950']['RECSYN']['MARC21FIN'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 32]), 'val': [1, 2, 840, 10003, 5, 32]}
oids['Z3950']['RECSYN']['COMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 33]), 'val': [1, 2, 840, 10003, 5, 33]}
oids['Z3950']['RECSYN']['EXPLAIN'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 100]), 'val': [1, 2, 840, 10003, 5, 100]}
oids['Z3950']['RECSYN']['SUTRS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 101]), 'val': [1, 2, 840, 10003, 5, 101]}
oids['Z3950']['RECSYN']['OPAC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 102]), 'val': [1, 2, 840, 10003, 5, 102]}
oids['Z3950']['RECSYN']['SUMMARY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 103]), 'val': [1, 2, 840, 10003, 5, 103]}
oids['Z3950']['RECSYN']['GRS0'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 104]), 'val': [1, 2, 840, 10003, 5, 104]}
oids['Z3950']['RECSYN']['GRS1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 105]), 'val': [1, 2, 840, 10003, 5, 105]}
oids['Z3950']['RECSYN']['ES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 106]), 'val': [1, 2, 840, 10003, 5, 106]}
oids['Z3950']['RECSYN']['FRAGMENT'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 107]), 'val': [1, 2, 840, 10003, 5, 107]}
oids['Z3950']['RECSYN']['MIME'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109]), 'val': [1, 2, 840, 10003, 5, 109]}
oids['Z3950']['RECSYN']['MIME']['PDF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 1]), 'val': [1, 2, 840, 10003, 5, 109, 1]}
oids['Z3950']['RECSYN']['MIME']['POSTSCRIPT'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 2]), 'val': [1, 2, 840, 10003, 5, 109, 2]}
oids['Z3950']['RECSYN']['MIME']['HTML'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 3]), 'val': [1, 2, 840, 10003, 5, 109, 3]}
oids['Z3950']['RECSYN']['MIME']['TIFF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 4]), 'val': [1, 2, 840, 10003, 5, 109, 4]}
oids['Z3950']['RECSYN']['MIME']['GIF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 5]), 'val': [1, 2, 840, 10003, 5, 109, 5]}
oids['Z3950']['RECSYN']['MIME']['JPEG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 6]), 'val': [1, 2, 840, 10003, 5, 109, 6]}
oids['Z3950']['RECSYN']['MIME']['PNG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 7]), 'val': [1, 2, 840, 10003, 5, 109, 7]}
oids['Z3950']['RECSYN']['MIME']['MPEG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 8]), 'val': [1, 2, 840, 10003, 5, 109, 8]}
oids['Z3950']['RECSYN']['MIME']['SGML'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 9]), 'val': [1, 2, 840, 10003, 5, 109, 9]}
oids['Z3950']['RECSYN']['MIME']['XML'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 10]), 'val': [1, 2, 840, 10003, 5, 109, 10]}
oids['Z3950']['RECSYN']['ZMIME'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 110]), 'val': [1, 2, 840, 10003, 5, 110]}
oids['Z3950']['RECSYN']['ZMIME']['TIFFB'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 110, 1]), 'val': [1, 2, 840, 10003, 5, 110, 1]}
oids['Z3950']['RECSYN']['ZMIME']['WAV'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 110, 2]), 'val': [1, 2, 840, 10003, 5, 110, 2]}
oids['Z3950']['RECSYN']['SQL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 111]), 'val': [1, 2, 840, 10003, 5, 111]}
oids['Z3950']['RRF']['RESOURCE1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 7, 1]), 'val': [1, 2, 840, 10003, 7, 1]}
oids['Z3950']['RRF']['RESOURCE2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 7, 2]), 'val': [1, 2, 840, 10003, 7, 2]}
oids['Z3950']['ACCESS']['PROMPT1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 8, 1]), 'val': [1, 2, 840, 10003, 8, 1]}
oids['Z3950']['ACCESS']['DES1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 8, 2]), 'val': [1, 2, 840, 10003, 8, 2]}
oids['Z3950']['ACCESS']['KRB1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 8, 3]), 'val': [1, 2, 840, 10003, 8, 3]}
oids['Z3950']['ES']['PERSISTRS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 1]), 'val': [1, 2, 840, 10003, 9, 1]}
oids['Z3950']['ES']['PERSISTQRY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 2]), 'val': [1, 2, 840, 10003, 9, 2]}
oids['Z3950']['ES']['PERIODQRY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 3]), 'val': [1, 2, 840, 10003, 9, 3]}
oids['Z3950']['ES']['ITEMORDER'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 4]), 'val': [1, 2, 840, 10003, 9, 4]}
oids['Z3950']['ES']['DBUPDATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 5]), 'val': [1, 2, 840, 10003, 9, 5]}
oids['Z3950']['ES']['DBUPDATE']['REV'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 5, 1]), 'val': [1, 2, 840, 10003, 9, 5, 1]}
oids['Z3950']['ES']['DBUPDATE']['REV']['1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 5, 1, 1]), 'val': [1, 2, 840, 10003, 9, 5, 1, 1]}
oids['Z3950']['ES']['EXPORTSPEC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 6]), 'val': [1, 2, 840, 10003, 9, 6]}
oids['Z3950']['ES']['EXPORTINV'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 7]), 'val': [1, 2, 840, 10003, 9, 7]}
oids['Z3950']['USR']['SEARCHRES1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 1]), 'val': [1, 2, 840, 10003, 10, 1]}
oids['Z3950']['USR']['CHARSETNEG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 2]), 'val': [1, 2, 840, 10003, 10, 2]}
oids['Z3950']['USR']['INFO1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 3]), 'val': [1, 2, 840, 10003, 10, 3]}
oids['Z3950']['USR']['SEARCHTERMS1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 4]), 'val': [1, 2, 840, 10003, 10, 4]}
oids['Z3950']['USR']['SEARCHTERMS2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 5]), 'val': [1, 2, 840, 10003, 10, 5]}
oids['Z3950']['USR']['DATETIME'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 6]), 'val': [1, 2, 840, 10003, 10, 6]}
oids['Z3950']['USR']['INSERTACTIONQUAL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 9]), 'val': [1, 2, 840, 10003, 10, 9]}
oids['Z3950']['USR']['EDITACTIONQUAL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 10]), 'val': [1, 2, 840, 10003, 10, 10]}
oids['Z3950']['USR']['AUTHFILE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 11]), 'val': [1, 2, 840, 10003, 10, 11]}
oids['Z3950']['USR']['PRIVATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 1000]), 'val': [1, 2, 840, 10003, 10, 1000]}
oids['Z3950']['USR']['PRIVATE']['OCLC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 1000, 17]), 'val': [1, 2, 840, 10003, 10, 1000, 17]}
oids['Z3950']['USR']['PRIVATE']['OCLC']['INFO'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 1000, 17, 1]), 'val': [1, 2, 840, 10003, 10, 1000, 17, 1]}
oids['Z3950']['SPEC']['ESPEC1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 11, 1]), 'val': [1, 2, 840, 10003, 11, 1]}
oids['Z3950']['SPEC']['ESPEC2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 11, 2]), 'val': [1, 2, 840, 10003, 11, 2]}
oids['Z3950']['SPEC']['ESPECQ'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 11, 3]), 'val': [1, 2, 840, 10003, 11, 3]}
oids['Z3950']['VAR']['VARIANT1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 12, 1]), 'val': [1, 2, 840, 10003, 12, 1]}
oids['Z3950']['SCHEMA']['WAIS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 1]), 'val': [1, 2, 840, 10003, 13, 1]}
oids['Z3950']['SCHEMA']['GILS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 2]), 'val': [1, 2, 840, 10003, 13, 2]}
oids['Z3950']['SCHEMA']['COLLECTIONS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 3]), 'val': [1, 2, 840, 10003, 13, 3]}
oids['Z3950']['SCHEMA']['GEO'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 4]), 'val': [1, 2, 840, 10003, 13, 4]}
oids['Z3950']['SCHEMA']['CIMI'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 5]), 'val': [1, 2, 840, 10003, 13, 5]}
oids['Z3950']['SCHEMA']['UPDATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 6]), 'val': [1, 2, 840, 10003, 13, 6]}
oids['Z3950']['SCHEMA']['HOLDINGS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 7]), 'val': [1, 2, 840, 10003, 13, 7]}
oids['Z3950']['SCHEMA']['HOLDINGS']['11'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 7, 1]), 'val': [1, 2, 840, 10003, 13, 7, 1]}
oids['Z3950']['SCHEMA']['HOLDINGS']['12'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 7, 2]), 'val': [1, 2, 840, 10003, 13, 7, 2]}
oids['Z3950']['SCHEMA']['HOLDINGS']['14'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 7, 4]), 'val': [1, 2, 840, 10003, 13, 7, 4]}
oids['Z3950']['SCHEMA']['ZTHES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 1]), 'val': [1, 2, 840, 10003, 13, 1]}
oids['Z3950']['SCHEMA']['INSERT'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 1]), 'val': [1, 2, 840, 10003, 13, 1]}
oids['Z3950']['SCHEMA']['EDIT'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 1]), 'val': [1, 2, 840, 10003, 13, 1]}
oids['Z3950']['TAGSET']['M'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 1]), 'val': [1, 2, 840, 10003, 14, 1]}
oids['Z3950']['TAGSET']['G'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 2]), 'val': [1, 2, 840, 10003, 14, 2]}
oids['Z3950']['TAGSET']['STAS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 3]), 'val': [1, 2, 840, 10003, 14, 3]}
oids['Z3950']['TAGSET']['GILS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 4]), 'val': [1, 2, 840, 10003, 14, 4]}
oids['Z3950']['TAGSET']['COLLECTIONS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 5]), 'val': [1, 2, 840, 10003, 14, 5]}
oids['Z3950']['TAGSET']['CIMI'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 6]), 'val': [1, 2, 840, 10003, 14, 6]}
oids['Z3950']['TAGSET']['UPDATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 7]), 'val': [1, 2, 840, 10003, 14, 7]}
oids['Z3950']['TAGSET']['ZTHES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 8]), 'val': [1, 2, 840, 10003, 14, 8]}
oids['Z3950']['NEG']['CHARSET2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 1]), 'val': [1, 2, 840, 10003, 15, 1]}
oids['Z3950']['NEG']['ES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 2]), 'val': [1, 2, 840, 10003, 15, 2]}
oids['Z3950']['NEG']['CHARSET3'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 3]), 'val': [1, 2, 840, 10003, 15, 3]}
oids['Z3950']['NEG']['PRIVATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 1000]), 'val': [1, 2, 840, 10003, 15, 1000]}
oids['Z3950']['NEG']['PRIVATE']['INDEXDATA'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 1000, 81]), 'val': [1, 2, 840, 10003, 15, 1000, 81]}
oids['Z3950']['NEG']['PRIVATE']['INDEXDATA']['CHARSETNAME'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 1000, 81, 1]), 'val': [1, 2, 840, 10003, 15, 1000, 81, 1]}
oids['Z3950']['QUERY']['SQL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 16, 1]), 'val': [1, 2, 840, 10003, 16, 1]}
oids['Z3950']['QUERY']['CQL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 16, 2]), 'val': [1, 2, 840, 10003, 16, 2]}
oids['UNICODE'] = {'oid': asn1.OidVal([1, 0, 10646]), 'val': [1, 0, 10646]}
oids['UNICODE']['PART1'] = {'oid': asn1.OidVal([1, 0, 10646, 1]), 'val': [1, 0, 10646, 1]}
oids['UNICODE']['PART1']['XFERSYN'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0]), 'val': [1, 0, 10646, 1, 0]}
oids['UNICODE']['PART1']['XFERSYN']['UCS2'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0, 2]), 'val': [1, 0, 10646, 1, 0, 2]}
oids['UNICODE']['PART1']['XFERSYN']['UCS4'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0, 4]), 'val': [1, 0, 10646, 1, 0, 4]}
oids['UNICODE']['PART1']['XFERSYN']['UTF16'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0, 5]), 'val': [1, 0, 10646, 1, 0, 5]}
oids['UNICODE']['PART1']['XFERSYN']['UTF8'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0, 8]), 'val': [1, 0, 10646, 1, 0, 8]}
UNICODE = [1, 0, 10646]
UNICODE_ov = asn1.OidVal([1, 0, 10646])
UNICODE_PART1 = [1, 0, 10646, 1]
UNICODE_PART1_ov = asn1.OidVal([1, 0, 10646, 1])
UNICODE_PART1_XFERSYN = [1, 0, 10646, 1, 0]
UNICODE_PART1_XFERSYN_ov = asn1.OidVal([1, 0, 10646, 1, 0])
UNICODE_PART1_XFERSYN_UCS2 = [1, 0, 10646, 1, 0, 2]
UNICODE_PART1_XFERSYN_UCS2_ov = asn1.OidVal([1, 0, 10646, 1, 0, 2])
UNICODE_PART1_XFERSYN_UCS4 = [1, 0, 10646, 1, 0, 4]
UNICODE_PART1_XFERSYN_UCS4_ov = asn1.OidVal([1, 0, 10646, 1, 0, 4])
UNICODE_PART1_XFERSYN_UTF16 = [1, 0, 10646, 1, 0, 5]
UNICODE_PART1_XFERSYN_UTF16_ov = asn1.OidVal([1, 0, 10646, 1, 0, 5])
UNICODE_PART1_XFERSYN_UTF8 = [1, 0, 10646, 1, 0, 8]
UNICODE_PART1_XFERSYN_UTF8_ov = asn1.OidVal([1, 0, 10646, 1, 0, 8])
Z3950 = [1, 2, 840, 10003]
Z3950_ov = asn1.OidVal([1, 2, 840, 10003])
Z3950_ACCESS = [1, 2, 840, 10003, 8]
Z3950_ACCESS_ov = asn1.OidVal([1, 2, 840, 10003, 8])
Z3950_ACCESS_DES1 = [1, 2, 840, 10003, 8, 2]
Z3950_ACCESS_DES1_ov = asn1.OidVal([1, 2, 840, 10003, 8, 2])
Z3950_ACCESS_KRB1 = [1, 2, 840, 10003, 8, 3]
Z3950_ACCESS_KRB1_ov = asn1.OidVal([1, 2, 840, 10003, 8, 3])
Z3950_ACCESS_PROMPT1 = [1, 2, 840, 10003, 8, 1]
Z3950_ACCESS_PROMPT1_ov = asn1.OidVal([1, 2, 840, 10003, 8, 1])
Z3950_ATTRS = [1, 2, 840, 10003, 3]
Z3950_ATTRS_ov = asn1.OidVal([1, 2, 840, 10003, 3])
Z3950_ATTRS_BIB1 = [1, 2, 840, 10003, 3, 1]
Z3950_ATTRS_BIB1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 1])
Z3950_ATTRS_BIB2 = [1, 2, 840, 10003, 3, 18]
Z3950_ATTRS_BIB2_ov = asn1.OidVal([1, 2, 840, 10003, 3, 18])
Z3950_ATTRS_CCL1 = [1, 2, 840, 10003, 3, 4]
Z3950_ATTRS_CCL1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 4])
Z3950_ATTRS_CIMI1 = [1, 2, 840, 10003, 3, 8]
Z3950_ATTRS_CIMI1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 8])
Z3950_ATTRS_COLLECTIONS1 = [1, 2, 840, 10003, 3, 7]
Z3950_ATTRS_COLLECTIONS1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 7])
Z3950_ATTRS_DAN1 = [1, 2, 840, 10003, 3, 15]
Z3950_ATTRS_DAN1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 15])
Z3950_ATTRS_EXP1 = [1, 2, 840, 10003, 3, 2]
Z3950_ATTRS_EXP1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 2])
Z3950_ATTRS_EXT1 = [1, 2, 840, 10003, 3, 3]
Z3950_ATTRS_EXT1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 3])
Z3950_ATTRS_FIN1 = [1, 2, 840, 10003, 3, 14]
Z3950_ATTRS_FIN1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 14])
Z3950_ATTRS_GEO = [1, 2, 840, 10003, 3, 9]
Z3950_ATTRS_GEO_ov = asn1.OidVal([1, 2, 840, 10003, 3, 9])
Z3950_ATTRS_GILS = [1, 2, 840, 10003, 3, 5]
Z3950_ATTRS_GILS_ov = asn1.OidVal([1, 2, 840, 10003, 3, 5])
Z3950_ATTRS_HOLD = [1, 2, 840, 10003, 3, 16]
Z3950_ATTRS_HOLD_ov = asn1.OidVal([1, 2, 840, 10003, 3, 16])
Z3950_ATTRS_MARC = [1, 2, 840, 10003, 3, 17]
Z3950_ATTRS_MARC_ov = asn1.OidVal([1, 2, 840, 10003, 3, 17])
Z3950_ATTRS_STAS = [1, 2, 840, 10003, 3, 6]
Z3950_ATTRS_STAS_ov = asn1.OidVal([1, 2, 840, 10003, 3, 6])
Z3950_ATTRS_UTIL = [1, 2, 840, 10003, 3, 11]
Z3950_ATTRS_UTIL_ov = asn1.OidVal([1, 2, 840, 10003, 3, 11])
Z3950_ATTRS_XD1 = [1, 2, 840, 10003, 3, 12]
Z3950_ATTRS_XD1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 12])
Z3950_ATTRS_ZBIG = [1, 2, 840, 10003, 3, 10]
Z3950_ATTRS_ZBIG_ov = asn1.OidVal([1, 2, 840, 10003, 3, 10])
Z3950_ATTRS_ZEEREX = [1, 2, 840, 10003, 3, 19]
Z3950_ATTRS_ZEEREX_ov = asn1.OidVal([1, 2, 840, 10003, 3, 19])
Z3950_ATTRS_ZTHES = [1, 2, 840, 10003, 3, 13]
Z3950_ATTRS_ZTHES_ov = asn1.OidVal([1, 2, 840, 10003, 3, 13])
Z3950_DIAG = [1, 2, 840, 10003, 4]
Z3950_DIAG_ov = asn1.OidVal([1, 2, 840, 10003, 4])
Z3950_DIAG_BIB1 = [1, 2, 840, 10003, 4, 1]
Z3950_DIAG_BIB1_ov = asn1.OidVal([1, 2, 840, 10003, 4, 1])
Z3950_DIAG_DIAG1 = [1, 2, 840, 10003, 4, 2]
Z3950_DIAG_DIAG1_ov = asn1.OidVal([1, 2, 840, 10003, 4, 2])
Z3950_DIAG_ES = [1, 2, 840, 10003, 4, 3]
Z3950_DIAG_ES_ov = asn1.OidVal([1, 2, 840, 10003, 4, 3])
Z3950_DIAG_GENERAL = [1, 2, 840, 10003, 4, 4]
Z3950_DIAG_GENERAL_ov = asn1.OidVal([1, 2, 840, 10003, 4, 4])
Z3950_ES = [1, 2, 840, 10003, 9]
Z3950_ES_ov = asn1.OidVal([1, 2, 840, 10003, 9])
Z3950_ES_DBUPDATE = [1, 2, 840, 10003, 9, 5]
Z3950_ES_DBUPDATE_ov = asn1.OidVal([1, 2, 840, 10003, 9, 5])
Z3950_ES_DBUPDATE_REV = [1, 2, 840, 10003, 9, 5, 1]
Z3950_ES_DBUPDATE_REV_ov = asn1.OidVal([1, 2, 840, 10003, 9, 5, 1])
Z3950_ES_DBUPDATE_REV_1 = [1, 2, 840, 10003, 9, 5, 1, 1]
Z3950_ES_DBUPDATE_REV_1_ov = asn1.OidVal([1, 2, 840, 10003, 9, 5, 1, 1])
Z3950_ES_EXPORTINV = [1, 2, 840, 10003, 9, 7]
Z3950_ES_EXPORTINV_ov = asn1.OidVal([1, 2, 840, 10003, 9, 7])
Z3950_ES_EXPORTSPEC = [1, 2, 840, 10003, 9, 6]
Z3950_ES_EXPORTSPEC_ov = asn1.OidVal([1, 2, 840, 10003, 9, 6])
Z3950_ES_ITEMORDER = [1, 2, 840, 10003, 9, 4]
Z3950_ES_ITEMORDER_ov = asn1.OidVal([1, 2, 840, 10003, 9, 4])
Z3950_ES_PERIODQRY = [1, 2, 840, 10003, 9, 3]
Z3950_ES_PERIODQRY_ov = asn1.OidVal([1, 2, 840, 10003, 9, 3])
Z3950_ES_PERSISTQRY = [1, 2, 840, 10003, 9, 2]
Z3950_ES_PERSISTQRY_ov = asn1.OidVal([1, 2, 840, 10003, 9, 2])
Z3950_ES_PERSISTRS = [1, 2, 840, 10003, 9, 1]
Z3950_ES_PERSISTRS_ov = asn1.OidVal([1, 2, 840, 10003, 9, 1])
Z3950_NEG = [1, 2, 840, 10003, 15]
Z3950_NEG_ov = asn1.OidVal([1, 2, 840, 10003, 15])
Z3950_NEG_CHARSET2 = [1, 2, 840, 10003, 15, 1]
Z3950_NEG_CHARSET2_ov = asn1.OidVal([1, 2, 840, 10003, 15, 1])
Z3950_NEG_CHARSET3 = [1, 2, 840, 10003, 15, 3]
Z3950_NEG_CHARSET3_ov = asn1.OidVal([1, 2, 840, 10003, 15, 3])
Z3950_NEG_ES = [1, 2, 840, 10003, 15, 2]
Z3950_NEG_ES_ov = asn1.OidVal([1, 2, 840, 10003, 15, 2])
Z3950_NEG_PRIVATE = [1, 2, 840, 10003, 15, 1000]
Z3950_NEG_PRIVATE_ov = asn1.OidVal([1, 2, 840, 10003, 15, 1000])
Z3950_NEG_PRIVATE_INDEXDATA = [1, 2, 840, 10003, 15, 1000, 81]
Z3950_NEG_PRIVATE_INDEXDATA_ov = asn1.OidVal([1, 2, 840, 10003, 15, 1000, 81])
Z3950_NEG_PRIVATE_INDEXDATA_CHARSETNAME = [1, 2, 840, 10003, 15, 1000, 81, 1]
Z3950_NEG_PRIVATE_INDEXDATA_CHARSETNAME_ov = asn1.OidVal([1, 2, 840, 10003, 15, 1000, 81, 1])
Z3950_QUERY = [1, 2, 840, 10003, 16]
Z3950_QUERY_ov = asn1.OidVal([1, 2, 840, 10003, 16])
Z3950_QUERY_CQL = [1, 2, 840, 10003, 16, 2]
Z3950_QUERY_CQL_ov = asn1.OidVal([1, 2, 840, 10003, 16, 2])
Z3950_QUERY_SQL = [1, 2, 840, 10003, 16, 1]
Z3950_QUERY_SQL_ov = asn1.OidVal([1, 2, 840, 10003, 16, 1])
Z3950_RECSYN = [1, 2, 840, 10003, 5]
Z3950_RECSYN_ov = asn1.OidVal([1, 2, 840, 10003, 5])
Z3950_RECSYN_AUSMARC = [1, 2, 840, 10003, 5, 20]
Z3950_RECSYN_AUSMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 20])
Z3950_RECSYN_CANMARC = [1, 2, 840, 10003, 5, 17]
Z3950_RECSYN_CANMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 17])
Z3950_RECSYN_CATMARC = [1, 2, 840, 10003, 5, 22]
Z3950_RECSYN_CATMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 22])
Z3950_RECSYN_CCF = [1, 2, 840, 10003, 5, 3]
Z3950_RECSYN_CCF_ov = asn1.OidVal([1, 2, 840, 10003, 5, 3])
Z3950_RECSYN_COMARC = [1, 2, 840, 10003, 5, 33]
Z3950_RECSYN_COMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 33])
Z3950_RECSYN_DANMARC = [1, 2, 840, 10003, 5, 14]
Z3950_RECSYN_DANMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 14])
Z3950_RECSYN_ES = [1, 2, 840, 10003, 5, 106]
Z3950_RECSYN_ES_ov = asn1.OidVal([1, 2, 840, 10003, 5, 106])
Z3950_RECSYN_EXPLAIN = [1, 2, 840, 10003, 5, 100]
Z3950_RECSYN_EXPLAIN_ov = asn1.OidVal([1, 2, 840, 10003, 5, 100])
Z3950_RECSYN_FINMARC = [1, 2, 840, 10003, 5, 15]
Z3950_RECSYN_FINMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 15])
Z3950_RECSYN_FINMARC2000 = [1, 2, 840, 10003, 5, 31]
Z3950_RECSYN_FINMARC2000_ov = asn1.OidVal([1, 2, 840, 10003, 5, 31])
Z3950_RECSYN_FRAGMENT = [1, 2, 840, 10003, 5, 107]
Z3950_RECSYN_FRAGMENT_ov = asn1.OidVal([1, 2, 840, 10003, 5, 107])
Z3950_RECSYN_GRS0 = [1, 2, 840, 10003, 5, 104]
Z3950_RECSYN_GRS0_ov = asn1.OidVal([1, 2, 840, 10003, 5, 104])
Z3950_RECSYN_GRS1 = [1, 2, 840, 10003, 5, 105]
Z3950_RECSYN_GRS1_ov = asn1.OidVal([1, 2, 840, 10003, 5, 105])
Z3950_RECSYN_HUNMARC = [1, 2, 840, 10003, 5, 29]
Z3950_RECSYN_HUNMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 29])
Z3950_RECSYN_IBERMARC = [1, 2, 840, 10003, 5, 21]
Z3950_RECSYN_IBERMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 21])
Z3950_RECSYN_INTERMARC = [1, 2, 840, 10003, 5, 2]
Z3950_RECSYN_INTERMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 2])
Z3950_RECSYN_ISDSMARC = [1, 2, 840, 10003, 5, 27]
Z3950_RECSYN_ISDSMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 27])
Z3950_RECSYN_JPMARC = [1, 2, 840, 10003, 5, 24]
Z3950_RECSYN_JPMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 24])
Z3950_RECSYN_LIBRISMARC = [1, 2, 840, 10003, 5, 13]
Z3950_RECSYN_LIBRISMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 13])
Z3950_RECSYN_MAB = [1, 2, 840, 10003, 5, 16]
Z3950_RECSYN_MAB_ov = asn1.OidVal([1, 2, 840, 10003, 5, 16])
Z3950_RECSYN_MALMARC = [1, 2, 840, 10003, 5, 23]
Z3950_RECSYN_MALMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 23])
Z3950_RECSYN_MARC21FIN = [1, 2, 840, 10003, 5, 32]
Z3950_RECSYN_MARC21FIN_ov = asn1.OidVal([1, 2, 840, 10003, 5, 32])
Z3950_RECSYN_MIME = [1, 2, 840, 10003, 5, 109]
Z3950_RECSYN_MIME_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109])
Z3950_RECSYN_MIME_GIF = [1, 2, 840, 10003, 5, 109, 5]
Z3950_RECSYN_MIME_GIF_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 5])
Z3950_RECSYN_MIME_HTML = [1, 2, 840, 10003, 5, 109, 3]
Z3950_RECSYN_MIME_HTML_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 3])
Z3950_RECSYN_MIME_JPEG = [1, 2, 840, 10003, 5, 109, 6]
Z3950_RECSYN_MIME_JPEG_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 6])
Z3950_RECSYN_MIME_MPEG = [1, 2, 840, 10003, 5, 109, 8]
Z3950_RECSYN_MIME_MPEG_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 8])
Z3950_RECSYN_MIME_PDF = [1, 2, 840, 10003, 5, 109, 1]
Z3950_RECSYN_MIME_PDF_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 1])
Z3950_RECSYN_MIME_PNG = [1, 2, 840, 10003, 5, 109, 7]
Z3950_RECSYN_MIME_PNG_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 7])
Z3950_RECSYN_MIME_POSTSCRIPT = [1, 2, 840, 10003, 5, 109, 2]
Z3950_RECSYN_MIME_POSTSCRIPT_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 2])
Z3950_RECSYN_MIME_SGML = [1, 2, 840, 10003, 5, 109, 9]
Z3950_RECSYN_MIME_SGML_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 9])
Z3950_RECSYN_MIME_TIFF = [1, 2, 840, 10003, 5, 109, 4]
Z3950_RECSYN_MIME_TIFF_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 4])
Z3950_RECSYN_MIME_XML = [1, 2, 840, 10003, 5, 109, 10]
Z3950_RECSYN_MIME_XML_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 10])
Z3950_RECSYN_NACSISCATP = [1, 2, 840, 10003, 5, 30]
Z3950_RECSYN_NACSISCATP_ov = asn1.OidVal([1, 2, 840, 10003, 5, 30])
Z3950_RECSYN_NORMARC = [1, 2, 840, 10003, 5, 12]
Z3950_RECSYN_NORMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 12])
Z3950_RECSYN_OPAC = [1, 2, 840, 10003, 5, 102]
Z3950_RECSYN_OPAC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 102])
Z3950_RECSYN_PICAMARC = [1, 2, 840, 10003, 5, 19]
Z3950_RECSYN_PICAMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 19])
Z3950_RECSYN_RUSMARC = [1, 2, 840, 10003, 5, 28]
Z3950_RECSYN_RUSMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 28])
Z3950_RECSYN_SBNMARC = [1, 2, 840, 10003, 5, 18]
Z3950_RECSYN_SBNMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 18])
Z3950_RECSYN_SIGLEMARC = [1, 2, 840, 10003, 5, 26]
Z3950_RECSYN_SIGLEMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 26])
Z3950_RECSYN_SQL = [1, 2, 840, 10003, 5, 111]
Z3950_RECSYN_SQL_ov = asn1.OidVal([1, 2, 840, 10003, 5, 111])
Z3950_RECSYN_SUMMARY = [1, 2, 840, 10003, 5, 103]
Z3950_RECSYN_SUMMARY_ov = asn1.OidVal([1, 2, 840, 10003, 5, 103])
Z3950_RECSYN_SUTRS = [1, 2, 840, 10003, 5, 101]
Z3950_RECSYN_SUTRS_ov = asn1.OidVal([1, 2, 840, 10003, 5, 101])
Z3950_RECSYN_SWEMARC = [1, 2, 840, 10003, 5, 25]
Z3950_RECSYN_SWEMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 25])
Z3950_RECSYN_UKMARC = [1, 2, 840, 10003, 5, 11]
Z3950_RECSYN_UKMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 11])
Z3950_RECSYN_UNIMARC = [1, 2, 840, 10003, 5, 1]
Z3950_RECSYN_UNIMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 1])
Z3950_RECSYN_USMARC = [1, 2, 840, 10003, 5, 10]
Z3950_RECSYN_USMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10])
Z3950_RECSYN_USMARC_AUTH = [1, 2, 840, 10003, 5, 10, 2]
Z3950_RECSYN_USMARC_AUTH_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 2])
Z3950_RECSYN_USMARC_BIBLIO = [1, 2, 840, 10003, 5, 10, 1]
Z3950_RECSYN_USMARC_BIBLIO_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 1])
Z3950_RECSYN_USMARC_CLASS = [1, 2, 840, 10003, 5, 10, 5]
Z3950_RECSYN_USMARC_CLASS_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 5])
Z3950_RECSYN_USMARC_COMMUNITY = [1, 2, 840, 10003, 5, 10, 4]
Z3950_RECSYN_USMARC_COMMUNITY_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 4])
Z3950_RECSYN_USMARC_HOLD = [1, 2, 840, 10003, 5, 10, 3]
Z3950_RECSYN_USMARC_HOLD_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 3])
Z3950_RECSYN_ZMIME = [1, 2, 840, 10003, 5, 110]
Z3950_RECSYN_ZMIME_ov = asn1.OidVal([1, 2, 840, 10003, 5, 110])
Z3950_RECSYN_ZMIME_TIFFB = [1, 2, 840, 10003, 5, 110, 1]
Z3950_RECSYN_ZMIME_TIFFB_ov = asn1.OidVal([1, 2, 840, 10003, 5, 110, 1])
Z3950_RECSYN_ZMIME_WAV = [1, 2, 840, 10003, 5, 110, 2]
Z3950_RECSYN_ZMIME_WAV_ov = asn1.OidVal([1, 2, 840, 10003, 5, 110, 2])
Z3950_RRF = [1, 2, 840, 10003, 7]
Z3950_RRF_ov = asn1.OidVal([1, 2, 840, 10003, 7])
Z3950_RRF_RESOURCE1 = [1, 2, 840, 10003, 7, 1]
Z3950_RRF_RESOURCE1_ov = asn1.OidVal([1, 2, 840, 10003, 7, 1])
Z3950_RRF_RESOURCE2 = [1, 2, 840, 10003, 7, 2]
Z3950_RRF_RESOURCE2_ov = asn1.OidVal([1, 2, 840, 10003, 7, 2])
Z3950_SCHEMA = [1, 2, 840, 10003, 13]
Z3950_SCHEMA_ov = asn1.OidVal([1, 2, 840, 10003, 13])
Z3950_SCHEMA_CIMI = [1, 2, 840, 10003, 13, 5]
Z3950_SCHEMA_CIMI_ov = asn1.OidVal([1, 2, 840, 10003, 13, 5])
Z3950_SCHEMA_COLLECTIONS = [1, 2, 840, 10003, 13, 3]
Z3950_SCHEMA_COLLECTIONS_ov = asn1.OidVal([1, 2, 840, 10003, 13, 3])
Z3950_SCHEMA_EDIT = [1, 2, 840, 10003, 13, 1]
Z3950_SCHEMA_EDIT_ov = asn1.OidVal([1, 2, 840, 10003, 13, 1])
Z3950_SCHEMA_GEO = [1, 2, 840, 10003, 13, 4]
Z3950_SCHEMA_GEO_ov = asn1.OidVal([1, 2, 840, 10003, 13, 4])
Z3950_SCHEMA_GILS = [1, 2, 840, 10003, 13, 2]
Z3950_SCHEMA_GILS_ov = asn1.OidVal([1, 2, 840, 10003, 13, 2])
Z3950_SCHEMA_HOLDINGS = [1, 2, 840, 10003, 13, 7]
Z3950_SCHEMA_HOLDINGS_ov = asn1.OidVal([1, 2, 840, 10003, 13, 7])
Z3950_SCHEMA_HOLDINGS_11 = [1, 2, 840, 10003, 13, 7, 1]
Z3950_SCHEMA_HOLDINGS_11_ov = asn1.OidVal([1, 2, 840, 10003, 13, 7, 1])
Z3950_SCHEMA_HOLDINGS_12 = [1, 2, 840, 10003, 13, 7, 2]
Z3950_SCHEMA_HOLDINGS_12_ov = asn1.OidVal([1, 2, 840, 10003, 13, 7, 2])
Z3950_SCHEMA_HOLDINGS_14 = [1, 2, 840, 10003, 13, 7, 4]
Z3950_SCHEMA_HOLDINGS_14_ov = asn1.OidVal([1, 2, 840, 10003, 13, 7, 4])
Z3950_SCHEMA_INSERT = [1, 2, 840, 10003, 13, 1]
Z3950_SCHEMA_INSERT_ov = asn1.OidVal([1, 2, 840, 10003, 13, 1])
Z3950_SCHEMA_UPDATE = [1, 2, 840, 10003, 13, 6]
Z3950_SCHEMA_UPDATE_ov = asn1.OidVal([1, 2, 840, 10003, 13, 6])
Z3950_SCHEMA_WAIS = [1, 2, 840, 10003, 13, 1]
Z3950_SCHEMA_WAIS_ov = asn1.OidVal([1, 2, 840, 10003, 13, 1])
Z3950_SCHEMA_ZTHES = [1, 2, 840, 10003, 13, 1]
Z3950_SCHEMA_ZTHES_ov = asn1.OidVal([1, 2, 840, 10003, 13, 1])
Z3950_SPEC = [1, 2, 840, 10003, 11]
Z3950_SPEC_ov = asn1.OidVal([1, 2, 840, 10003, 11])
Z3950_SPEC_ESPEC1 = [1, 2, 840, 10003, 11, 1]
Z3950_SPEC_ESPEC1_ov = asn1.OidVal([1, 2, 840, 10003, 11, 1])
Z3950_SPEC_ESPEC2 = [1, 2, 840, 10003, 11, 2]
Z3950_SPEC_ESPEC2_ov = asn1.OidVal([1, 2, 840, 10003, 11, 2])
Z3950_SPEC_ESPECQ = [1, 2, 840, 10003, 11, 3]
Z3950_SPEC_ESPECQ_ov = asn1.OidVal([1, 2, 840, 10003, 11, 3])
Z3950_TAGSET = [1, 2, 840, 10003, 14]
Z3950_TAGSET_ov = asn1.OidVal([1, 2, 840, 10003, 14])
Z3950_TAGSET_CIMI = [1, 2, 840, 10003, 14, 6]
Z3950_TAGSET_CIMI_ov = asn1.OidVal([1, 2, 840, 10003, 14, 6])
Z3950_TAGSET_COLLECTIONS = [1, 2, 840, 10003, 14, 5]
Z3950_TAGSET_COLLECTIONS_ov = asn1.OidVal([1, 2, 840, 10003, 14, 5])
Z3950_TAGSET_G = [1, 2, 840, 10003, 14, 2]
Z3950_TAGSET_G_ov = asn1.OidVal([1, 2, 840, 10003, 14, 2])
Z3950_TAGSET_GILS = [1, 2, 840, 10003, 14, 4]
Z3950_TAGSET_GILS_ov = asn1.OidVal([1, 2, 840, 10003, 14, 4])
Z3950_TAGSET_M = [1, 2, 840, 10003, 14, 1]
Z3950_TAGSET_M_ov = asn1.OidVal([1, 2, 840, 10003, 14, 1])
Z3950_TAGSET_STAS = [1, 2, 840, 10003, 14, 3]
Z3950_TAGSET_STAS_ov = asn1.OidVal([1, 2, 840, 10003, 14, 3])
Z3950_TAGSET_UPDATE = [1, 2, 840, 10003, 14, 7]
Z3950_TAGSET_UPDATE_ov = asn1.OidVal([1, 2, 840, 10003, 14, 7])
Z3950_TAGSET_ZTHES = [1, 2, 840, 10003, 14, 8]
Z3950_TAGSET_ZTHES_ov = asn1.OidVal([1, 2, 840, 10003, 14, 8])
Z3950_TRANSFER = [1, 2, 840, 10003, 6]
Z3950_TRANSFER_ov = asn1.OidVal([1, 2, 840, 10003, 6])
Z3950_USR = [1, 2, 840, 10003, 10]
Z3950_USR_ov = asn1.OidVal([1, 2, 840, 10003, 10])
Z3950_USR_AUTHFILE = [1, 2, 840, 10003, 10, 11]
Z3950_USR_AUTHFILE_ov = asn1.OidVal([1, 2, 840, 10003, 10, 11])
Z3950_USR_CHARSETNEG = [1, 2, 840, 10003, 10, 2]
Z3950_USR_CHARSETNEG_ov = asn1.OidVal([1, 2, 840, 10003, 10, 2])
Z3950_USR_DATETIME = [1, 2, 840, 10003, 10, 6]
Z3950_USR_DATETIME_ov = asn1.OidVal([1, 2, 840, 10003, 10, 6])
Z3950_USR_EDITACTIONQUAL = [1, 2, 840, 10003, 10, 10]
Z3950_USR_EDITACTIONQUAL_ov = asn1.OidVal([1, 2, 840, 10003, 10, 10])
Z3950_USR_INFO1 = [1, 2, 840, 10003, 10, 3]
Z3950_USR_INFO1_ov = asn1.OidVal([1, 2, 840, 10003, 10, 3])
Z3950_USR_INSERTACTIONQUAL = [1, 2, 840, 10003, 10, 9]
Z3950_USR_INSERTACTIONQUAL_ov = asn1.OidVal([1, 2, 840, 10003, 10, 9])
Z3950_USR_PRIVATE = [1, 2, 840, 10003, 10, 1000]
Z3950_USR_PRIVATE_ov = asn1.OidVal([1, 2, 840, 10003, 10, 1000])
Z3950_USR_PRIVATE_OCLC = [1, 2, 840, 10003, 10, 1000, 17]
Z3950_USR_PRIVATE_OCLC_ov = asn1.OidVal([1, 2, 840, 10003, 10, 1000, 17])
Z3950_USR_PRIVATE_OCLC_INFO = [1, 2, 840, 10003, 10, 1000, 17, 1]
Z3950_USR_PRIVATE_OCLC_INFO_ov = asn1.OidVal([1, 2, 840, 10003, 10, 1000, 17, 1])
Z3950_USR_SEARCHRES1 = [1, 2, 840, 10003, 10, 1]
Z3950_USR_SEARCHRES1_ov = asn1.OidVal([1, 2, 840, 10003, 10, 1])
Z3950_USR_SEARCHTERMS1 = [1, 2, 840, 10003, 10, 4]
Z3950_USR_SEARCHTERMS1_ov = asn1.OidVal([1, 2, 840, 10003, 10, 4])
Z3950_USR_SEARCHTERMS2 = [1, 2, 840, 10003, 10, 5]
Z3950_USR_SEARCHTERMS2_ov = asn1.OidVal([1, 2, 840, 10003, 10, 5])
Z3950_VAR = [1, 2, 840, 10003, 12]
Z3950_VAR_ov = asn1.OidVal([1, 2, 840, 10003, 12])
Z3950_VAR_VARIANT1 = [1, 2, 840, 10003, 12, 1]
Z3950_VAR_VARIANT1_ov = asn1.OidVal([1, 2, 840, 10003, 12, 1])
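
# Both spellings above name the same object identifier; e.g. (illustrative)
# oids.oids['Z3950']['ATTRS']['BIB1']['oid'] and Z3950_ATTRS_BIB1_ov are each
# asn1.OidVal([1, 2, 840, 10003, 3, 1]). The nested dict is keyed by registry
# branch; the flat *_ov constants are the pre-built asn1.OidVal equivalents.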
260
python/PyZ3950/pqf.py
Normal file
@ -0,0 +1,260 @@
#!/usr/local/bin/python2.3

try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO
from PyZ3950 import z3950, oids, asn1
from PyZ3950.zdefs import make_attr
from types import IntType, StringType, ListType
from PyZ3950.CQLParser import CQLshlex


"""
Parser for PQF directly into RPN structure.
PQF docs: http://www.indexdata.dk/yaz/doc/tools.html

NB: This does not implement /everything/ in PQF. In particular, an attribute
set on a whole boolean, e.g. @attr 2=3 @and @attr 1=4 title @attr 1=1003 author
(where relation 2=3 should carry over to all subsequent clauses), is not handled.

"""
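# Usage sketch (illustrative, not part of the original module):
#   from PyZ3950.pqf import parse, rpn2pqf
#   rpn = parse('@and @attr 1=4 "dinosaur" @attr 1=1003 "smith"')
#   pqf = rpn2pqf(rpn)   # maps the RPN structure back to (roughly) the same PQF
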
class PQFParser:
    lexer = None
    currentToken = None
    nextToken = None

    def __init__(self, l):
        self.lexer = l
        self.fetch_token()

    def fetch_token(self):
        """ Read ahead one token """
        tok = self.lexer.get_token()
        self.currentToken = self.nextToken
        self.nextToken = tok

    def is_boolean(self):
        if (self.currentToken.lower() in ['@and', '@or', '@not', '@prox']):
            return 1
        else:
            return 0

    def defaultClause(self, t):
        # Assign a default clause: anywhere =
        clause = z3950.AttributesPlusTerm()
        attrs = [(oids.Z3950_ATTRS_BIB1, 1, 1016), (oids.Z3950_ATTRS_BIB1, 2, 3)]
        clause.attributes = [make_attr(*e) for e in attrs]
        clause.term = t
        return ('op', ('attrTerm', clause))
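    # In bib-1, use attribute 1016 is 'Any' and relation 3 is 'equal', so an
    # unadorned term is searched anywhere with an equality match.
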
    # Grammar fns

    def query(self):
        set = self.top_set()
        qst = self.query_struct()

        # Pull in a (hopefully) null token
        self.fetch_token()
        if (self.currentToken):
            # Nope, unprocessed tokens remain
            raise(ValueError)

        rpnq = z3950.RPNQuery()
        if set:
            rpnq.attributeSet = set
        else:
            rpnq.attributeSet = oids.Z3950_ATTRS_BIB1_ov
        rpnq.rpn = qst

        return ('type_1', rpnq)

    def top_set(self):
        if (self.nextToken == '@attrset'):
            self.fetch_token()
            self.fetch_token()
            n = self.currentToken.upper()
            if (n[:14] == "1.2.840.10003."):
                return asn1.OidVal(map(int, n.split('.')))
            return oids.oids['Z3950']['ATTRS'][n]['oid']
        else:
            return None

    # This totally ignores the BNF, but does the 'right' thing
    def query_struct(self):
        self.fetch_token()
        if (self.currentToken == '@attr'):
            attrs = []
            while self.currentToken == '@attr':
                attrs.append(self.attr_spec())
                self.fetch_token()
            t = self.term()

            # Now we have attrs + term
            clause = z3950.AttributesPlusTerm()
            clause.attributes = [make_attr(*e) for e in attrs]
            clause.term = t
            return ('op', ('attrTerm', clause))
        elif (self.is_boolean()):
            # @operator query query
            return self.complex()
        elif (self.currentToken == '@set'):
            return self.result_set()
        elif (self.currentToken == "{"):
            # Parens
            s = self.query_struct()
            if (self.nextToken <> "}"):
                raise(ValueError)
            else:
                self.fetch_token()
            return s

        else:
            t = self.term()
            return self.defaultClause(t)

    def term(self):
        # Need to split to allow attrlist then @term
        type = 'general'
        if (self.currentToken == '@term'):
            self.fetch_token()
            type = self.currentToken.lower()
            types = {'general' : 'general', 'string' : 'characterString', 'numeric' : 'numeric', 'external' : 'external'}
            type = types[type]
            self.fetch_token()

        if (self.currentToken[0] == '"' and self.currentToken[-1] == '"'):
            term = self.currentToken[1:-1]
        else:
            term = self.currentToken

        return (type, term)

    def result_set(self):
        self.fetch_token()
        return ('op', ('resultSet', self.currentToken))

    def attr_spec(self):
        # @attr is CT
        self.fetch_token()
        if (self.currentToken.find('=') == -1):
            # attrset
            set = self.currentToken
            if (set[:14] == "1.2.840.10003."):
                set = asn1.OidVal(map(int, set.split('.')))
            else:
                set = oids.oids['Z3950']['ATTRS'][set.upper()]['oid']
            self.fetch_token()
        else:
            set = None
        # May raise
        (atype, val) = self.currentToken.split('=')
        if (not atype.isdigit()):
            raise ValueError
        atype = int(atype)
        if (val.isdigit()):
            val = int(val)
        return (set, atype, val)
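    # Examples (illustrative): '@attr 1=1003' parses to (None, 1, 1003);
    # '@attr gils 1=2021' parses to (the GILS attrset OID, 1, 2021).
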
    def complex(self):
        op = z3950.RpnRpnOp()
        op.op = self.boolean()
        op.rpn1 = self.query_struct()
        op.rpn2 = self.query_struct()
        return ('rpnRpnOp', op)

    def boolean(self):
        b = self.currentToken[1:]
        b = b.lower()
        if (b == 'prox'):
            self.fetch_token()
            exclusion = self.currentToken
            self.fetch_token()
            distance = self.currentToken
            self.fetch_token()
            ordered = self.currentToken
            self.fetch_token()
            relation = self.currentToken
            self.fetch_token()
            which = self.currentToken
            self.fetch_token()
            unit = self.currentToken

            prox = z3950.ProximityOperator()
            if (not (relation.isdigit() and exclusion.isdigit() and distance.isdigit() and unit.isdigit())):
                raise ValueError
            prox.relationType = int(relation)
            # bool(int(...)): bool('0') would be true, since '0' is a non-empty string
            prox.exclusion = bool(int(exclusion))
            prox.distance = int(distance)
            # NB: 'ordered' is read but not currently mapped onto the operator
            if (which[0] == 'k'):
                prox.unit = ('known', int(unit))
            elif (which[0] == 'p'):
                prox.unit = ('private', int(unit))
            else:
                raise ValueError

            return (b, prox)
        elif b == 'not':
            return ('and-not', None)
        else:
            return (b, None)

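# Illustrative prox clause: in '@prox 0 3 1 2 k 2' the tokens are consumed
# above as exclusion=0, distance=3, ordered=1, relation=2, which='k' (known
# unit), unit=2 (word).
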
def parse(q):

    query = StringIO(q)
    lexer = CQLshlex(query)
    # Override CQL's wordchars list to include /=><()
    lexer.wordchars += "!@#$%^&*-+[];,.?|~`:\\><=/'()"

    parser = PQFParser(lexer)
    return parser.query()


def rpn2pqf(rpn):
    # Turn RPN structure into PQF equivalent
    q = rpn[1]
    if (rpn[0] == 'type_1'):
        # Top level
        if (q.attributeSet):
            query = '@attrset %s ' % ('.'.join(map(str, q.attributeSet.lst)))
        else:
            query = ""
        rest = rpn2pqf(q.rpn)
        return "%s%s" % (query, rest)
    elif (rpn[0] == 'rpnRpnOp'):
        # boolean
        if (q.op[0] in ['and', 'or']):
            query = ['@', q.op[0], ' ']
        elif (q.op[0] == 'and-not'):
            query = ['@not ']
        else:
            query = ['@prox']
            # XXX
        query.append(' ')
        query.append(rpn2pqf(q.rpn1))
        query.append(' ')
        query.append(rpn2pqf(q.rpn2))
        return ''.join(query)
    elif (rpn[0] == 'op'):
        if (q[0] == 'attrTerm'):
            query = []
            for a in q[1].attributes:
                if (a.attributeValue[0] == 'numeric'):
                    val = str(a.attributeValue[1])
                else:
                    val = a.attributeValue[1].list[0][1]
                query.append("@attr %i=%s " % (a.attributeType, val))
            query.append('"%s" ' % (q[1].term[1]))
            return ''.join(query)
        elif (q[0] == 'resultSet'):
            return "@set %s" % (q[1])

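# Round-trip sketch: for the parse() example near the top of this file,
# rpn2pqf would emit something like
#   @attrset 1.2.840.10003.3.1 @and @attr 1=4 "dinosaur" @attr 1=1003 "smith"
# (modulo extra spaces left by the simple string assembly above).
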
754
python/PyZ3950/z3950.py
Normal file
@ -0,0 +1,754 @@
#!/usr/bin/env python

# This file should be available from
# http://www.pobox.com/~asl2/software/PyZ3950/
# and is licensed under the X Consortium license:
# Copyright (c) 2001, Aaron S. Lav, asl2@pobox.com
# All rights reserved.

# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, provided that the above
# copyright notice(s) and this permission notice appear in all copies of
# the Software and that both the above copyright notice(s) and this
# permission notice appear in supporting documentation.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
# OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL
# INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING
# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

# Except as contained in this notice, the name of a copyright holder
# shall not be used in advertising or otherwise to promote the sale, use
# or other dealings in this Software without prior written authorization
# of the copyright holder.

# Change history:
# 2002/05/23
# Fix for Python2 compatibility. Thanks to Douglas Bates <bates@stat.wisc.edu>
# Fix to support SUTRS (requires asn1 updates, too)
# 2002/05/28
# Make SUTRS printing a little more useful
# Correctly close connection when done
# Handle receiving diagnostics instead of records a little better

"""<p>PyZ3950 currently is capable of sending and receiving v2 or v3 PDUs
Initialize, Search, Present, Scan, Sort, Close, and Delete. For client
work, you probably want to use ZOOM, which should be in the same
distribution as this file, in zoom.py. The Server class in this file
implements a server, but could use some work. Both interoperate with
the <a href="http://www.indexdata.dk/yaz"> Yaz toolkit</a> and the
client interoperates with a variety of libraries. <p>

Useful resources:
<ul>
<li><a href="http://lcweb.loc.gov/z3950/agency/">
Library of Congress Z39.50 Maintenance Agency Page</a></li>
<li><a href="http://lcweb.loc.gov/z3950/agency/document.html">
Official Specification</a></li>
<li><a href="http://www.loc.gov/z3950/agency/clarify/">Clarifications</a></li>
</ul>
"""

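# Minimal client sketch (illustrative; the host name is a placeholder):
#   conn = Client('z3950.example.org', DEFAULT_PORT)
#   ... then drive the session via conn.transact(...), or, for real
#   applications, prefer the ZOOM API in zoom.py as noted above.
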
from __future__ import nested_scopes
import getopt
import sys
import exceptions
import random
import socket
import string
import traceback

import codecs

from PyZ3950 import asn1
from PyZ3950 import zmarc
from PyZ3950.zdefs import *

out_encoding = None

trace_recv = 0
trace_init = 0
trace_charset = 0 # referenced below (set_codec, init); must be defined here

print_hex = 0

class Z3950Error(Exception):
    pass

# Note: following 3 exceptions are defaults, but can be changed by
# calling conn.set_exns

class ConnectionError(Z3950Error): # TCP or other transport error
    pass

class ProtocolError(Z3950Error): # Unexpected message or badly formatted
    pass

class UnexpectedCloseError(ProtocolError):
    pass

vers = '0.62'
default_resultSetName = 'default'


DEFAULT_PORT = 2101

Z3950_VERS = 3 # This is a global switch: do we support V3 at all?

def extract_recs (resp):
    (typ, recs) = resp.records
    if (typ <> 'responseRecords'):
        raise ProtocolError ("Bad records typ " + str (typ) + str (recs))
    if len (recs) == 0:
        raise ProtocolError ("No records")
    fmtoid = None
    extract = []
    for r in recs:
        (typ, data) = r.record
        if (typ <> 'retrievalRecord'):
            raise ProtocolError ("Bad typ %s data %s" % (str (typ), str(data)))
        oid = data.direct_reference
        if fmtoid == None:
            fmtoid = oid
        elif fmtoid <> oid:
            raise ProtocolError (
                "Differing OIDs %s %s" % (str (fmtoid), str (oid)))
            # Not, strictly speaking, an error.
        dat = data.encoding
        (typ, dat) = dat
        if (oid == Z3950_RECSYN_USMARC_ov):
            if typ <> 'octet-aligned':
                raise ProtocolError ("Weird record EXTERNAL MARC type: " + typ)
        extract.append (dat)
    return (fmtoid, extract)

def get_formatter (oid):
    def printer (x):
        print oid, repr (x)
    def print_marc (marc):
        print str (zmarc.MARC(marc))
    def print_sutrs (x):
        print "SUTRS:",
        if isinstance (x, type ('')):
            print x
        elif isinstance (x, type (u'')):
            if out_encoding == None:
                print repr (x)
            else:
                try:
                    print x.encode (out_encoding)
                except UnicodeError, u:
                    print "Cannot print %s in current encoding %s" % (
                        repr (x), out_encoding)
    if oid == Z3950_RECSYN_SUTRS_ov:
        return print_sutrs
    if oid == Z3950_RECSYN_USMARC_ov:
        return print_marc
    else:
        return printer

def disp_resp (resp):
    try:
        (fmtoid, recs) = extract_recs (resp)
    except ProtocolError, val:
        print "Bad records", str (val)
        return # nothing to format on this path
    formatter = get_formatter (fmtoid)
    for rec in recs:
        formatter (rec)

class Conn:
    rdsz = 65536
    def __init__ (self, sock = None, ConnectionError = ConnectionError,
                  ProtocolError = ProtocolError, UnexpectedCloseError =
                  UnexpectedCloseError):
        self.set_exns (ConnectionError, ProtocolError, UnexpectedCloseError)
        if sock == None:
            self.sock = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
        else:
            self.sock = sock
        self.decode_ctx = asn1.IncrementalDecodeCtx (APDU)
        self.encode_ctx = asn1.Ctx ()
    def set_exns (self, conn, protocol, unexp_close):
        self.ConnectionError = conn
        self.ProtocolError = protocol
        self.UnexpectedCloseError = unexp_close

    def set_codec (self, charset_name, charsets_in_records):
        self.charset_name = charset_name
        self.charsets_in_records = not not charsets_in_records # collapse None and 0
        if trace_charset:
            print "Setting up codec!", self.charset_name
        strip_bom = self.charset_name == 'utf-16'
        # XXX should create a new codec which wraps utf-16 but
        # strips the Byte Order Mark, or use stream codecs
        if self.charset_name <> None:
            self.encode_ctx.set_codec (asn1.GeneralString,
                                       codecs.lookup (self.charset_name),
                                       strip_bom)
            self.decode_ctx.set_codec (asn1.GeneralString,
                                       codecs.lookup (self.charset_name),
                                       strip_bom)
            if not charsets_in_records: # None or 0
                register_retrieval_record_oids(self.decode_ctx)
                register_retrieval_record_oids(self.encode_ctx)

    def readproc (self):
        if self.sock == None:
            raise self.ConnectionError ('disconnected')
        try:
            b = self.sock.recv (self.rdsz)
        except socket.error, val:
            self.sock = None
            raise self.ConnectionError ('socket', str (val))
        if len (b) == 0: # graceful close
            self.sock = None
            raise self.ConnectionError ('graceful close')
        if trace_recv:
            print map (lambda x: hex(ord(x)), b)
        return b
    def read_PDU (self):
        while 1:
            if self.decode_ctx.val_count () > 0:
                return self.decode_ctx.get_first_decoded ()
            try:
                b = self.readproc ()
                self.decode_ctx.feed (map (ord, b))
            except asn1.BERError, val:
                raise self.ProtocolError ('ASN1 BER', str(val))

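# read_PDU() above keeps feeding raw bytes into the incremental BER decoder
# until a complete APDU is available, so callers never see partial PDUs.
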
class Server (Conn):
    test = 0
    def __init__ (self, sock):
        Conn.__init__ (self, sock)
        self.expecting_init = 1
        self.done = 0
        self.result_sets = {}
        self.charset_name = None
    def run (self):
        while not self.done:
            (typ, val) = self.read_PDU ()
            fn = self.fn_dict.get (typ, None)
            if fn == None:
                raise self.ProtocolError ("Bad typ", typ + " " + str (val))
            if typ <> 'initRequest' and self.expecting_init:
                raise self.ProtocolError ("Init expected", typ)
            fn (self, val)
    def send (self, val):
        b = self.encode_ctx.encode (APDU, val)
        if self.test:
            print "Internal Testing"
            # a reminder not to leave this switched on by accident
            self.decode_ctx.feed (b)
            decoded = self.read_PDU ()
            assert (val == decoded)
        self.sock.send (b)

    def do_close (self, reason, info):
        close = Close ()
        close.closeReason = reason
        close.diagnosticInformation = info
        self.send (('close', close))

    def close (self, parm):
        self.done = 1
        self.do_close (0, 'Normal close')

    def search_child (self, query):
        return range (random.randint (2, 10))
    def search (self, sreq):
        if sreq.replaceIndicator == 0 and self.result_sets.has_key (
            sreq.resultSetName):
            raise self.ProtocolError ("replaceIndicator 0")
        result = self.search_child (sreq.query)
        sresp = SearchResponse ()
        self.result_sets[sreq.resultSetName] = result
        sresp.resultCount = len (result)
        sresp.numberOfRecordsReturned = 0
        sresp.nextResultSetPosition = 1
        sresp.searchStatus = 1
        sresp.resultSetStatus = 0
        sresp.presentStatus = PresentStatus.get_num_from_name ('success')
        sresp.records = ('responseRecords', [])
        self.send (('searchResponse', sresp))
    def format_records (self, start, count, res_set, prefsyn):
        l = []
        for i in range (start - 1, start + count - 1):
            elt = res_set[i]
            elt_external = asn1.EXTERNAL ()
            elt_external.direct_reference = Z3950_RECSYN_SUTRS_ov

            # Not only has this text been extensively translated, but
            # it also prefigures Z39.50's separation of Search and Present,
            # once rearranged a little.
            strings = [
                'seek, and ye shall find; ask, and it shall be given you',
                u"""Car quiconque demande re\u00e7oit, qui cherche trouve, et \u00e0 qui frappe on ouvrira""", # This (next) verse has non-ASCII characters
                u"\u0391\u03b9\u03c4\u03b5\u03b9\u03c4\u03b5, "
                u"\u03ba\u03b1\u03b9 \u03b4\u03bf\u03b8\u03b7\u03c3\u03b5\u03c4\u03b1\u03b9 " +
                u"\u03c5\u03bc\u03b9\u03bd; \u03b6\u03b7\u03c4\u03b5\u03b9\u03c4\u03b5 " +
                u"\u03ba\u03b1\u03b9 \u03b5\u03c5\u03c1\u03b7\u03c3\u03b5\u03c4\u03b5",
                u"\u05e8\u05d0\u05d4 \u05d6\u05d4 \u05de\u05e6\u05d0\u05ea\u05d9"]
            if self.charsets_in_records:
                encode_charset = self.charset_name
            else:
                encode_charset = 'ascii'
            def can_encode (s):
                try:
                    s.encode (encode_charset)
                except UnicodeError:
                    return 0
                return 1
            if self.charset_name == None:
                candidate_strings = [strings[0]]
            else:
                candidate_strings = [s for s in strings if can_encode (s)]
            # Note: this code is for debugging/testing purposes. Usually,
            # language/content selection should not be made on the
            # basis of the selected charset, and a surrogate diagnostic
            # should be generated if the data cannot be encoded.
            text = random.choice (candidate_strings)
            add_str = " #%d charset %s cir %d" % (elt, encode_charset,
                                                  self.charsets_in_records)
            elt_external.encoding = ('single-ASN1-type', text + add_str)
            n = NamePlusRecord ()
            n.name = 'foo'
            n.record = ('retrievalRecord', elt_external)
            l.append (n)
        return l

    def present (self, preq):
        presp = PresentResponse ()
        res_set = self.result_sets [preq.resultSetId]
        presp.numberOfRecordsReturned = preq.numberOfRecordsRequested
        presp.nextResultSetPosition = preq.resultSetStartPoint + \
                                      preq.numberOfRecordsRequested
        presp.presentStatus = 0
        presp.records = ('responseRecords',
                         self.format_records (preq.resultSetStartPoint,
                                              preq.numberOfRecordsRequested,
                                              res_set,
                                              preq.preferredRecordSyntax))
        self.send (('presentResponse', presp))

    def init (self, ireq):
        if trace_init:
            print "Init received", ireq
        self.v3_flag = (ireq.protocolVersion ['version_3'] and
                        Z3950_VERS == 3)

        ir = InitializeResponse ()
        ir.protocolVersion = ProtocolVersion ()
        ir.protocolVersion ['version_1'] = 1
        ir.protocolVersion ['version_2'] = 1
        ir.protocolVersion ['version_3'] = self.v3_flag
        val = get_charset_negot (ireq)
        charset_name = None
        records_in_charsets = 0
        if val <> None:
            csreq = CharsetNegotReq ()
            csreq.unpack_proposal (val)
            def rand_choose (list_or_none):
                if list_or_none == None or len (list_or_none) == 0:
                    return None
                return random.choice (list_or_none)
            charset_name = rand_choose (csreq.charset_list)
            if charset_name <> None:
                try:
                    codecs.lookup (charset_name)
                except LookupError, l:
                    charset_name = None
            csresp = CharsetNegotResp (
                charset_name,
                rand_choose (csreq.lang_list),
                csreq.records_in_charsets)
            records_in_charsets = csresp.records_in_charsets
            if trace_charset:
                print csreq, csresp
            set_charset_negot (ir, csresp.pack_negot_resp (), self.v3_flag)

        optionslist = ['search', 'present', 'delSet', 'scan', 'negotiation']
        ir.options = Options ()
        for o in optionslist:
            ir.options[o] = 1

        ir.preferredMessageSize = 0

        ir.exceptionalRecordSize = 0
        # Z39.50-2001 3.2.1.1.4: 0 means the client should be prepared to
        # accept arbitrarily long messages.

        ir.implementationId = implementationId

        ir.implementationName = 'PyZ3950 Test server'
        ir.implementationVersion = impl_vers
        ir.result = 1

        if trace_charset or trace_init:
            print ir
        self.expecting_init = 0
        self.send (('initResponse', ir))
        self.set_codec (charset_name, records_in_charsets)

    def sort (self, sreq):
        sresp = SortResponse ()
        sresp.sortStatus = 0
        self.send (('sortResponse', sresp))
    def delete (self, dreq):
        dresp = DeleteResultSetResponse ()
        dresp.deleteOperationStatus = 0
        self.send (('deleteResultSetResponse', dresp))
    def esrequest (self, esreq):
        print "ES", esreq
        esresp = ExtendedServicesResponse ()
        esresp.operationStatus = ExtendedServicesResponse['operationStatus'].get_num_from_name ('failure')
        self.send (('extendedServicesResponse', esresp))

    fn_dict = {'searchRequest': search,
               'presentRequest': present,
               'initRequest' : init,
               'close' : close,
               'sortRequest' : sort,
               'deleteResultSetRequest' : delete,
               'extendedServicesRequest': esrequest}


def run_server (test = 0):
    listen = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
    listen.setsockopt (socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listen.bind (('', DEFAULT_PORT))
    listen.listen (1)
    while 1:
        (sock, addr) = listen.accept ()
        try:
            serv = Server (sock)
            serv.test = test
            serv.run ()
        except:
            (typ, val, tb) = sys.exc_info ()
            if typ == exceptions.KeyboardInterrupt:
                print "kbd interrupt, leaving"
                raise
            print "error %s %s from %s" % (typ, val, addr)
            traceback.print_exc(40)
        sock.close ()

def extract_apt (rpnQuery):
|
||||
"""Takes RPNQuery to AttributePlusTerm"""
|
||||
RPNStruct = rpnQuery.rpn
|
||||
assert (RPNStruct [0] == 'op')
|
||||
operand = RPNStruct [1]
|
||||
assert (operand [0] == 'attrTerm')
|
||||
return operand [1]
|
||||
|
||||
|
||||
class Client (Conn):
|
||||
test = 0
|
||||
|
||||
def __init__ (self, addr, port = DEFAULT_PORT, optionslist = None,
|
||||
charset = None, lang = None, user = None, password = None,
|
||||
preferredMessageSize = 0x100000, group = None,
|
||||
maximumRecordSize = 0x100000, implementationId = "",
|
||||
implementationName = "", implementationVersion = "",
|
||||
ConnectionError = ConnectionError,
|
||||
ProtocolError = ProtocolError,
|
||||
UnexpectedCloseError = UnexpectedCloseError):
|
||||
|
||||
Conn.__init__ (self, ConnectionError = ConnectionError,
|
||||
ProtocolError = ProtocolError,
|
||||
UnexpectedCloseError = UnexpectedCloseError)
|
||||
try:
|
||||
self.sock.connect ((addr, port))
|
||||
except socket.error, val:
|
||||
self.sock = None
|
||||
raise self.ConnectionError ('socket', str(val))
|
||||
try_v3 = Z3950_VERS == 3
|
||||
|
||||
if (charset and not isinstance(charset, list)):
|
||||
charset = [charset]
|
||||
if (lang and not isinstance(lang, list)):
|
||||
charset = [lang]
|
||||
negotiate_charset = charset or lang
|
||||
|
||||
if (user or password or group):
|
||||
authentication = (user, password, group)
|
||||
else:
|
||||
authentication = None
|
||||
|
||||
InitReq = make_initreq (optionslist, authentication = authentication,
|
||||
v3 = try_v3,
|
||||
preferredMessageSize = preferredMessageSize,
|
||||
maximumRecordSize = maximumRecordSize,
|
||||
implementationId = implementationId,
|
||||
implementationName = implementationName,
|
||||
implementationVersion = implementationVersion,
|
||||
negotiate_charset = negotiate_charset)
|
||||
if negotiate_charset:
|
||||
# languages = ['eng', 'fre', 'enm']
|
||||
# Thanne longen folk to looken in catalogues
|
||||
# and clerkes for to seken straunge bookes ...
|
||||
cnr = CharsetNegotReq (charset, lang, random.choice((0,1,None)))
|
||||
if trace_charset:
|
||||
print cnr
|
||||
set_charset_negot (InitReq, cnr.pack_proposal (), try_v3)
|
||||
|
||||
if trace_init:
|
||||
print "Initialize request", InitReq
|
||||
|
||||
self.initresp = self.transact (
|
||||
('initRequest', InitReq), 'initResponse')
|
||||
if trace_init:
|
||||
print "Initialize Response", self.initresp
|
||||
self.v3_flag = self.initresp.protocolVersion ['version_3']
|
||||
val = get_charset_negot (self.initresp)
|
||||
if val <> None:
|
||||
csr = CharsetNegotResp ()
|
||||
csr.unpack_negot_resp (val)
|
||||
if trace_charset:
|
||||
print "Got csr", str (csr)
|
||||
self.set_codec (csr.charset, csr.records_in_charsets)
|
||||
|
||||
self.search_results = {}
|
||||
self.max_to_request = 20
|
||||
self.default_recordSyntax = Z3950_RECSYN_USMARC_ov
|
||||
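
    # Illustrative sketch (not part of the original source): constructing
    # a Client that proposes UTF-8 record encoding during init. The host,
    # port, and database name are placeholders.
    #
    #     cli = Client ('z3950.loc.gov', 7090, charset = 'utf-8')
    #     cli.set_dbnames (['Voyager'])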
    def get_option (self, option_name):
        return self.initresp.options[option_name]

    def transact (self, to_send, expected):
        b = self.encode_ctx.encode (APDU, to_send)
        if print_hex:
            print map (hex, b)
        if self.test:
            print "Internal Testing"
            # a reminder not to leave this switched on by accident
            self.decode_ctx.feed (b)
            decoded = self.read_PDU ()
            print "to_send", to_send, "decoded", decoded
            assert (to_send == decoded)
        if self.sock == None:
            raise self.ConnectionError ('disconnected')
        try:
            self.sock.send (b)
        except socket.error, val:
            self.sock = None
            raise self.ConnectionError ('socket', str(val))

        if expected == None:
            return
        pdu = self.read_PDU ()
        (arm, val) = pdu
        if self.test:
            print "Internal Testing 2"
            b = self.encode_ctx.encode (APDU, (arm, val))
            self.decode_ctx.feed (b)
            redecoded = self.read_PDU ()
            if redecoded <> (arm, val):
                print "Redecoded", redecoded
                print "old", (arm, val)
                assert (redecoded == (arm, val))
        if arm == expected: # may be 'close'
            return val
        elif arm == 'close':
            raise self.UnexpectedCloseError (
                "Server closed connection reason %d diag info %s" % \
                (getattr (val, 'closeReason', -1),
                 getattr (val, 'diagnosticInformation', 'None given')))
        else:
            raise self.ProtocolError (
                "Unexpected response from server %s %s " % (expected,
                                                            repr ((arm, val))))

    def set_dbnames (self, dbnames):
        self.dbnames = dbnames

    def search_2 (self, query, rsn = default_resultSetName, **kw):
        # We used to check self.initresp.options['search'], but
        # support for search is required by the standard, and
        # www.cnshb.ru:210 doesn't set the search bit if you negotiate
        # v2, but supports search anyway.
        sreq = make_sreq (query, self.dbnames, rsn, **kw)
        recv = self.transact (('searchRequest', sreq), 'searchResponse')
        self.search_results [rsn] = recv
        return recv

    def search (self, query, rsn = default_resultSetName, **kw):
        # for backwards compat
        recv = self.search_2 (('type_1', query), rsn, **kw)
        return recv.searchStatus and (recv.resultCount > 0)
        # If searchStatus is failure, check result-set-status:
        # - subset  - partial, valid results available
        # - interim - partial, not necessarily valid
        # - none    - no result set
        # If searchStatus is success, check present-status:
        # - success   - OK
        # - partial-1 - not all, access control
        # - partial-2 - not all, won't fit in msg size (but we currently
        #   don't ask for any records in search, so this shouldn't happen)
        # - partial-3 - not all, resource control (origin)
        # - partial-4 - not all, resource control (target)
        # - failure   - no records, nonsurrogate diagnostic

    def get_count (self, rsn = default_resultSetName):
        return self.search_results[rsn].resultCount

    def delete (self, rsn):
        if not self.initresp.options['delSet']:
            return None
        delreq = DeleteResultSetRequest ()
        delreq.deleteFunction = 0 # list
        delreq.resultSetList = [rsn]
        return self.transact (('deleteResultSetRequest', delreq),
                              'deleteResultSetResponse')

    def present (self, rsn = default_resultSetName, start = None,
                 count = None, recsyn = None, esn = None):
        # don't check for support in init resp: see search for reasoning

        # XXX Azaroth 2004-01-08. This does work when rs is result of sort.
        try:
            sresp = self.search_results [rsn]
            if start == None:
                start = sresp.nextResultSetPosition
            if count == None:
                count = sresp.resultCount
                if self.max_to_request > 0:
                    count = min (self.max_to_request, count)
        except:
            pass
        if recsyn == None:
            recsyn = self.default_recordSyntax
        preq = PresentRequest ()
        preq.resultSetId = rsn
        preq.resultSetStartPoint = start
        preq.numberOfRecordsRequested = count
        preq.preferredRecordSyntax = recsyn
        if esn <> None:
            preq.recordComposition = ('simple', esn)
        return self.transact (('presentRequest', preq), 'presentResponse')

    def scan (self, query, **kw):
        sreq = ScanRequest ()
        sreq.databaseNames = self.dbnames
        assert (query[0] == 'type_1' or query[0] == 'type_101')
        sreq.attributeSet = query[1].attributeSet
        sreq.termListAndStartPoint = extract_apt (query[1])
        sreq.numberOfTermsRequested = 20 # default
        for (key, val) in kw.items ():
            setattr (sreq, key, val)

        return self.transact (('scanRequest', sreq), 'scanResponse')

    def close (self):
        close = Close ()
        close.closeReason = 0
        close.diagnosticInformation = 'Normal close'
        try:
            rv = self.transact (('close', close), 'close')
        except self.ConnectionError:
            rv = None
        if self.sock <> None:
            self.sock.close ()
            self.sock = None
        return rv
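
# Illustrative paging sketch (not in the original module): walk a result
# set in server-sized chunks by following nextResultSetPosition from each
# presentResponse. Assumes a prior search populated cli.search_results
# for rsn, and that the server fills in nextResultSetPosition as the
# test server above does.
#
#     def fetch_all (cli, rsn, total):
#         pos = 1
#         while pos <= total:
#             presp = cli.present (rsn = rsn, start = pos,
#                                  count = min (cli.max_to_request,
#                                               total - pos + 1))
#             pos = presp.nextResultSetPosition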

def mk_compound_query ():
    aelt1 = AttributeElement (attributeType = 1,
                              attributeValue = ('numeric', 4))
    apt1 = AttributesPlusTerm ()
    apt1.attributes = [aelt1]
    apt1.term = ('general', '1066')
    aelt2 = AttributeElement (attributeType = 1,
                              attributeValue = ('numeric', 1))
    apt2 = AttributesPlusTerm ()
    apt2.attributes = [aelt2]
    apt2.term = ('general', 'Sellar')
    myrpnRpnOp = RpnRpnOp ()
    myrpnRpnOp.rpn1 = ('op', ('attrTerm', apt1))
    myrpnRpnOp.rpn2 = ('op', ('attrTerm', apt2))
    myrpnRpnOp.op = ('and', None)
    rpnq = RPNQuery (attributeSet = Z3950_ATTRS_BIB1_ov)
    rpnq.rpn = ('rpnRpnOp', myrpnRpnOp)
    return rpnq

def mk_simple_query (title):
    aelt1 = AttributeElement (attributeType = 1,
                              attributeValue = ('numeric', 1003))
    apt1 = AttributesPlusTerm ()
    apt1.attributes = [aelt1]
    apt1.term = ('general', title) # XXX should be characterString, not general, but only for V3
    rpnq = RPNQuery (attributeSet = Z3950_ATTRS_BIB1_ov)
    rpnq.rpn = ('op', ('attrTerm', apt1))
    return rpnq
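
# The query builders above pair with Client like this (sketch only;
# it mirrors the __main__ driver below):
#
#     cli = Client ('z3950.loc.gov', 7090)
#     cli.set_dbnames (['Voyager'])
#     if cli.search (mk_simple_query ('Perec, Georges')):
#         print cli.get_count ()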

def_host = 'LC'

# (hostname, port, database name)
host_dict = {'BIBSYS': ('z3950.bibsys.no', 2100, 'BIBSYS'),
             'YAZ': ('127.0.0.1', 9999, 'foo'),
             'LCTEST': ('ilssun2.loc.gov', 7090, 'Voyager'),
             'LC': ('z3950.loc.gov', 7090, 'Voyager'),
             'NLC': ('amicus.nlc-bnc.ca', 210, 'NL'),
             'BNC': ('amicus.nlc-bnc.ca', 210, 'NL'),
             # On parle franc,ais aussi.
             'LOCAL': ('127.0.0.1', 9999, 'Default'),
             'LOCAL2': ('127.0.0.1', 2101, 'foo'),
             'BL': ('blpcz.bl.uk', 21021, 'BLPC-ALL'),
             'BELLLABS': ('z3950.bell-labs.com', 210, 'books'),
             'BIBHIT': ('www.bibhit.dk', 210, 'Default'),
             'YALE': ('webpac.library.yale.edu', 210, 'YALEOPAC'),
             'OXFORD': ('library.ox.ac.uk', 210, 'ADVANCE'),
             'OVID': ('z3950.ovid.com', 2213, 'pmed'), # scan only
             'UC': ('ipac.lib.uchicago.edu', 210, 'uofc'),
             'KUB': ('dbiref.kub.nl', 1800, 'jel'),
             'INDEXDATA': ('muffin.indexdata.dk', 9004, 'thatt')}
# last two are Zthes servers.

if __name__ == '__main__':
    optlist, args = getopt.getopt (sys.argv[1:], 'e:sh:tc:l:')
    server = 0
    host = def_host
    test = 0
    charset_list = None
    lang_list = None
    for (opt, val) in optlist:
        if opt == '-s':
            server = 1
        elif opt == '-h':
            host = val
        elif opt == '-t':
            test = 1
        elif opt == '-e':
            out_encoding = val
        elif opt == '-c':
            charset_list = val.split (',')
        elif opt == '-l':
            lang_list = val.split (',')
    if server:
        run_server (test)

    host = host.upper ()
    (name, port, dbname) = host_dict.get (host, host_dict[def_host])
    cli = Client (name, port, charset = charset_list,
                  lang = lang_list)
    cli.test = test
    cli.set_dbnames ([dbname])
    print "Starting search"
    # rpnq = mk_simple_query ('Perec, Georges')
    # rpnq = mk_simple_query ('Johnson, Kim')
    rpnq = mk_compound_query ()
    if cli.search (rpnq, smallSetUpperBound = 0, mediumSetPresentNumber = 0,
                   largeSetLowerBound = 1):
        disp_resp (cli.present (recsyn = Z3950_RECSYN_USMARC_ov))
    else:
        print "Not found"
    print "Deleting"
    cli.delete (default_resultSetName)
    cli.delete ('bogus')
    print "Closing"
    try:
        cli.close ()
    except ConnectionError:
        # looks like LC, at least, sends a FIN on receipt of Close PDU;
        # guess we should check for gracefulness of close, and complain
        # if not.
        pass
1503  python/PyZ3950/z3950_2001.py  (new file; diff suppressed because it is too large)

340  python/PyZ3950/zdefs.py  (new file)
@@ -0,0 +1,340 @@
#!/usr/bin/env python

import codecs

from PyZ3950.z3950_2001 import *
from PyZ3950.oids import *


asn1.register_oid (Z3950_RECSYN_GRS1, GenericRecord)
asn1.register_oid (Z3950_RECSYN_SUTRS, asn1.GeneralString)
asn1.register_oid (Z3950_RECSYN_EXPLAIN, Explain_Record)
asn1.register_oid (Z3950_RECSYN_OPAC, OPACRecord)

asn1.register_oid (Z3950_ES_PERSISTRS, PersistentResultSet)
asn1.register_oid (Z3950_ES_PERSISTQRY, PersistentQuery)
asn1.register_oid (Z3950_ES_PERIODQRY, PeriodicQuerySchedule)
asn1.register_oid (Z3950_ES_ITEMORDER, ItemOrder)
asn1.register_oid (Z3950_ES_DBUPDATE, Update)
asn1.register_oid (Z3950_ES_DBUPDATE_REV_1, Update_updrev1)
asn1.register_oid (Z3950_ES_EXPORTSPEC, ExportSpecification)
asn1.register_oid (Z3950_ES_EXPORTINV, ExportInvocation)


asn1.register_oid (Z3950_USR_SEARCHRES1, SearchInfoReport)
asn1.register_oid (Z3950_USR_INFO1, OtherInformation)
asn1.register_oid (Z3950_NEG_CHARSET3, CharSetandLanguageNegotiation_3)
asn1.register_oid (Z3950_USR_PRIVATE_OCLC_INFO, OCLC_UserInformation)

# Below here is subject to change without notice, as I try to
# figure out the appropriate balance between convenience and flexibility.

trace_charset = 0

impl_vers = "1.0 beta" # XXX
implementationId = 'PyZ39.50 - contact asl2@pobox.com' # haven't been assigned an official id, apply XXX

def make_attr (set = None, atype = None, val = None, valType = None):
    ae = AttributeElement ()
    if (set <> None):
        ae.attributeSet = set
    ae.attributeType = atype
    if (valType == 'numeric' or (valType == None and isinstance (val, int))):
        ae.attributeValue = ('numeric', val)
    else:
        cattr = AttributeElement['attributeValue']['complex']()
        if (valType == None):
            valType = 'string'
        cattr.list = [(valType, val)]
        ae.attributeValue = ('complex', cattr)
    return ae
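
# make_attr in use (a minimal sketch): a Bib-1 "use" attribute (type 1)
# with numeric value 4, and a string-valued variant. The numeric/complex
# branch above is picked automatically from val's type.
#
#     use_attr = make_attr (atype = 1, val = 4)
#     named_attr = make_attr (atype = 1, val = 'title', valType = 'string')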

# This list is needed to support recordsInSelectedCharSets == 0 when
# character set negotiation is in effect. The reason we don't
# just iterate over Z3950_RECSYN is that many of those are carried
# in OCTET STRINGs, and thus immune to negotiation; but maybe we should
# anyway.

retrievalRecord_oids = [
    Z3950_RECSYN_EXPLAIN_ov,
    Z3950_RECSYN_SUTRS_ov,
    Z3950_RECSYN_OPAC_ov,
    Z3950_RECSYN_SUMMARY_ov,
    Z3950_RECSYN_GRS1_ov,
    Z3950_RECSYN_ES_ov,
    Z3950_RECSYN_FRAGMENT_ov,
    Z3950_RECSYN_SQL_ov]


def register_retrieval_record_oids (ctx, new_codec_name = 'ascii'):
    new_codec = codecs.lookup (new_codec_name)
    def switch_codec ():
        ctx.push_codec ()
        ctx.set_codec (asn1.GeneralString, new_codec)
    for oid in retrievalRecord_oids:
        ctx.register_charset_switcher (oid, switch_codec)

iso_10646_oid_to_name = {
    UNICODE_PART1_XFERSYN_UCS2_ov : 'utf-16', # XXX ucs-2 should differ from utf-16: ucs-2 forbids any characters outside the BMP, whereas utf-16 encodes them as multiple 16-bit units

    # UNICODE_PART1_XFERSYN_UCS4_ov : 'ucs-4', # XXX no Python support for this encoding?
    UNICODE_PART1_XFERSYN_UTF16_ov : 'utf-16',
    UNICODE_PART1_XFERSYN_UTF8_ov : 'utf-8'
    }

def try_get_iso10646_oid (charset_name):
    for k, v in iso_10646_oid_to_name.iteritems ():
        if charset_name == v:
            return k
    # XXX note that we don't know which of the {UCS2, UTF16} oids we'll
    # get from this.

def asn_charset_to_name (charset_tup):
    if trace_charset:
        print "asn_charset_to_name", charset_tup
    charset_name = None
    (typ, charset) = charset_tup
    if typ == 'iso10646':
        charset_name = iso_10646_oid_to_name.get (charset.encodingLevel,
                                                  None)
    elif typ == 'private':
        (spectyp, val) = charset
        if spectyp == 'externallySpecified':
            oid = getattr (val, 'direct_reference', None)
            if oid == Z3950_NEG_PRIVATE_INDEXDATA_CHARSETNAME_ov:
                enctyp, encval = val.encoding
                if enctyp == 'octet-aligned':
                    charset_name = encval
    if trace_charset:
        print "returning charset", charset_name
    return charset_name


def charset_to_asn (charset_name):
    oid = try_get_iso10646_oid (charset_name)
    if oid <> None:
        iso10646 = Iso10646_3 ()
        iso10646.encodingLevel = oid
        return ('iso10646', iso10646)
    else:
        ext = asn1.EXTERNAL ()
        ext.direct_reference = Z3950_NEG_PRIVATE_INDEXDATA_CHARSETNAME_ov
        ext.encoding = ('octet-aligned', charset_name)
        return ('private', ('externallySpecified', ext))
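
# Round-trip sketch (illustrative): charset_to_asn and asn_charset_to_name
# invert each other for names both sides understand. Anything without an
# ISO 10646 OID falls back to Index Data's private EXTERNAL form.
#
#     tup = charset_to_asn ('utf-8')      # ('iso10646', ...)
#     assert asn_charset_to_name (tup) == 'utf-8'
#     tup = charset_to_asn ('latin-1')    # ('private', ('externallySpecified', ...))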

class CharsetNegotReq:
    def __init__ (self, charset_list = None, lang_list = None,
                  records_in_charsets = None):
        """charset_list is a list of character set names, either ISO 10646
        (UTF-8 or UTF-16) or private. We support Index Data's semantics
        for private character sets (see
        http://www.indexdata.dk/pipermail/yazlist/2003-March/000504.html), so
        you can pass any character set name for which Python has a codec
        installed (but please don't use rot13 in production). Note that
        there should be at most one of each of (ISO 10646, private). (No, I
        don't know why, but it says so in the ASN.1 definition comments.)

        lang_list is a list of language codes, as defined in ANSI Z39.53-1994
        (see, e.g., http://xml.coverpages.org/nisoLang3-1994.html).

        records_in_charsets governs whether charset negotiation applies to
        records as well.

        Any of these parameters can be None, since the corresponding
        elements in the ASN.1 are OPTIONAL.
        """
        self.charset_list = charset_list
        self.lang_list = lang_list
        self.records_in_charsets = records_in_charsets

    def __str__ (self):
        return "Charset negot request %s %s %s" % (
            str (self.charset_list), str (self.lang_list),
            str (self.records_in_charsets))

    def pack_proposal (self):
        origin_prop = OriginProposal_3 ()
        if self.charset_list <> None:
            proposedCharSets = []
            for charset_name in self.charset_list:
                proposedCharSets.append (charset_to_asn (charset_name))

            origin_prop.proposedCharSets = proposedCharSets
        if self.lang_list <> None:
            origin_prop.proposedlanguages = self.lang_list
        if self.records_in_charsets <> None:
            origin_prop.recordsInSelectedCharSets = (
                self.records_in_charsets)
        return ('proposal', origin_prop)

    def unpack_proposal (self, csn):
        (tag, proposal) = csn
        assert (tag == 'proposal')
        pcs = getattr (proposal, 'proposedCharSets', None)
        if pcs <> None:
            if trace_charset:
                print "pcs", pcs
            self.charset_list = []

            for charset in pcs:
                charset_name = asn_charset_to_name (charset)
                if charset_name <> None:
                    self.charset_list.append (charset_name)

        lang = getattr (proposal, 'proposedlanguages', None)
        if lang <> None:
            self.lang_list = lang
        self.records_in_charsets = getattr (proposal,
                                            'recordsInSelectedCharSets', None)

class CharsetNegotResp:
    def __init__ (self, charset = None, lang = None,
                  records_in_charsets = None):
        self.charset = charset
        self.lang = lang
        self.records_in_charsets = records_in_charsets

    def __str__ (self):
        return "CharsetNegotResp: %s %s %s" % (
            str (self.charset), str (self.lang),
            str (self.records_in_charsets))

    def unpack_negot_resp (self, neg_resp):
        typ, val = neg_resp
        assert (typ == 'response')
        self.charset = None
        scs = getattr (val, 'selectedCharSets', None)
        if scs <> None:
            self.charset = asn_charset_to_name (scs)
        self.lang = getattr (val, 'selectedLanguage', None)
        self.records_in_charsets = getattr (
            val, 'recordsInSelectedCharSets', None)

    def pack_negot_resp (self):
        resp = TargetResponse_3 ()
        if self.charset <> None:
            resp.selectedCharSets = charset_to_asn (self.charset)
        if self.lang <> None:
            resp.selectedLanguage = self.lang
        if self.records_in_charsets <> None:
            resp.recordsInSelectedCharSets = self.records_in_charsets
        return ('response', resp)


def get_charset_negot (init):
    # can be passed either InitializeRequest or InitializeResponse
    if trace_charset:
        print init
    if not init.options ['negotiation']:
        return None
    otherInfo = []
    if hasattr (init, 'otherInfo'):
        otherInfo = init.otherInfo
    elif hasattr (init, 'userInformationField'):
        ui = init.userInformationField
        if ui.direct_reference == Z3950_USR_INFO1_ov:
            (enctype, otherInfo) = ui.encoding

    for oi in otherInfo:
        if trace_charset:
            print oi
        (typ, val) = oi.information
        if typ == 'externallyDefinedInfo':
            if val.direct_reference == Z3950_NEG_CHARSET3_ov:
                (typ, val) = val.encoding
                if typ == 'single-ASN1-type':
                    return val

    return None

def set_charset_negot (init, val, v3_flag):
    # again, can be passed either InitializeRequest or Response
    negot = asn1.EXTERNAL ()
    negot.direct_reference = Z3950_NEG_CHARSET3_ov
    negot.encoding = ('single-ASN1-type', val)
    OtherInfoElt = OtherInformation[0]
    oi_elt = OtherInfoElt ()
    oi_elt.information = ('externallyDefinedInfo', negot)
    other_info = [oi_elt]
    if trace_charset:
        print v3_flag, oi_elt

    if v3_flag:
        init.otherInfo = other_info
    else:
        ui = asn1.EXTERNAL ()

        ui.direct_reference = Z3950_USR_INFO1_ov
        ui.encoding = ('single-ASN1-type', other_info) # XXX test this
        # see http://lcweb.loc.gov/z3950/agency/defns/user-1.html
        init.userInformationField = ui


def_msg_size = 0x10000

# Rethink optionslist. Maybe we should just turn on all the
# bits the underlying code supports? We do need to be able to
# turn off multiple result sets for testing (see tests/test2.py),
# but that doesn't have to be the default.
def make_initreq (optionslist = None, authentication = None, v3 = 0,
                  negotiate_charset = 0, preferredMessageSize = 0x100000,
                  maximumRecordSize = 0x100000, implementationId = "",
                  implementationName = "", implementationVersion = ""):

    # see http://lcweb.loc.gov/z3950/agency/wisdom/unicode.html
    InitReq = InitializeRequest ()
    InitReq.protocolVersion = ProtocolVersion ()
    InitReq.protocolVersion ['version_1'] = 1
    InitReq.protocolVersion ['version_2'] = 1
    InitReq.protocolVersion ['version_3'] = v3
    InitReq.options = Options ()
    if optionslist <> None:
        for o in optionslist:
            InitReq.options[o] = 1
    InitReq.options ['search'] = 1
    InitReq.options ['present'] = 1
    InitReq.options ['delSet'] = 1
    InitReq.options ['scan'] = 1
    InitReq.options ['sort'] = 1
    InitReq.options ['extendedServices'] = 1
    InitReq.options ['dedup'] = 1
    InitReq.options ['negotiation'] = negotiate_charset # XXX can negotiate other stuff, too

    # Preferred and Exceptional msg sizes are pretty arbitrary --
    # we dynamically allocate no matter what.
    InitReq.preferredMessageSize = preferredMessageSize
    InitReq.exceptionalRecordSize = maximumRecordSize

    if (implementationId):
        InitReq.implementationId = implementationId
    else:
        InitReq.implementationId = impl_id
    if (implementationName):
        InitReq.implementationName = implementationName
    else:
        InitReq.implementationName = 'PyZ3950'
    if (implementationVersion):
        InitReq.implementationVersion = implementationVersion
    else:
        InitReq.implementationVersion = impl_vers

    if authentication <> None:
        class UP: pass
        up = UP ()
        upAttrList = ['userId', 'password', 'groupId']
        for val, attr in zip (authentication, upAttrList): # silently truncate
            if val <> None:
                setattr (up, attr, val)
        InitReq.idAuthentication = ('idPass', up)

    return InitReq
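
# make_initreq sketch (illustrative): authentication is a
# (user, password, group) tuple; None members are simply left out of the
# idPass structure by the loop above.
#
#     req = make_initreq (['search', 'present'],
#                         authentication = ('me', 'secret', None),
#                         v3 = 1)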

def make_sreq (query, dbnames, rsn, **kw):
    sreq = SearchRequest ()
    sreq.smallSetUpperBound = 0
    sreq.largeSetLowerBound = 1
    sreq.mediumSetPresentNumber = 0
    # As per http://lcweb.loc.gov/z3950/lcserver.html, Jun 07 2001,
    # to work around Endeavor bugs in 1.13:
    sreq.replaceIndicator = 1
    sreq.resultSetName = rsn
    sreq.databaseNames = dbnames
    sreq.query = query
    for (key, val) in kw.items ():
        setattr (sreq, key, val)
    return sreq
1252  python/PyZ3950/zmarc.py  (new file; diff suppressed because it is too large)

965  python/PyZ3950/zoom.py  (new file)
@@ -0,0 +1,965 @@
#!/usr/bin/env python

"""Implements the ZOOM 1.4 API (http://zoom.z3950.org/api)
for Z39.50.

Some global notes on the binding (these will only make sense when read
after the API document):

Get/Set Option is implemented as member attribute access or
assignment. Implementations are encouraged to throw an AttributeError
for unsupported (or, possibly, mistyped) attributes. (Production
applications are encouraged to catch such errors.)

All errors are reported as exceptions deriving from ZoomError (or, at
least, it's a bug if they aren't). Bib1Err is defined as part of the
binding; all the rest are specific to this implementation.

ResultSet provides a sequence interface, with standard Python
iteration, indexing, and slicing. So if rs is a ResultSet, use
len (rs) for Get_Size and rs[i] for Get_Record, or iterate with
for r in rs: foo(r). Any attempt to access a record for which the
server returned a surrogate diagnostic will raise the appropriate
Bib1Err exception.

For Record, Render_Record is implemented as Python __str__. The
'syntax' member contains the string-format record syntax, and the
'data' member contains the raw data.

The following query types are supported:
- "CCL", ISO 8777 (http://www.indexdata.dk/yaz/doc/tools.tkl#CCL)
- "S-CCL", the same, but interpreted on the server side
- "CQL", the Common Query Language (http://www.loc.gov/z3950/agency/zing/cql/)
- "S-CQL", the same, but interpreted on the server side
- "PQF", Index Data's Prefix Query Format (http://www.indexdata.dk/yaz/doc/tools.tkl#PQF)
- "C2", Cheshire II query syntax (http://cheshire.berkeley.edu/cheshire2.html#zfind)
- "ZSQL", Z-SQL, see (http://archive.dstc.edu.au/DDU/projects/Z3950/Z+SQL/)
- "CQL-TREE", a general-purpose escape allowing any object with a toRPN method to be used, e.g. the CQL tree objects

ScanSet, like ResultSet, has a sequence interface. The i-th element
is a dictionary. See the ScanSet documentation for supported keys.

Sample usage:
    from PyZ3950 import zoom
    conn = zoom.Connection ('z3950.loc.gov', 7090)
    conn.databaseName = 'VOYAGER'
    conn.preferredRecordSyntax = 'USMARC'
    query = zoom.Query ('CCL', 'ti="1066 and all that"')
    res = conn.search (query)
    for r in res:
        print str(r)
    conn.close ()

I hope everything else is clear from the docstrings and the abstract
API: let me know if that's wrong, and I'll try to do better.

For some purposes (I think the only one is writing Z39.50 servers),
you may want to use the functions in the z3950 module instead."""

from __future__ import nested_scopes

__author__ = 'Aaron Lav (asl2@pobox.com)'
__version__ = '1.0' # XXX

import getopt
import sys

# TODO:
# finish lang/charset (requires charset normalization, confer w/ Adam)
# implement piggyback
# implement schema (not useful)
# implement setname (impossible?)

from PyZ3950 import z3950
from PyZ3950 import ccl
from PyZ3950 import asn1
from PyZ3950 import zmarc
from PyZ3950 import bib1msg
from PyZ3950 import grs1
from PyZ3950 import oids

# Azaroth 2003-12-04:
from PyZ3950 import CQLParser, SRWDiagnostics, pqf
from PyZ3950 import c2query as c2
asn1.register_oid (oids.Z3950_QUERY_SQL, z3950.SQLQuery)


def my_enumerate (l): # replace w/ enumerate when we go to Python 2.3
    return zip (range (len (l)), l)

trace_extract = 0
"""trace extracting records from search/present reqs"""

class ZoomError (Exception):
    """Base class for all errors reported from this module"""
    pass

class ConnectionError (ZoomError):
    """Exception for TCP error"""
    pass

class ClientNotImplError (ZoomError):
    """Exception for ZOOM client-side functionality not implemented (bug
    author)"""
    pass

class ServerNotImplError (ZoomError):
    """Exception for function not implemented on server"""
    pass

class QuerySyntaxError (ZoomError):
    """Exception for query not parsable by client"""
    pass

class ProtocolError (ZoomError):
    """Exception for malformatted server response"""
    pass

class UnexpectedCloseError (ProtocolError):
    """Exception for unexpected (z3950, not tcp) close from server"""
    pass

class UnknownRecSyn (ZoomError):
    """Exception for unknown record syntax returned from server"""
    pass

class Bib1Err (ZoomError):
    """Exception for BIB-1 error"""
    def __init__ (self, condition, message, addtlInfo):
        self.condition = condition
        self.message = message
        self.addtlInfo = addtlInfo
        ZoomError.__init__ (self)
    def __str__ (self):
        return "Bib1Err: %d %s %s" % (self.condition, self.message, self.addtlInfo)


class _ErrHdlr:
    """Error-handling services"""
    err_attrslist = ['errCode', 'errMsg', 'addtlInfo']
    def err (self, condition, addtlInfo, oid):
        """Translate condition + oid to message, save, and raise exception"""
        self.errCode = condition
        self.errMsg = bib1msg.lookup_errmsg (condition, oid)
        self.addtlInfo = addtlInfo
        raise Bib1Err (self.errCode, self.errMsg, self.addtlInfo)
    def err_diagrec (self, diagrec):
        (typ, data) = diagrec
        if typ == 'externallyDefined':
            raise ClientNotImplError ("Unknown external diagnostic" + str (data))
        addinfo = data.addinfo [1] # don't care about v2 vs v3
        self.err (data.condition, addinfo, data.diagnosticSetId)


_record_type_dict = {}
"""Map oid to renderer, field-counter, and field-getter functions"""

def _oid_to_key (oid):
    for (k, v) in _record_type_dict.items ():
        if v.oid == oid:
            return k
    raise UnknownRecSyn (oid)

def _extract_attrs (obj, attrlist):
    kw = {}
    for key in attrlist:
        if hasattr (obj, key):
            kw[key] = getattr (obj, key)
    return kw

class _AttrCheck:
    """Prevent typos"""
    attrlist = []
    not_implement_attrs = []
    def __setattr__ (self, attr, val):
        """Ensure attr is in attrlist (list of allowed attributes), or
        private (begins w/ '_'), or begins with 'X-' (reserved for users)"""
        if attr[0] == '_' or attr in self.attrlist or attr[0:2] == 'X-':
            self.__dict__[attr] = val
        elif (attr in self.not_implement_attrs):
            raise ClientNotImplError (attr)
        else:
            raise AttributeError (attr, val)
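
# _AttrCheck in practice (a minimal sketch): assigning a name outside
# attrlist fails fast instead of being silently ignored.
#
#     conn = Connection ('z3950.loc.gov', 7090, connect = 0)
#     conn.preferredRecordSyntax = 'USMARC'  # allowed, in attrlist
#     conn.preferedRecordSyntax = 'USMARC'   # typo: raises AttributeError
#     conn.piggyback = 1                     # raises ClientNotImplError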

class Connection (_AttrCheck, _ErrHdlr):
    """Connection object"""

    not_implement_attrs = ['piggyback',
                           'schema',
                           'proxy',
                           'async']
    search_attrs = ['smallSetUpperBound',
                    'largeSetLowerBound',
                    'mediumSetPresentNumber',
                    'smallSetElementSetNames',
                    'mediumSetElementSetNames']
    init_attrs = ['user',
                  'password',
                  'group',
                  'maximumRecordSize',
                  'preferredMessageSize',
                  'lang',
                  'charset',
                  'implementationId',
                  'implementationName',
                  'implementationVersion'
                  ]
    scan_zoom_to_z3950 = {
        # translate names from ZOOM spec to Z39.50 spec names
        'stepSize' : 'stepSize',
        'numberOfEntries' : 'numberOfTermsRequested',
        'responsePosition' : 'preferredPositionInResponse'
        }

    attrlist = search_attrs + init_attrs + scan_zoom_to_z3950.keys () + [
        'databaseName',
        'namedResultSets',
        'preferredRecordSyntax', # these three inheritable by RecordSet
        'elementSetName',
        'presentChunk',
        'targetImplementationId',
        'targetImplementationName',
        'targetImplementationVersion',
        'host',
        'port',
        ] + _ErrHdlr.err_attrslist

    _queryTypes = ['S-CQL', 'S-CCL', 'RPN', 'ZSQL']
    _cli = None
    host = ""
    port = 0

    # and now, some defaults
    namedResultSets = 1
    elementSetName = 'F'
    preferredRecordSyntax = 'USMARC'
    preferredMessageSize = 0x100000
    maximumRecordSize = 0x100000
    stepSize = 0
    numberOfEntries = 20 # for SCAN
    responsePosition = 1
    databaseName = 'Default'
    implementationId = 'PyZ3950'
    implementationName = 'PyZ3950 1.0/ZOOM v1.4'
    implementationVersion = '1.0'
    lang = None
    charset = None
    user = None
    password = None
    group = None
    presentChunk = 20 # for result sets

    def __init__ (self, host, port, connect = 1, **kw):
        """Establish connection to hostname:port. kw contains initial
        values for options, and is useful for options which affect
        the InitializeRequest. Currently supported values:

        user                   Username for authentication
        password               Password for authentication
        group                  Group for authentication
        maximumRecordSize      Maximum size in bytes of one record
        preferredMessageSize   Maximum size in bytes for response
        lang                   3-letter language code
        charset                Character set
        implementationId       Id for client implementation
        implementationName     Name for client implementation
        implementationVersion  Version of client implementation

        """

        self.host = host
        self.port = port
        self._resultSetCtr = 0
        for (k, v) in kw.items ():
            setattr (self, k, v)
        if (connect):
            self.connect ()

    def connect (self):
        self._resultSetCtr += 1
        self._lastConnectCtr = self._resultSetCtr

        # Bump counters first: even if we didn't reconnect this time,
        # we could have, and so any use of old connections is an error.
        # (Old cached-and-accessed data is OK to use; cached but
        # not-yet-accessed data is probably an error, but a
        # not-yet-caught error.)

        if self._cli <> None and self._cli.sock <> None:
            return

        initkw = {}
        for attr in self.init_attrs:
            initkw[attr] = getattr (self, attr)
        if (self.namedResultSets):
            options = ['namedResultSets']
        else:
            options = []
        initkw ['ConnectionError'] = ConnectionError
        initkw ['ProtocolError'] = ProtocolError
        initkw ['UnexpectedCloseError'] = UnexpectedCloseError
        self._cli = z3950.Client (self.host, self.port,
                                  optionslist = options, **initkw)
        self.namedResultSets = self._cli.get_option ('namedResultSets')
        self.targetImplementationId = getattr (self._cli.initresp, 'implementationId', None)
        self.targetImplementationName = getattr (self._cli.initresp, 'implementationName', None)
        self.targetImplementationVersion = getattr (self._cli.initresp, 'implementationVersion', None)
        if (hasattr (self._cli.initresp, 'userInformationField')):
            # Weird: U of Chicago returns an EXTERNAL with nothing
            # but 'encoding', ('octet-aligned', '2545') filled in.
            if (hasattr (self._cli.initresp.userInformationField,
                         'direct_reference') and
                self._cli.initresp.userInformationField.direct_reference ==
                oids.Z3950_USR_PRIVATE_OCLC_INFO_ov):
                # see http://www.oclc.org/support/documentation/firstsearch/z3950/fs_z39_config_guide/ for docs
                oclc_info = self._cli.initresp.userInformationField.encoding [1]
                # The docs are a little unclear, but I presume we're
                # supposed to report failure whenever a failReason is given.

                if hasattr (oclc_info, 'failReason'):
                    raise UnexpectedCloseError ('OCLC_Info ',
                                                oclc_info.failReason,
                                                getattr (oclc_info, 'text',
                                                         ' no text given '))

    def search (self, query):
        """Search, taking Query object, returning ResultSet"""
        if (not self._cli):
            self.connect ()
        assert (query.typ in self._queryTypes)
        dbnames = self.databaseName.split ('+')
        self._cli.set_dbnames (dbnames)
        cur_rsn = self._make_rsn ()
        recv = self._cli.search_2 (query.query,
                                   rsn = cur_rsn,
                                   **_extract_attrs (self, self.search_attrs))
        self._resultSetCtr += 1
        rs = ResultSet (self, recv, cur_rsn, self._resultSetCtr)
        return rs
    # and 'Error Code', 'Error Message', and 'Addt'l Info' methods are
    # still needed

    def scan (self, query):
        if (not self._cli):
            self.connect ()
        self._cli.set_dbnames ([self.databaseName])
        kw = {}
        for k, xl in self.scan_zoom_to_z3950.items ():
            if hasattr (self, k):
                kw [xl] = getattr (self, k)
        return ScanSet (self._cli.scan (query.query, **kw))

    def _make_rsn (self):
        """Return result set name"""
        if self.namedResultSets:
            return "rs%d" % self._resultSetCtr
        else:
            return z3950.default_resultSetName

    def close (self):
        """Close connection"""
        self._cli.close ()
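
    # scan in use (a minimal sketch): ZOOM-level knobs such as
    # numberOfEntries are picked up from the connection through
    # scan_zoom_to_z3950 above.
    #
    #     conn.numberOfEntries = 10
    #     for entry in conn.scan (Query ('CCL', 'ti=dinosaur')):
    #         print entry['term'], entry.get ('freq')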

    def sort (self, sets, keys):
        """Sort sets by keys, return resultset interface"""
        if (not self._cli):
            self.connect ()

        # XXX This should probably be shuffled down into z3950.py
        sortrelations = ['ascending', 'descending', 'ascendingByFrequency', 'descendingByFrequency']

        req = z3950.SortRequest ()
        req.inputResultSetNames = []
        for s in sets:
            s._check_stale ()
            req.inputResultSetNames.append (s._resultSetName)
        cur_rsn = self._make_rsn ()
        req.sortedResultSetName = cur_rsn

        zkeys = []
        for k in keys:
            zk = z3950.SortKeySpec ()
            zk.sortRelation = sortrelations.index (k.relation)
            zk.caseSensitivity = k.caseInsensitive
            if (k.missingValueAction):
                zk.missingValueAction = (k.missingValueAction, None)
            if (k.missingValueData):
                zk.missingValueAction = ('missingValueData', k.missingValueData)
            value = k.sequence
            if (k.type == 'accessPoint'):
                if (value.typ <> 'RPN'):
                    raise ValueError # XXX
                l = z3950.SortKey['sortAttributes']()
                l.id = value.query[1].attributeSet
                l.list = value.query[1].rpn[1][1].attributes
                seq = ('sortAttributes', l)
            elif (k.type == 'private'):
                seq = ('privateSortKey', value)
            elif (k.type == 'elementSetName'):
                spec = z3950.Specification ()
                spec.elementSpec = ('elementSetName', value)
                seq = ('elementSpec', spec)
            else:
                raise ValueError # XXX
            spec = ('generic', seq)
            zk.sortElement = spec
            zkeys.append (zk)
        req.sortSequence = zkeys
        recv = self._cli.transact (('sortRequest', req), 'sortResponse')

        self._resultSetCtr += 1
        if (hasattr (recv, 'diagnostics')):
            diag = recv.diagnostics[0][1]
            self.err (diag.condition, diag.addinfo, diag.diagnosticSetId)

        if (not hasattr (recv, 'resultCount')):
            # First guess: sum of all input sets
            recv.resultCount = 0
            for set in sets:
                recv.resultCount += len (set)
            # Check for addInfo to override
            try:
                val = recv.otherInfo[0].information[1]
                if (val[:14] == 'Result-count: '):
                    recv.resultCount = int (val[14:])
            except:
                pass

        rs = ResultSet (self, recv, cur_rsn, self._resultSetCtr)
        return rs

class SortKey (_AttrCheck):
    attrlist = ['relation', 'caseInsensitive', 'missingValueAction', 'missingValueData', 'type', 'sequence']
    relation = "ascending"
    caseInsensitive = 1
    missingValueAction = ""
    missingValueData = ""
    type = "accessPoint"
    sequence = ""

    def __init__ (self, **kw):
        for k in kw.keys ():
            setattr (self, k, kw[k])
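
# SortKey in use (a minimal sketch): sort one or more result sets by an
# element set name, ascending, getting a new ResultSet back.
#
#     key = SortKey (type = 'elementSetName', sequence = 'title',
#                    relation = 'ascending')
#     sorted_rs = conn.sort ([rs], [key])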

class Query:
    def __init__ (self, typ, query):
        """Creates Query object.
        Supported query types: CCL, S-CCL, CQL, S-CQL, PQF, C2, ZSQL, CQL-TREE
        """
        typ = typ.upper ()
        # XXX maybe replace if ... elif ... with dict mapping querytype to func
        if typ == 'CCL':
            self.typ = 'RPN'
            try:
                self.query = ccl.mk_rpn_query (query)
            except ccl.QuerySyntaxError, err:
                print "zoom raising", str (err), " for", query
                raise QuerySyntaxError (str (err))
        elif typ == 'S-CCL': # server-side ccl
            self.typ = typ
            self.query = ('type-2', query)
        elif typ == 'S-CQL': # server-side cql
            self.typ = typ
            xq = asn1.EXTERNAL ()
            xq.direct_reference = oids.Z3950_QUERY_CQL_ov
            xq.encoding = ('single-ASN1-type', query)
            self.query = ('type_104', xq)
        elif typ == 'CQL': # CQL to RPN transformation
            self.typ = 'RPN'
            try:
                q = CQLParser.parse (query)
                rpnq = z3950.RPNQuery ()
                # XXX Allow Attribute Architecture somehow?
                rpnq.attributeSet = oids.Z3950_ATTRS_BIB1_ov
                rpnq.rpn = q.toRPN ()
                self.query = ('type_1', rpnq)
            except SRWDiagnostics.SRWDiagnostic, err:
                raise err
            except:
                raise QuerySyntaxError
        elif typ == 'PQF': # PQF to RPN transformation
            self.typ = 'RPN'
            try:
                self.query = pqf.parse (query)
            except:
                raise QuerySyntaxError

        elif typ == 'C2': # Cheshire2 syntax
            self.typ = 'RPN'
            try:
                q = c2.parse (query)
                self.query = q[0]
            except:
                raise QuerySyntaxError
        elif typ == 'ZSQL': # external SQL
            self.typ = typ
            xq = asn1.EXTERNAL ()
            xq.direct_reference = oids.Z3950_QUERY_SQL_ov
            q = z3950.SQLQuery ()
            q.queryExpression = query
            xq.encoding = ('single-ASN1-type', q)
            self.query = ('type_104', xq)
        elif typ == 'CQL-TREE': # tree to RPN
            self.typ = 'RPN'
            try:
                rpnq = z3950.RPNQuery ()
                # XXX Allow Attribute Architecture
                rpnq.attributeSet = oids.Z3950_ATTRS_BIB1_ov
                rpnq.rpn = query.toRPN ()
                self.query = ('type_1', rpnq)
            except SRWDiagnostics.SRWDiagnostic, err:
                raise err
            except:
                raise QuerySyntaxError
        else:
            raise ClientNotImplError ('%s queries not supported' % typ)
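
# Query in use (a minimal sketch): the same title search in three of the
# supported syntaxes; each becomes a (typ, query) pair that
# Connection.search understands.
#
#     q1 = Query ('CCL', 'ti="1066 and all that"')
#     q2 = Query ('CQL', 'title = "1066 and all that"')
#     q3 = Query ('PQF', '@attr 1=4 "1066 and all that"')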

class ResultSet (_AttrCheck, _ErrHdlr):
    """Cache results, presenting read-only sequence interface. If
    a surrogate diagnostic is returned for the i-th record, an
    appropriate exception will be raised on access to the i-th
    element (either access by itself or as part of a slice)."""

    inherited_elts = ['elementSetName', 'preferredRecordSyntax',
                      'presentChunk']
    attrlist = inherited_elts + _ErrHdlr.err_attrslist
    not_implement_attrs = ['piggyback',
                           'schema']

    def __init__ (self, conn, searchResult, resultSetName, ctr):
        """Only for creation by Connection object"""
        self._conn = conn # needed for 'option inheritance', see ZOOM spec
        self._searchResult = searchResult
        self._resultSetName = resultSetName
        self._records = {}
        self._ctr = ctr
        # _records is a dict indexed by preferredRecordSyntax of
        # dicts indexed by elementSetName of lists of records
        self._ensure_recs ()

        # Whether there are any records or not, there may be
        # nonsurrogate diagnostics. _extract_recs will get them.
        if hasattr (self._searchResult, 'records'):
            self._extract_recs (self._searchResult.records, 0)

    def __getattr__ (self, key):
        """Forward attribute access to Connection if appropriate"""
        if self.__dict__.has_key (key):
            return self.__dict__[key]
        if key in self.inherited_elts:
            return getattr (self._conn, key) # may raise AttributeError
        raise AttributeError (key)

    def _make_keywords (self):
        """Set up dict of parms for present request"""
        kw = {}
        # The need for translation here from preferredRecordSyntax to
        # recsyn is kinda pointless.
        if hasattr (self, 'preferredRecordSyntax'):
            try:
                kw['recsyn'] = _record_type_dict [
                    self.preferredRecordSyntax].oid
            except KeyError, err:
                raise ClientNotImplError ('Unknown record syntax ' +
                                          self.preferredRecordSyntax)
        if hasattr (self, 'elementSetName'):
            kw['esn'] = ('genericElementSetName', self.elementSetName)
        return kw

    def __len__ (self):
        """Get number of records"""
        return self._searchResult.resultCount

    def _pin (self, i):
        """Handle negative indices"""
        if i < 0:
            return i + len (self)
        return i

    def _ensure_recs (self):
        if not self._records.has_key (self.preferredRecordSyntax):
            self._records [self.preferredRecordSyntax] = {}
            self._records [self.preferredRecordSyntax][
                self.elementSetName] = [None] * len (self)
        if not self._records[self.preferredRecordSyntax].has_key (
            self.elementSetName):
            self._records [self.preferredRecordSyntax][
                self.elementSetName] = [None] * len (self)

    def _get_rec (self, i):
        return self._records [self.preferredRecordSyntax][
            self.elementSetName][i]

    def _check_stale (self):
        if self._ctr < self._conn._lastConnectCtr:
            raise ConnectionError ('Stale result set used')
        # XXX is this right?
        if (not self._conn.namedResultSets) and \
           self._ctr <> self._conn._resultSetCtr:
            raise ServerNotImplError ('Multiple Result Sets')
        # XXX or this?

    def _ensure_present (self, i):
        self._ensure_recs ()
        if self._get_rec (i) == None:
            self._check_stale ()
            maxreq = self.presentChunk
            if maxreq == 0: # get everything at once
                lbound = i
                count = len (self) - lbound
            else:
                lbound = (i / maxreq) * maxreq
                count = min (maxreq, len (self) - lbound)
            kw = self._make_keywords ()
            if self._get_rec (lbound) == None:
                presentResp = self._conn._cli.present (
                    start = lbound + 1, # + 1 b/c 1-based
                    count = count,
                    rsn = self._resultSetName,
                    **kw)
                if not hasattr (presentResp, 'records'):
                    raise ProtocolError (str (presentResp))
                self._extract_recs (presentResp.records, lbound)
            # Maybe there was too much data to fit into
            # range (lbound, lbound + count). If so, try
            # retrieving just one record. XXX could try
            # retrieving more, up to next cache bdary.
            if i <> lbound and self._get_rec (i) == None:
                presentResp = self._conn._cli.present (
                    start = i + 1,
                    count = 1,
                    rsn = self._resultSetName,
                    **kw)
                self._extract_recs (presentResp.records, i)
        rec = self._records [self.preferredRecordSyntax][
            self.elementSetName][i]
        if rec <> None and rec.is_surrogate_diag ():
            rec.raise_exn ()

    def __getitem__ (self, i):
        """Ensure item is present, and return a Record"""
        i = self._pin (i)
        if i >= len (self):
            raise IndexError
        self._ensure_present (i)
        return self._records [self.preferredRecordSyntax][
            self.elementSetName][i]

    def __getslice__ (self, i, j):
        i = self._pin (i)
        j = self._pin (j)
        if j > len (self):
            j = len (self)
        for k in range (i, j):
            self._ensure_present (k)
        if len (self._records) == 0: # XXX is this right?
            return []
        return self._records[self.preferredRecordSyntax][
            self.elementSetName][i:j]

    def _extract_recs (self, records, lbound):
        (typ, recs) = records
        if trace_extract:
            print "Extracting", len (recs), "starting at", lbound
        if typ == 'nonSurrogateDiagnostic':
            self.err (recs.condition, "", recs.diagnosticSetId)
        elif typ == 'multipleNonSurDiagnostics':
            # See Zoom mailing list discussion of 2002-07-24 to justify
            # ignoring all but the first error.
            diagRec = recs [0]
            self.err_diagrec (diagRec)
        if (typ <> 'responseRecords'):
            raise ProtocolError ("Bad records typ " + str (typ) + str (recs))
        for i, r in my_enumerate (recs):
            r = recs [i]
            dbname = getattr (r, 'name', '')
            (typ, data) = r.record
            if (typ == 'surrogateDiagnostic'):
                rec = SurrogateDiagnostic (data)

            elif typ == 'retrievalRecord':
                oid = data.direct_reference
                dat = data.encoding
                (typ, dat) = dat
                if (oid == oids.Z3950_RECSYN_USMARC_ov):
                    if typ <> 'octet-aligned':
                        raise ProtocolError (
                            "Weird record EXTERNAL MARC type: " + typ)
                rec = Record (oid, dat, dbname)
            else:
                raise ProtocolError ("Bad typ %s data %s" %
                                     (str (typ), str (data)))
            self._records[self.preferredRecordSyntax][
                self.elementSetName][lbound + i] = rec

    def delete (self): # XXX or can I handle this w/ a __del__ method?
        """Delete result set"""
        res = self._conn._cli.delete (self._resultSetName)
        if res == None: return # server doesn't support Delete
        # XXX should I throw an exn for delete errors? Probably.

    # and 'Error Code', 'Error Message', and 'Addt'l Info' methods

    def sort (self, keys):
        return self._conn.sort ([self], keys)

class SurrogateDiagnostic (_ErrHdlr):
    """Represent surrogate diagnostic. Raise appropriate exception
    on access to syntax or data, or when raise_exn method is called.
    Currently, RecordSet relies on the return from is_surrogate_diag (),
    and calls raise_exn based on that."""
    def __init__ (self, diagrec):
        self.diagrec = diagrec
    def is_surrogate_diag (self):
        return 1
    def raise_exn (self):
        self.err_diagrec (self.diagrec)
    def __getattr__ (self, attr):
        if attr == 'data' or attr == 'syntax':
            self.raise_exn ()
        return _ErrHdlr.__getattr__ (self, attr)

class Record:
    """Represent retrieved record. 'syntax' attribute is a string,
    'data' attribute is the data, which is:

    USMARC  -- raw MARC data
    SUTRS   -- a string (possibly in the future unicode)
    XML     -- ditto
    GRS-1   -- a tree (see grs1.py for details)
    EXPLAIN -- a hard-to-describe format (contact me if you're actually
               using this)
    OPAC    -- ditto

    Other representations are not yet defined."""
    def __init__ (self, oid, data, dbname):
        """Only for use by ResultSet"""
        self.syntax = _oid_to_key (oid)
        self._rt = _record_type_dict [self.syntax]
        self.data = self._rt.preproc (data)
        self.databaseName = dbname
    def is_surrogate_diag (self):
        return 0
    def get_fieldcount (self):
        """Get number of fields"""
        return self._rt.fieldcount (self.data)
    def get_field (self, spec):
        """Get field"""
        return self._rt.field (self.data, spec)
    def __str__ (self):
        """Render printably"""
        s = self._rt.renderer (self.data)
        return 'Rec: ' + str (self.syntax) + " " + s

class _RecordType:
    """Map syntax string to OID and per-syntax utility functions"""
    def __init__ (self, name, oid, renderer = lambda v: v,
                  fieldcount = lambda v: 1, field = None, preproc = lambda v: v):
        """Register syntax"""
        self.oid = oid
        self.renderer = renderer
        self.fieldcount = fieldcount
        self.field = field
        self.preproc = preproc
        _record_type_dict [name] = self

# XXX do I want an OPAC class? Probably, and render_OPAC should be
# a member function.

def render_OPAC (opac_data):
    s_list = []
    biblio_oid = opac_data.bibliographicRecord.direct_reference
    if (biblio_oid == z3950.Z3950_RECSYN_USMARC_ov):
        bib_marc = zmarc.MARC (opac_data.bibliographicRecord.encoding [1])
        s_list.append ("Bibliographic %s\n" % (str (bib_marc),))
    else:
        s_list.append ("Unknown bibliographicRecord OID: " + str (biblio_oid))
    for i, hd in my_enumerate (opac_data.holdingsData):
        typ, data = hd
        s_list.append ('Holdings %d:' % (i,))
        if typ == 'holdingsAndCirc':
            def render (item, level = 1):
                s_list = []
                if isinstance (item, asn1.StructBase):
                    for attr, val in item.__dict__.items ():
                        if attr [0] <> '_':
                            s_list.append ("%s%s: %s" % (
                                "\t" * level, attr, "\n".join (render (val, level + 1))))
                elif (isinstance (item, type ([])) and len (item) > 0
                      and isinstance (item [0], asn1.StructBase)):
                    s_list.append ("") # generate newline
                    for i, v in my_enumerate (item):
                        s_list.append ("\t" * (level + 1) + str (i))
                        s_list += render (v, level + 1)
                else:
                    s_list.append (repr (item))
                return s_list
            s_list.append ("\n".join (render (data)))
        elif typ == 'marcHoldingsRecord':
            hold_oid = data.direct_reference
            if hold_oid == z3950.Z3950_RECSYN_USMARC_ov:
                holdings_marc = zmarc.MARC (data.encoding [1])
                s_list.append ("Holdings %s\n" % (str (holdings_marc),))
            else:
                s_list.append ("Unknown holdings OID: " + str (hold_oid))
        else:
            s_list.append ("Unknown holdings type: " + typ)
            # shouldn't happen unless z39.50 definition is extended
    return "\n".join (s_list)

_RecordType ('USMARC', z3950.Z3950_RECSYN_USMARC_ov,
             renderer = lambda v: str (zmarc.MARC (v)))
_RecordType ('UKMARC', z3950.Z3950_RECSYN_UKMARC_ov,
             renderer = lambda v: str (zmarc.MARC (v)))
_RecordType ('SUTRS', z3950.Z3950_RECSYN_SUTRS_ov)
_RecordType ('XML', z3950.Z3950_RECSYN_MIME_XML_ov)
_RecordType ('SGML', z3950.Z3950_RECSYN_MIME_SGML_ov)
_RecordType ('GRS-1', z3950.Z3950_RECSYN_GRS1_ov,
             renderer = lambda v: str (v),
             preproc = grs1.preproc)
_RecordType ('OPAC', z3950.Z3950_RECSYN_OPAC_ov, renderer = render_OPAC)
_RecordType ('EXPLAIN', z3950.Z3950_RECSYN_EXPLAIN_ov,
             renderer = lambda v: str (v))
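
# _RecordType registration sketch (illustrative): adding a syntax is just
# another constructor call, since the table above drives Record rendering.
# The OID tuple here is a made-up stand-in, not a real registration.
#
#     _RecordType ('MY-SYNTAX', (1, 2, 840, 10003, 5, 1000),
#                  renderer = lambda v: repr (v))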
||||
class ScanSet (_AttrCheck, _ErrHdlr):
|
||||
"""Hold result of scan.
|
||||
"""
|
||||
zoom_to_z3950 = { # XXX need to provide more processing for attrs, alt
|
||||
'freq' : 'globalOccurrences',
|
||||
'display': 'displayTerm',
|
||||
'attrs' : 'suggestedAttributes',
|
||||
'alt' : 'alternativeTerm',
|
||||
'other' : 'otherTermInfo'}
|
||||
attrlist = _ErrHdlr.err_attrslist
|
||||
|
||||
def __init__ (self, scanresp):
|
||||
"""For internal use only!"""
|
||||
self._scanresp = scanresp
|
||||
if hasattr (scanresp.entries, 'nonsurrogateDiagnostics'):
|
||||
self.err_diagrec (scanresp.entries.nonsurrogateDiagnostics[0])
|
||||
# Note that specification says that both entries and
|
||||
# nonsurrogate diags can be present. This code will always
|
||||
# raise the exn, and will need to be changed if both are needed.
|

    def __len__ (self):
        """Return number of entries"""
        return self._scanresp.numberOfEntriesReturned

    def _get_rec (self, i):
        if (not hasattr (self._scanresp.entries, 'entries')):
            raise IndexError
        t = self._scanresp.entries.entries[i]
        if t[0] == 'termInfo':
            return t[1]
        else:
            # Only way asserts can fail here is if someone changes
            # the Z39.50 ASN.1 definitions.
            assert (t[0] == 'surrogateDiagnostic')
            diagRec = t[1]
            if diagRec[0] == 'externallyDefined':
                raise ClientNotImplError (
                    'Scan unknown surrogate diagnostic type: ' +
                    str (diagRec))
            assert (diagRec[0] == 'defaultFormat')
            defDiagFmt = diagRec[1]
            self.err (defDiagFmt.condition, defDiagFmt.addinfo,
                      defDiagFmt.diagnosticSetId)

    def get_term (self, i):
        """Return term.  Note that get_{term,field,fields} can throw an
        exception if the i'th term is a surrogate diagnostic."""
        return self._get_rec (i).term

    def get_field (self, field, i):
        """Return the value of one field of the i'th entry:
        term:    term
        freq:    integer
        display: string
        attrs:   currently a z3950 structure; should be a string of attributes
        alt:     currently a z3950 structure; should be [string of attrs, term]
        other:   currently a z3950 structure; the best Python representation
                 is an open question
        """
        f = self.zoom_to_z3950[field]
        r = self._get_rec (i)
        return r.__dict__[f]

    def get_fields (self, i):
        """Return a dictionary mapping ZOOM's field names to the values
        present in the response.  (Like get_field, but for all fields.)"""
        r = self._get_rec (i)
        d = {}
        for k, v in self.zoom_to_z3950.items ():
            val = getattr (r, v, None)
            if val != None:
                d[k] = val
        d["term"] = self.get_term (i)
        return d

    def _pin (self, i):
        # Normalize a possibly negative (from-the-end) index.
        if i < 0:
            return i + len (self)
        return i

    def __getitem__ (self, i):
        return self.get_fields (self._pin (i))

    def __getslice__ (self, i, j):
        i = self._pin (i)
        j = self._pin (j)
        if j > len (self):
            j = len (self)
        return [self.get_fields (k) for k in range (i, j)]

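
# ScanSet usage sketch (hedged: the host, port and database are
# illustrative and a live server is assumed; Connection.scan is defined
# earlier in this module):
#
#     conn = Connection ('z3950.loc.gov', 7090)
#     conn.databaseName = 'VOYAGER'
#     ss = conn.scan (Query ('CCL', 'ti=dinosaur'))
#     for entry in ss[0:5]:        # slices go through __getslice__ above
#         print entry['term'], entry.get ('freq')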


if __name__ == '__main__':
    optlist, args = getopt.getopt (sys.argv[1:], 'h:q:t:f:a:e:v:')
    host = 'LC'
    query = ''
    qtype = 'CCL'
    fmts = ['USMARC']
    esns = ['F']
    validation = None
    # NB: '-a' appears in the getopt string above but has no handler below.
    for (opt, val) in optlist:
        if opt == '-h':
            host = val
        elif opt == '-q':
            query = val
        elif opt == '-t':
            qtype = val
        elif opt == '-f':
            fmts = val.split (',')
        elif opt == '-e':
            esns = val.split (',')
        elif opt == '-v':
            validation = val.split (',')

    rv = z3950.host_dict.get (host)
    if rv is None:
        # Not a known alias: parse an explicit "name:port:dbname" triple.
        (name, port, dbname) = host.split (':')
        port = int (port)
    else:
        (name, port, dbname) = rv

    conn = Connection (name, port)
    conn.databaseName = dbname

    conn.preferredRecordSyntax = fmts[0]

    def run_one (q):
        try:
            query = Query (qtype, q)
            res = conn.search (query)
            for esn in esns:
                for syn in fmts:
                    print "Syntax", syn, "Esn", esn
                    res.preferredRecordSyntax = syn
                    if esn != 'NONE':
                        res.elementSetName = esn
                    try:
                        for r in res:
                            print str (r)
                    except ZoomError, err:
                        print "Zoom exception", err.__class__, err
            # res.delete ()
            # Looks as if Oxford will close the connection if a delete is
            # sent, despite claiming delete support (verified with the yaz
            # client, too).
        except ZoomError, err:
            print "Zoom exception", err.__class__, err

    if query == '':
        while 1:
            q_str = raw_input ('CCL query: ')
            if q_str == '':
                break
            run_one (q_str)
    else:
        run_one (query)
    conn.close ()
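
# Example self-test invocations (hedged: assumes this module is saved as
# zoom.py; the 'LC' default only works if it is a key in z3950.host_dict,
# otherwise pass an explicit name:port:dbname triple with -h):
#
#     python zoom.py -t CCL -q 'ti=dinosaur'
#     python zoom.py -h z3950.loc.gov:7090:VOYAGER -f SUTRS -e B -q 'ti=dinosaur'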
34
python/interface.py
Normal file
@@ -0,0 +1,34 @@
from xml.dom import minidom
from PyZ3950 import zoom

exit_commands = ['exit', 'abort', 'quit', 'bye', 'eat flaming death', 'q']

class Bibsys:
    """Thin wrapper around a ZOOM connection to the BIBSYS z39.50 server."""

    def __init__(self):
        self.conn = zoom.Connection('z3950.bibsys.no', 2100)
        self.conn.databaseName = 'BIBSYS'
        self.conn.preferredRecordSyntax = 'XML'

    def isbn_search(self, isbn):
        """Search by ISBN; returns a zoom ResultSet."""
        query = zoom.Query('CCL', 'ISBN=' + isbn)
        result = self.conn.search(query)
        return result

    def close(self):
        self.conn.close()

#class Menu():

def get_book_loop():
    bib = Bibsys()
    while True:
        # Named 'user_input' to avoid shadowing the 'input' builtin.
        user_input = raw_input('Enter ISBN number> ')
        if user_input in exit_commands:
            break
        else:
            r = bib.isbn_search(user_input)
            if len(r) > 0:
                print r[0]
    bib.close()

get_book_loop()
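
# Example session (hedged: the exact output depends on the live BIBSYS
# server and on how the XML record syntax renders):
#
#     Enter ISBN number> <some ISBN>
#     Rec: XML <?xml version="1.0" ... ?>...
#     Enter ISBN number> q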